repo_name (string, 5 to 92 chars) | path (string, 4 to 232 chars) | copies (string, 19 classes) | size (string, 4 to 7 chars) | content (string, 721 to 1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Vutshi/qutip | qutip/examples/ex_52.py | 1 | 2978 | #
# Landau-Zener-Stuckelberg interferometry: steady state of repeated
# Landau-Zener like avoided-level crossing, as a function of driving amplitude
# and bias.
#
# Note: In order to get this example to work properly in the demos window,
# we have had to pass many more variables to parfor than is typically
# necessary.
from qutip import *
from pylab import *
# a task function for the for-loop parallelization:
# the m-index is parallelized in loop over the elements of p_mat[m,n]
def task(args):
m, H_td, c_op_list, sn, A_list, T, w, eps = args
p_mat_m = zeros(len(A_list))
for n, A in enumerate(A_list):
# change args sent to solver, w is really a constant though.
Hargs = {'w': w, 'eps': eps, 'A': A}
# settings (for reusing list-str format Hamiltonian)
U = propagator(H_td, T, c_op_list, Hargs, Odeoptions(rhs_reuse=True))
rho_ss = propagator_steadystate(U)
p_mat_m[n] = expect(sn, rho_ss)
return [m, p_mat_m]
def run():
# set up the parameters and start calculation
delta = 0.1 * 2 * pi # qubit sigma_x coefficient
w = 2.0 * 2 * pi # driving frequency
T = 2 * pi / w # driving period
gamma1 = 0.00001 # relaxation rate
gamma2 = 0.005 # dephasing rate
eps_list = linspace(-10.0, 10.0, 101) * 2 * pi
A_list = linspace(0.0, 20.0, 101) * 2 * pi
# pre-calculate the necessary operators
sx = sigmax()
sz = sigmaz()
sm = destroy(2)
sn = num(2)
# collapse operators: relaxation and dephasing
c_op_list = [sqrt(gamma1) * sm, sqrt(gamma2) * sz]
# setup time-dependent Hamiltonian (list-string format)
H0 = -delta / 2.0 * sx
H1 = [sz, '-eps/2.0+A/2.0*sin(w * t)']
H_td = [H0, H1]
Hargs = {'w': w, 'eps': eps_list[0], 'A': A_list[0]}
# pre-generate RHS so we can use parfor
rhs_generate(H_td, c_op_list, Hargs, name='lz_func')
# start a parallel for loop over bias point values (eps_list)
parfor_args = [[k, H_td, c_op_list, sn, A_list, T, w, eps_list[k]]
for k in range(len(eps_list))]
p_mat_list = parfor(task, parfor_args)
# assemble a matrix p_mat from list of (index,array) tuples returned by
# parfor
p_mat = zeros((len(eps_list), len(A_list)))
for m, p_mat_m in p_mat_list:
p_mat[m, :] = p_mat_m
# Plot the results
A_mat, eps_mat = meshgrid(A_list / (2 * pi), eps_list / (2 * pi))
fig = figure()
ax = fig.add_axes([0.1, 0.1, 0.9, 0.8])
c = ax.pcolor(eps_mat, A_mat, p_mat)
c.set_cmap('RdYlBu_r')
cbar = fig.colorbar(c)
cbar.set_label("Probability")
ax.set_xlabel(r'Bias point $\epsilon$')
ax.set_ylabel(r'Amplitude $A$')
ax.autoscale(tight=True)
title('Steadystate excitation probability\n' +
r'$H = -\frac{1}{2}\Delta\sigma_x - \frac{1}{2}\epsilon\sigma_z' +
r' - \frac{1}{2}A\sin(\omega t)$' + '\\n')
show()
close()
if __name__ == '__main__':
run()
| gpl-3.0 | -7,503,352,612,644,134,000 | 34.035294 | 78 | 0.595366 | false |
listyque/TACTIC-Handler | thlib/side/console/ui/output_window.py | 1 | 2451 | from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtGui as Qt4Gui
class OutputWindow(QtGui.QPlainTextEdit):
def __init__(self, parent=None):
"""
Initialize default settings.
"""
QtGui.QPlainTextEdit.__init__(self, parent)
self.setTabStopWidth(4 * self.fontMetrics().width(" "))
self.__current_write_state = "output"
def scroll_to_bottom(self):
"""
Scroll to bottom.
"""
scrollbar = self.verticalScrollBar()
scrollbar.setValue(scrollbar.maximum())
self.moveCursor(Qt4Gui.QTextCursor.End)
def write_input(self, text):
if self.__current_write_state != "input":
self.__current_write_state = "input"
# text = unicode(text)
text = text.replace("\\r", "\r")
text = text.replace("\\n", "\n")
text = text.replace(" ", " ")
text = text.replace("<", "<")
text = text.replace(">", ">")
for line in text.splitlines():
line = '<font color="#A9A9A9">' + line + '</font><br>'
self.__write_html_output(line)
# QtCore.QCoreApplication.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
self.scroll_to_bottom()
def write_output(self, text):
if self.__current_write_state != "output":
self.__current_write_state = "output"
text = text.replace("\\r", "\r")
text = text.replace("\\n", "\n")
self.moveCursor(Qt4Gui.QTextCursor.End)
self.insertPlainText(text)
self.moveCursor(Qt4Gui.QTextCursor.End)
self.scroll_to_bottom()
def write_error(self, text):
if self.__current_write_state != "error":
self.__current_write_state = "error"
# text = unicode(text)
text = text.replace("\\r", "\r")
text = text.replace("\\n", "\n")
text = text.replace(" ", " ")
text = text.replace("<", "<")
text = text.replace(">", ">")
for line in text.splitlines():
line = '<font color="#ff9999">' + line + '</font><br>'
self.__write_html_output(line)
self.scroll_to_bottom()
def __write_html_output(self, text):
"""
Write text as html output.
"""
self.moveCursor(Qt4Gui.QTextCursor.End)
self.textCursor().insertHtml(text)
self.moveCursor(Qt4Gui.QTextCursor.End)
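# --- illustrative usage sketch (not part of the original file) ---
# Minimal way to exercise OutputWindow on its own. It relies only on the Qt
# wrapper imported at the top of this module (QtGui is the QtWidgets alias);
# the strings fed to the widget are placeholders for the example.
if __name__ == "__main__":
    app = QtGui.QApplication([])
    console = OutputWindow()
    console.write_input("print('hello')")    # rendered grey, HTML-escaped
    console.write_output("hello\n")          # plain text, auto-scrolls
    console.write_error("Traceback (most recent call last): ...")
    console.show()
    app.exec_()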
| epl-1.0 | -5,648,691,627,177,380,000 | 28.178571 | 93 | 0.555692 | false |
excel-analytics/telegram_chat_bot | tg_bot/s2s.py | 1 | 1734 | from multiprocessing import Process, Queue
# from multiprocessing.queue import Empty
import time
import telepot
import yaml
from seq2seq.runner import decode
config = yaml.load(open('config.yml').read())
in_msg = Queue()
out_msg = Queue()
chat_id = config['chat_id']
reload_msg = '/reload'
def run_tg(bot):
bot.handle = handle
print('I am listening ...')
bot.message_loop()
while 1:
time.sleep(10)
def f(q_to, q_from):
decode(q_to, q_from)
def work_with_model(bot):
while 1:
q_to = Queue()
q_from = Queue()
p = Process(target=f, args=(q_to, q_from))
p.start()
init = q_from.get()
bot.sendMessage(chat_id, init)
while 1:
message = in_msg.get()
if message.startswith(reload_msg):
bot.sendMessage(chat_id, 'Wait a lot.')
break
q_to.put(message)
from_model = q_from.get()
out_msg.put(from_model)
p.terminate()
def handle(msg):
# print(msg)
if 'chat' not in msg:
return
if 'id' not in msg['chat']:
return
if msg['chat']['id'] != chat_id:
return
if 'text' in msg:
in_msg.put(msg['text'].lower())
# print(msg['text'].startswith(reload_msg))
if not msg['text'].startswith(reload_msg):
answer = out_msg.get()
if answer.strip() == '':
answer = '%NO_MSG%'
bot.sendMessage(chat_id, answer, reply_to_message_id=msg['message_id'])
# if __name__ == '__main__':
config = yaml.load(open('config.yml').read())
bot = telepot.Bot(config['telegram'])
p = Process(target=run_tg, args=(bot,))
p.start()
work_with_model(bot)
# p.join()
| mit | -9,018,540,958,632,976,000 | 22.753425 | 83 | 0.555363 | false |
Sterncat/opticspy | opticspy/lens/aberration.py | 1 | 2204 | import numpy as __np__
import matplotlib.pyplot as __plt__
def third(s1,s2,s3,s4,s5):
"""
Third order aberrations:
Ray aberrations
Field curve
Distortion
input: third order aberration coefficient
sigma 1~5
output: third order aberration graph
"""
print("third order aberration")
py = __np__.linspace(-1,1,100)
px = __np__.linspace(0,1,50)
height = [1,0.7,0]
count = 0
ax = []
maxTan = 0
maxSag = 0
fig = __plt__.figure(1)
for h in height:
Tan = s1*py**3+3*s2*h*py**2+(3*s3+s4)*h**2.*py+s5*h**3
ax.append(__plt__.subplot2grid((3, 3), (count, 0), colspan=2))
__plt__.plot(py, Tan)
if maxTan < max(abs(Tan)): maxTan = max(abs(Tan))
if count == 0: __plt__.title('TANGENTIAL')
__plt__.axis([-1, 1, -maxTan, maxTan])
if count == len(height)-1: __plt__.xlabel('\n' + r'$\rho_y$',fontsize=20)
__plt__.ylabel('h = '+str(h),fontsize=15)
__plt__.grid(True)
Sag = s1*px**3+(s3+s4)*h**2*px
ax.append(__plt__.subplot2grid((3, 3), (count, 2)))
__plt__.plot(px, Sag)
if maxSag < max(abs(Sag)): maxSag = max(abs(Sag))
__plt__.axis([0, 1, -maxSag, maxSag])
if count == 0: __plt__.title('SAGITTAL')
if count == len(height)-1: __plt__.xlabel('\n' + r'$\rho_x$',fontsize=20)
__plt__.grid(True)
count = count + 1
fig.set_tight_layout(True)
__plt__.show()
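# Illustrative call (values are placeholders, chosen only to show the calling
# convention documented above): plots the tangential and sagittal ray fans for
# field heights 1, 0.7 and 0.
# third(0.08, 0.02, 0.01, 0.015, 0.005)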
def fieldcurve(sigma3 = 0.05, sigma4 = -0.05, FNO = 10, H = 20):
"""
sigma3 Astigmatism Coefficient
sigma4 Petzval Coefficient
FNO F-number
H Image Height
"""
uak = -1.00/(2*FNO) # maginal ray angle
h = __np__.linspace(0,1,40)
XP = -sigma4/uak*h**2
XT = -(3*sigma3+sigma4)/uak*h**2
XS = -(sigma3+sigma4)/uak*h**2
fig = __plt__.figure(figsize=(6, 8), dpi=80)
__plt__.plot(XP, h*H, 'b-*', label='P')
__plt__.plot(XT, h*H, 'b--', label='T')
__plt__.plot(XS, h*H, 'b', label='S')
__plt__.xlabel('Surface sag(mm)',fontsize=18)
__plt__.ylabel('Real image height(mm)',fontsize=18)
legend = __plt__.legend(loc='lower left', shadow=True, fontsize='x-large')
__plt__.title(r'$\sigma3 = $'+str(round(sigma3,4))+' '+r'$\sigma4 = $'+str(sigma4),fontsize=18)
#__plt__.axis([-16, 5, 0, H])
__plt__.grid(b=True, which='both', color='0.65',linestyle='--')
__plt__.show()
return 0 | mit | -6,850,834,233,485,839,000 | 27.636364 | 96 | 0.585299 | false |
xraywu/wegene-python-sdk | wegene/Controllers/Psychology.py | 1 | 2949 | # -*- coding: utf-8 -*-
"""
wegene.Controllers.PsychologyController
This file was automatically generated by APIMATIC BETA v2.0 on 02/22/2016
"""
import requests
from wegene.APIHelper import APIHelper
from wegene.Configuration import Configuration
from wegene.APIException import APIException
from wegene.Models.Report import Report
class Psychology(object):
"""A Controller to access Endpoints in the WeGeneAPILib API."""
def get_psychology(self,
profile_id,
report_id):
"""Does a POST request to /psychology/{profile_id}.
Psychology profile based on genetic information
Args:
profile_id (string): Genetic profile id
report_id (string): Report Id for the specific health risk to
look
Returns:
Report: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/psychology/{profile_id}"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"profile_id": profile_id
})
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"Authorization": "Bearer " + Configuration.o_auth_access_token,
"user-agent": "WeGene SDK",
"accept": "application/json",
}
# Prepare parameters
parameters = {
"report_id": report_id
}
# Prepare and invoke the API call request to fetch the response
response = requests.post(query_url, headers=headers, data=parameters)
# Error handling using HTTP status codes
if response.status_code < 200 or response.status_code > 206: # 200 = HTTP OK
raise APIException("HTTP Response Not OK",
response.status_code, response.json())
# Try to cast response to desired type
if isinstance(response.json(), dict):
# Response is already in a dictionary, return the object
try:
return Report(**response.json())
except TypeError:
raise APIException("Invalid JSON returned",
response.status_code, response.json())
# If we got here then an error occured while trying to parse the response
raise APIException("Invalid JSON returned",
response.status_code, response.json())
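# --- illustrative usage (not part of the generated client) ---
# Sketch only: it assumes Configuration.o_auth_access_token has already been
# set to a valid token, and the profile/report ids below are placeholders.
# client = Psychology()
# report = client.get_psychology("your-profile-id", "your-report-id")
# print(report)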
| mit | -6,103,705,326,501,828,000 | 32.134831 | 86 | 0.602238 | false |
hajgato/easybuild-easyblocks | test/easyblocks/init_easyblocks.py | 1 | 6139 | ##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for initializing easyblocks.
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import tempfile
from vsc import fancylogger
from unittest import TestCase, TestLoader, main
import easybuild.tools.options as eboptions
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import MANDATORY
from easybuild.framework.easyconfig.easyconfig import EasyConfig, get_easyblock_class
from easybuild.framework.easyconfig.tools import get_paths_for
from easybuild.tools import config
class InitTest(TestCase):
""" Baseclass for easyblock testcases """
# initialize configuration (required for e.g. default modules_tool setting)
eb_go = eboptions.parse_options()
config.init(eb_go.options, eb_go.get_options_by_section('config'))
build_options = {
'valid_module_classes': config.module_classes(),
'valid_stops': [x[0] for x in EasyBlock.get_steps()],
}
config.init_build_options(build_options=build_options)
config.set_tmpdir()
del eb_go
def writeEC(self, easyblock, extratxt=''):
""" create temporary easyconfig file """
txt = '\n'.join([
'easyblock = "%s"',
'name = "foo"',
'version = "1.3.2"',
'homepage = "http://example.com"',
'description = "Dummy easyconfig file."',
'toolchain = {"name": "dummy", "version": "dummy"}',
'sources = []',
extratxt,
])
f = open(self.eb_file, "w")
f.write(txt % easyblock)
f.close()
def setUp(self):
"""Setup test."""
self.log = fancylogger.getLogger("EasyblocksInitTest", fname=False)
fd, self.eb_file = tempfile.mkstemp(prefix='easyblocks_init_test_', suffix='.eb')
os.close(fd)
def tearDown(self):
"""Cleanup."""
try:
os.remove(self.eb_file)
except OSError, err:
self.log.error("Failed to remove %s/%s: %s" % (self.eb_file, err))
def template_init_test(self, easyblock):
"""Test whether all easyconfigs can be initialized."""
def check_extra_options_format(extra_options):
"""Make sure extra_options value is of correct format."""
# EasyBuild v1.x
self.assertTrue(isinstance(extra_options, list))
for extra_option in extra_options:
self.assertTrue(isinstance(extra_option, tuple))
self.assertEqual(len(extra_option), 2)
self.assertTrue(isinstance(extra_option[0], basestring))
self.assertTrue(isinstance(extra_option[1], list))
self.assertEqual(len(extra_option[1]), 3)
# EasyBuild v2.0 (breaks backward compatibility compared to v1.x)
#self.assertTrue(isinstance(extra_options, dict))
#for key in extra_options:
# self.assertTrue(isinstance(extra_options[key], list))
# self.assertTrue(len(extra_options[key]), 3)
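# For reference, a v1.x-style value looks roughly like
# [('with_foo', [False, "Enable foo support", CUSTOM])]
# and the v2.0-style equivalent would be
# {'with_foo': [False, "Enable foo support", CUSTOM]}
# (CUSTOM is the usual easyconfig parameter constant; it is shown purely as an
# illustration, this test never builds such a value itself.)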
class_regex = re.compile("^class (.*)\(.*", re.M)
self.log.debug("easyblock: %s" % easyblock)
# obtain easyblock class name using regex
f = open(easyblock, "r")
txt = f.read()
f.close()
res = class_regex.search(txt)
if res:
ebname = res.group(1)
self.log.debug("Found class name for easyblock %s: %s" % (easyblock, ebname))
# figure out list of mandatory variables, and define with dummy values as necessary
app_class = get_easyblock_class(ebname)
extra_options = app_class.extra_options()
check_extra_options_format(extra_options)
# extend easyconfig to make sure mandatory custom easyconfig paramters are defined
extra_txt = ''
for (key, val) in extra_options:
if val[2] == MANDATORY:
extra_txt += '%s = "foo"\n' % key
# write easyconfig file
self.writeEC(ebname, extra_txt)
# initialize easyblock
# if this doesn't fail, the test succeeds
app = app_class(EasyConfig(self.eb_file))
# cleanup
app.close_log()
os.remove(app.logfile)
else:
self.assertTrue(False, "Class found in easyblock %s" % easyblock)
def suite():
"""Return all easyblock initialisation tests."""
# dynamically generate a separate test for each of the available easyblocks
easyblocks_path = get_paths_for("easyblocks")[0]
all_pys = glob.glob('%s/*/*.py' % easyblocks_path)
easyblocks = [eb for eb in all_pys if not eb.endswith('__init__.py') and not '/test/' in eb]
for easyblock in easyblocks:
# dynamically define new inner functions that can be added as class methods to InitTest
exec("def innertest(self): template_init_test(self, '%s')" % easyblock)
innertest.__doc__ = "Test for initialisation of easyblock %s" % easyblock
innertest.__name__ = "test_easyblock_%s" % '_'.join(easyblock.replace('.py', '').split('/'))
setattr(InitTest, innertest.__name__, innertest)
return TestLoader().loadTestsFromTestCase(InitTest)
if __name__ == '__main__':
main()
| gpl-2.0 | 4,414,477,057,824,884,700 | 35.541667 | 100 | 0.650757 | false |
wphicks/Writing3D | pyw3d/blender_actions/visibility.py | 1 | 3616 | # Copyright (C) 2016 William Hicks
#
# This file is part of Writing3D.
#
# Writing3D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""Tools for changing the visibility of a Blender object"""
class VisibilityAction(object):
"""Generate Python logic for how visibility should change when action first
starts, as it continues, and when it ends
:param bool visibility: The visibility to transition to
:param float duration: Time for action to complete in seconds
:param int offset: A number of tabs (4 spaces) to add before Python logic
strings"""
@property
def start_string(self):
script_text = []
# TODO: Fade out timing appears to be mucked
script_text.extend([
"blender_object.color[3] = int(blender_object.visible)",
"blender_object.setVisible(True)",
"delta_alpha = {} - blender_object.color[3]".format(
int(self.visible)),
"W3D_LOG.debug(",
" 'object {} visibility set to {}'.format(",
" blender_object.name, delta_alpha > 0",
" )",
")",
"blender_object['visible_tag'] = 'delta_alpha > 0'",
"blender_object['visV'] = delta_alpha/{}".format(
("({}*bge.logic.getLogicTicRate())".format(self.duration), 1)[
self.duration == 0])]
)
try:
script_text[0] = "{}{}".format(" "*self.offset, script_text[0])
except IndexError:
return ""
return "\n{}".format(" "*self.offset).join(script_text)
@property
def continue_string(self):
script_text = [
"new_color = blender_object.color",
"new_color[3] += blender_object['visV']",
"blender_object.color = new_color"
]
try:
script_text[0] = "{}{}".format(" "*self.offset, script_text[0])
except IndexError:
return ""
return "\n{}".format(" "*self.offset).join(script_text)
@property
def end_string(self):
script_text = [
"new_color = blender_object.color",
"new_color[3] = {}".format(int(self.visible)),
"blender_object.color = new_color",
"blender_object.setVisible({})".format(self.visible),
"if 'clicks' in blender_object:",
" if blender_object.visible:",
" blender_object['clickable'] = True",
" else:",
" try:",
" del blender_object['clickable']",
" except KeyError:",
" pass # Already unclickable",
]
try:
script_text[0] = "{}{}".format(" "*self.offset, script_text[0])
except IndexError:
return ""
return "\n{}".format(" "*self.offset).join(script_text)
def __init__(self, visibility, duration, offset=0):
self.visible = visibility
self.duration = duration
self.offset = offset
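# --- illustrative usage (not part of the original module) ---
# Minimal sketch of the Blender game-logic snippets this class generates for a
# half-second fade-in; the numbers are placeholders.
if __name__ == "__main__":
    action = VisibilityAction(True, 0.5, offset=1)
    print(action.start_string)
    print(action.continue_string)
    print(action.end_string)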
| gpl-3.0 | 4,423,988,806,805,680,000 | 37.468085 | 79 | 0.568584 | false |
igrlas/CentralHub | CHPackage/src/centralhub/helpers/data_operations.py | 1 | 1674 |
def element_dict_to_tuple(my_dict):
"""Of type Element"""
if 'hid' not in my_dict:
my_dict['hid'] = None
if 'name' not in my_dict:
my_dict['name'] = None
if 'type' not in my_dict:
my_dict['type'] = None
if 'state' not in my_dict:
my_dict['state'] = None
if 'override' not in my_dict:
my_dict['override'] = None
if 'temperature' not in my_dict:
my_dict['temperature'] = None
if 'defaultTemperature' not in my_dict:
my_dict['defaultTemperature'] = None
if 'desiredTemperature' not in my_dict:
my_dict['desiredTemperature'] = None
if 'address' not in my_dict:
my_dict['address'] = None
return (my_dict['hid'], my_dict['name'], my_dict['type'], my_dict['state'], my_dict['override'],
my_dict['temperature'], my_dict['defaultTemperature'],my_dict['desiredTemperature'], my_dict['address'])
def tuple_to_dict(properties, values):
"""Any tuple to any dict"""
if len(properties) != len(values):
raise Exception('number of properties does not match number of values supplied')
final_dict = {}
for i, prop in enumerate(properties):
final_dict[prop] = values[i]
return final_dict
def create_element(hid=None, name=None, type=None, state=None, override=None, temperature=None, defaultTemperature=None, desiredTemperature=None, address=None):
"""of type Element"""
return {'hid': hid, 'name': name, 'type': type, 'state': state, 'override': override, 'temperature': temperature,
'defaultTemperature': defaultTemperature, 'desiredTemperature': desiredTemperature, 'address': address}
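# --- illustrative usage (not part of the original module) ---
# Round-trip sketch: build an Element dict, flatten it to the tuple layout used
# by element_dict_to_tuple(), then rebuild a dict with tuple_to_dict(). Field
# names follow create_element(); the values are placeholders.
if __name__ == '__main__':
    element = create_element(hid=1, name='thermostat', type='sensor', state='on',
                             temperature=21.5, desiredTemperature=22.0)
    as_tuple = element_dict_to_tuple(element)
    fields = ('hid', 'name', 'type', 'state', 'override',
              'temperature', 'defaultTemperature', 'desiredTemperature', 'address')
    print(tuple_to_dict(fields, as_tuple))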
| gpl-2.0 | 4,153,254,492,931,863,600 | 37.045455 | 160 | 0.630824 | false |
conorsch/securedrop | molecule/testinfra/common/test_system_hardening.py | 1 | 5786 | import pytest
import re
import testutils
sdvars = testutils.securedrop_test_vars
testinfra_hosts = [sdvars.app_hostname, sdvars.monitor_hostname]
@pytest.mark.parametrize('sysctl_opt', [
('net.ipv4.conf.all.accept_redirects', 0),
('net.ipv4.conf.all.accept_source_route', 0),
('net.ipv4.conf.all.rp_filter', 1),
('net.ipv4.conf.all.secure_redirects', 0),
('net.ipv4.conf.all.send_redirects', 0),
('net.ipv4.conf.default.accept_redirects', 0),
('net.ipv4.conf.default.accept_source_route', 0),
('net.ipv4.conf.default.rp_filter', 1),
('net.ipv4.conf.default.secure_redirects', 0),
('net.ipv4.conf.default.send_redirects', 0),
('net.ipv4.icmp_echo_ignore_broadcasts', 1),
('net.ipv4.ip_forward', 0),
('net.ipv4.tcp_max_syn_backlog', 4096),
('net.ipv4.tcp_syncookies', 1),
('net.ipv6.conf.all.disable_ipv6', 1),
('net.ipv6.conf.default.disable_ipv6', 1),
('net.ipv6.conf.lo.disable_ipv6', 1),
])
def test_sysctl_options(host, sysctl_opt):
"""
Ensure sysctl flags are set correctly. Most of these checks
are disabling IPv6 and hardening IPv4, which is appropriate
due to the heavy use of Tor.
"""
with host.sudo():
# For Focal, we disable IPv6 entirely, so the IPv6 sysctl options won't exist
if sysctl_opt[0].startswith("net.ipv6") and host.system_info.codename == "focal":
return True
assert host.sysctl(sysctl_opt[0]) == sysctl_opt[1]
def test_dns_setting(host):
"""
Ensure DNS service is hard-coded in resolv.conf config.
"""
if host.system_info.codename == "focal":
fpath = "/etc/resolv.conf"
else:
fpath = "/etc/resolvconf/resolv.conf.d/base"
f = host.file(fpath)
assert f.is_file
assert f.user == "root"
assert f.group == "root"
assert f.mode == 0o644
assert f.contains(r'^nameserver 8\.8\.8\.8$')
@pytest.mark.parametrize('kernel_module', [
'bluetooth',
'iwlwifi',
])
def test_blacklisted_kernel_modules(host, kernel_module):
"""
Test that unwanted kernel modules are blacklisted on the system.
Mostly these checks are defense-in-depth approaches to ensuring
that wireless interfaces will not work.
"""
with host.sudo():
c = host.run("lsmod")
assert kernel_module not in c.stdout
f = host.file("/etc/modprobe.d/blacklist.conf")
assert f.contains("^blacklist {}$".format(kernel_module))
def test_swap_disabled(host):
"""
Ensure swap space is disabled. Prohibit writing memory to swapfiles
to reduce the threat of forensic analysis leaking any sensitive info.
"""
hostname = host.check_output('hostname')
# Mon doesn't have swap disabled yet
if hostname.startswith('mon'):
return True
c = host.check_output('swapon --summary')
# A leading slash will indicate full path to a swapfile.
assert not re.search("^/", c, re.M)
# On Xenial, swapon 2.27.1 shows blank output, with no headers, so
# check for empty output as confirmation of no swap.
rgx = re.compile("^$")
assert re.search(rgx, c)
def test_twofactor_disabled_on_tty(host):
"""
Having 2FA on TTY logins is cumbersome on systems without encrypted drives.
Let's make sure this option is disabled!
"""
pam_auth_file = host.file("/etc/pam.d/common-auth").content_string
assert "auth required pam_google_authenticator.so" not in pam_auth_file
assert "pam_ecryptfs.so unwrap" not in pam_auth_file
@pytest.mark.parametrize('sshd_opts', [
('UsePAM', 'no'),
('ChallengeResponseAuthentication', 'no'),
('PasswordAuthentication', 'no'),
('PubkeyAuthentication', 'yes'),
('RSAAuthentication', 'yes'),
('AllowGroups', 'ssh'),
('AllowTcpForwarding', 'no'),
('AllowAgentForwarding', 'no'),
('PermitTunnel', 'no'),
('X11Forwarding', 'no'),
])
def test_sshd_config(host, sshd_opts):
"""
Let's ensure sshd does not fall back to password-based authentication
"""
sshd_config_file = host.file("/etc/ssh/sshd_config").content_string
line = "{} {}".format(sshd_opts[0], sshd_opts[1])
assert line in sshd_config_file
@pytest.mark.parametrize('logfile', [
'/var/log/auth.log',
'/var/log/syslog',
])
def test_no_ecrypt_messages_in_logs(host, logfile):
"""
Ensure pam_ecryptfs is removed from /etc/pam.d/common-auth: not only is it
no longer needed, it also causes error messages (see issue #3963)
"""
error_message = "pam_ecryptfs.so: cannot open shared object file"
with host.sudo():
f = host.file(logfile)
# Not using `f.contains(<pattern>)` because that'd cause the sought
# string to make it into syslog as a side-effect of the testinfra
# invocation, causing subsequent test runs to report failure.
assert error_message not in f.content_string
@pytest.mark.parametrize('package', [
'aptitude',
'cloud-init',
'libiw30',
'python-is-python2',
'snapd',
'torsocks',
'wireless-tools',
'wpasupplicant',
])
def test_unused_packages_are_removed(host, package):
""" Check if unused package is present """
assert host.package(package).is_installed is False
def test_iptables_packages(host):
"""
Focal hosts should use iptables-persistent for enforcing
firewall config across reboots.
"""
if host.system_info.codename == "focal":
assert host.package("iptables-persistent").is_installed
else:
assert not host.package("iptables-persistent").is_installed
def test_snapd_absent(host):
assert not host.file("/lib/systemd/system/snapd.service").exists
assert not host.file("/etc/apparmor.d/usr.lib.snapd.snap-confine.real").exists
assert not host.file("/usr/bin/snap").exists
assert not host.file("/var/lib/snapd/snaps").exists
| agpl-3.0 | -8,633,960,916,756,823,000 | 30.966851 | 89 | 0.663498 | false |
Jbkwok/is210_lesson_02 | identity.py | 1 | 1489 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides the is_empty() method."""
def get_member_count(my_sequence):
"""Returns the number of members of a list object.
Args:
my_sequence (sequence): The sequence object being measured.
Returns:
mixed: If the object can be measured it returns an integer. If not it
returns ``False``
Examples:
>>> get_member_count(42)
False
>>> get_member_count('duck')
4
>>> get_member_count(['knights', 'who', 'say', 'ni'])
4
"""
try:
length = len(my_sequence)
except TypeError:
length = False
return length
def is_empty(my_sequence):
"""Tests whether or not the passed sequence is empty.
Args:
my_sequence (sequence): The sequence object being measured.
Returns:
bool: If empty, returns True, otherwise, False.
Raises:
TypeError: If my_sequence is not a sequence object type.
Examples:
>>> is_empty('')
True
>>> is_empty('apple')
False
>>> is_empty([])
True
>>> is_empty(42)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: object has no len()
"""
count = get_member_count(my_sequence)
if count is not False:
return count == 0
else:
raise TypeError('object has no len()')
TEST = ''
print len(TEST)
print is_empty(TEST)
| mpl-2.0 | 3,495,747,279,157,622,000 | 19.971831 | 77 | 0.556749 | false |
sdenisen/python | yandex/task3/task3_tests.py | 1 | 1832 | import functools
import random
import string
import unittest
from timeit import Timer
from task3.task3_resolve import get_unique_offers
class MyTestCase(unittest.TestCase):
def test_something(self):
fid_1 = {"offers": [{"offer_id": "offer1", "market_sku": 10846332, "price": 1490},
{"offer_id": "offer2", "market_sku": 682644, "price": 499}]}
fid_2 = {"offers": [{"offer_id": "offer3", "market_sku": 832784, "price": 14000}]}
expected_offers = {"offers": [{"market_sku": 682644, "offer_id": "offer2", "price": 499},
{"market_sku": 10846332, "offer_id": "offer1", "price": 1490},
{"market_sku": 832784, "offer_id": "offer3", "price": 14000},
]}
input_data = [fid_1, fid_2]
sorted_offers = get_unique_offers(input_data)
self.assertDictEqual(expected_offers, sorted_offers)
def test_big_data(self):
n = 200
count_fid = 2000
input_data = []
# fid_template = {"offer_id": "str", "market_sku": 555, "price": 444}
for i in range(n):
fid = []
for j in range(count_fid):
offer_id = ''.join(random.choices(string.ascii_uppercase + string.digits + string.ascii_lowercase, k=9))
market_sku = random.randint(1, 30)
price = random.randint(1, 30)
fid_template = {"offer_id": offer_id, "market_sku": market_sku, "price": price}
fid.append(fid_template)
input_data.append({"offers": fid})
print("start time test:")
t = Timer(functools.partial(get_unique_offers, input_data), globals=globals())
print(t.timeit(1)/1)
if __name__ == '__main__':
unittest.main()
| unlicense | -4,633,237,976,848,033,000 | 38.826087 | 120 | 0.537118 | false |
bschug/poe-loot-gen | uniques.py | 1 | 2853 | import requests
from collections import defaultdict
import sys
FATED_UNIQUES = {
'Amplification Rod',
'Cragfall',
'Death\'s Opus',
'Deidbellow',
'Doomfletch\'s Prism',
'Ezomyte Hold',
'Hrimburn',
'Hrimnor\'s Dirge',
'Kaltensoul',
'Kaom\'s Way',
'Karui Charge',
'Martyr\'s Crown',
'Ngamahu Tiki',
'Queen\'s Escape',
'Realm Ender',
'Shavronne\'s Gambit',
'Silverbough',
'The Cauteriser',
'The Gryphon',
'The Oak',
'The Signal Fire',
'The Tempest',
'Thirst for Horrors',
'Wall of Brambles',
'Voidheart'
}
def get_unique_prices(league):
unique_prices = defaultdict(lambda: 0)
get_unique_prices_from_url('http://poeninja.azureedge.net/api/Data/GetUniqueWeaponOverview', league, unique_prices)
get_unique_prices_from_url('http://poeninja.azureedge.net/api/Data/GetUniqueArmourOverview', league, unique_prices)
get_unique_prices_from_url('http://poeninja.azureedge.net/api/Data/GetUniqueAccessoryOverview', league, unique_prices)
get_unique_prices_from_url('http://poeninja.azureedge.net/api/Data/GetUniqueFlaskOverview', league, unique_prices)
return unique_prices
def get_unique_prices_from_url(url, league, unique_prices):
response = requests.get(url, {'league': league}).json()
for item in response['lines']:
if item['name'] in FATED_UNIQUES:
continue
unique_prices[item['baseType']] = max(unique_prices[item['baseType']], item['chaosValue'])
def build_filter_code(unique_prices):
worthless, mediocre, valuable, awesome = [], [], [], []
for k, v in unique_prices.items():
if v < 0.5:
worthless.append(k)
elif v < 2:
mediocre.append(k)
elif v < 15:
valuable.append(k)
else:
awesome.append(k)
code = """
# Top Tier Uniques (15c+)
Show
Rarity Unique
BaseType {}
SetBackgroundColor 175 78 17
SetTextColor 0 0 0
SetBorderColor 0 0 0
SetFontSize 45
PlayAlertSound 6 300
# Decent Uniques (2c+)
Show
Rarity Unique
BaseType {}
SetFontSize 45
SetBackgroundColor 70 35 14 220
SetBorderColor 0 0 0
PlayAlertSound 6 300
# Mediocre Uniques (~1c)
Show
Rarity Unique
BaseType {}
SetFontSize 38
# Worthless Uniques (< 2 alch)
Show
Rarity Unique
BaseType {}
SetFontSize 30
# Draw pink border around unknown Uniques
Show
Rarity Unique
SetBorderColor 255 100 255
""".format(
' '.join('"{}"'.format(x) for x in awesome),
' '.join('"{}"'.format(x) for x in valuable),
' '.join('"{}"'.format(x) for x in mediocre),
' '.join('"{}"'.format(x) for x in worthless),
)
return code
if __name__ == '__main__':
league = sys.argv[1]
print(build_filter_code(get_unique_prices(league)))
| mit | -7,418,833,182,215,221,000 | 24.702703 | 122 | 0.625657 | false |
gblanchard4/viamics | framework/modules/blast.py | 1 | 6151 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2010 - 2011, University of New Orleans
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# --
#Contains the process for running a viamics analysis using BLAST, using functions and classes from
#framework.tools.blast. This depends on blast databases being stored at constants.blastdb_dir, and having
#blastn and makeblastdb executables on the path
#
#If the blastn or makeblastdb programs are throwing errors, one possible cause is spaces in the path to input
#or output files. I could not for the life of me figure this out (I think the blastn and makeblastdb programs just
#can't handle it), so I just stick underscores in the name the user gives. If Viamics is installed at say
#/home/username/Desktop/My bioinformatics folder/viamics, there could be a problem.
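#
#e.g. (illustrative only): a user-supplied analysis name is made "space free"
#before it is used to build paths, along the lines of
# safe_name = raw_name.replace(' ', '_')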
import os
import cPickle
from framework.tools.helper_functions import SerializeToFile, DeserializeFromFile
from framework.tools.logger import debug
from framework.tools import fasta
import framework.constants as c
import framework.tools.blast
import framework.tools.helper_functions as helper_functions
def _preprocess(p, request_dict):
#fasta.stripped expects an open keyfile object, but all it does is
#"for line in keys", so a list of strings works here. Using a list avoids all
#the nonsense of sending another file from the client.
mode = request_dict.get("qa_mode")
try:
return fasta.fasta_qa_preprocess(
mode,
request_dict.get("data_file_path"),
request_dict.get("codes_primers"),#keyfile. see above
homopolymer_length = request_dict.get("homopolymer_length"))
except:
debug(helper_functions.formatExceptionInfo(), p.files.log_file)
raise
def _exec(p, request_dict):
p.set_analysis_type('blast')
p.threshold = request_dict.get('threshold_dict')
separator = request_dict['seperator']#sic
debug("storing separator: '%s'" % separator, p.files.log_file)
open(p.files.seperator_file_path, 'w').write(separator)
debug("storing DB name: '%s'" % request_dict['db_name'], p.files.log_file)
open(p.files.blast_db_name_path, 'w').write(request_dict['db_name'])
if p.threshold:
debug("storing confidence threshold", p.files.log_file)
with open(p.files.threshold_path,'w') as f:
f.write(cPickle.dumps(p.threshold))
#add length info to legend
num_seqs = helper_functions.get_number_of_lines(p.files.data_file_path) / 2
name = request_dict['db_name']
#run blast on data
blast_db = os.path.join(c.blastdb_dir,name,name)
debug("Extracting QA info", p.files.log_file)
cmt = open(p.files.data_comment_file_path,'w')
for line in open(p.files.data_file_path):
if line.startswith(';'):
cmt.write(line)
cmt.close()
debug(("running blast on %d sequences against database: %s " % (num_seqs, request_dict['db_name'])), p.files.log_file)
framework.tools.blast.run_blastn(p.files.data_file_path, p.files.blast_output_file_path, blast_db,num=1)
samples_dictionary(p)
samples = DeserializeFromFile(p.files.samples_serialized_file_path).keys()
if len(samples) == 0:
msg = 'error: samples dict contains no samples. perhaps no sequences in the query matched the datbase'
debug(msg,p.files.log_file)
raise ValueError(msg)
else:
open(p.files.all_unique_samples_file_path, 'w').write('\n'.join(samples) + '\n')
debug("%d unique sample names stored" % len(samples), p.files.log_file)
otu_library(p)
if hasattr(p,'threshold'):
separate_low_confidence(p)
def samples_dictionary(p):
debug("Computing sample dictionary", p.files.log_file)
db_name = open(p.files.blast_db_name_path).read()
legend_path = os.path.join(c.blastdb_dir,
db_name,db_name+c.blast_legend_file_extension)
samples_dict = framework.tools.blast.create_samples_dictionary(p.files.blast_output_file_path,
legend_path,
open(p.files.seperator_file_path).read(),
thresholds=p.threshold)
debug("Serializing samples dictionary object", p.files.log_file)
SerializeToFile(samples_dict, p.files.samples_serialized_file_path)
def otu_library(p):
debug("Generating OTU Library", p.files.log_file)
db_name = open(p.files.blast_db_name_path).read()
legend_path = os.path.join(c.blastdb_dir,
db_name,db_name+c.blast_legend_file_extension)
otu_library = framework.tools.blast.get_otu_library(p.files.blast_output_file_path,
legend_path,
open(p.files.seperator_file_path).read())
SerializeToFile(otu_library, p.files.otu_library_file_path)
def separate_low_confidence(p):
debug("Separating low confidence sequences", p.files.log_file)
separator = open(p.files.seperator_file_path).read()
lo_seqs = framework.tools.blast.low_confidence_seqs(open(p.files.data_file_path),
open(p.files.blast_output_file_path),
p.threshold,
separator)
with open(p.files.low_confidence_seqs_path,'w') as o:
for s in lo_seqs:
o.write(s)
def _module_functions(p, request_dict):
return {
'blast': {'func': samples_dictionary, 'desc': 'Samples dictionary'},
'blast': {'func': otu_library, 'desc': 'OTU library'}
}
def _sample_map_functions(p, request_dict):
return {}
| gpl-2.0 | 185,121,696,036,604,600 | 42.624113 | 122 | 0.63323 | false |
colonelqubit/libreconverter | libreconverter.py | 1 | 6539 | #!/usr/bin/python3
#
# Requires Python3
# *Please* make sure to use the version of Python included with
# your copy of LibreOffice.
#
# Convert spreadsheet to CSV file.
#
# Based on:
# PyODConverter (Python OpenDocument Converter) v1.0.0 - 2008-05-05
# Copyright (C) 2008 Mirko Nasato <[email protected]>
# Licensed under the GNU LGPL v2.1 - or any later version.
# http://www.gnu.org/licenses/lgpl-2.1.html
#
import os
import re
import loutils
import uno
from com.sun.star.task import ErrorCodeIOException
class LibreConverter:
"""
Spreadsheet converter class.
Converts spreadsheets to CSV files.
"""
def __init__(self, lorunner=None):
self.desktop = None
self.lorunner = None
def convert(self, inputFile, outputFile, verbose=False):
"""
Convert the input file (a spreadsheet) to a CSV file.
The input file name can contain a sheet specification to specify a particular sheet.
The sheet specification is either a number or a sheet name.
The sheet specification is appended to the file name separated by a colon
or an at sign: ":" or "@".
If the output file name contains a %d or %s format specifier, then all the sheets
in the input file are converted, otherwise only the first sheet is converted.
If the output file name contains a %d format specifier then the sheet number
is used when formatting the output file name.
The format can contain a width specifier (eg %02d).
If the output file name contains a %s specifier then the sheet name is used
when formatting the output file name.
"""
# Start openoffice if needed.
if not self.desktop:
if not self.lorunner:
self.lorunner = loutils.LORunner()
self.desktop = self.lorunner.connect()
# Check for sheet specification in input file name.
match = re.search(r'^(.*)[@:](.*)$', inputFile)
if os.path.exists(inputFile) or not match:
inputUrl = uno.systemPathToFileUrl(os.path.abspath(inputFile))
inputSheet = '1' # Convert fist sheet.
else:
inputUrl = uno.systemPathToFileUrl(os.path.abspath(match.group(1)))
inputSheet = match.group(2)
# NOTE:
# Sheet activation does not work properly when Hidden is specified.
# Although the sheet does become the active sheet, it's not the sheet that
# gets saved if the spreadsheet is loaded with Hidden=True.
#
# Removing Hidden=True doesn't seem to change anything: nothing appears
# on the screen regardless of the Hidden value.
#
# document = self.desktop.loadComponentFromURL(inputUrl, "_blank", 0, loutils.lo_properties(Hidden=True))
document = self.desktop.loadComponentFromURL(inputUrl, "_blank", 0, loutils.lo_properties())
try:
props = loutils.lo_properties(FilterName="Text - txt - csv (StarCalc)")
#
# Another useful property option:
# FilterOptions="59,34,0,1"
# 59 - Field separator (semicolon), this is the ascii value.
# 34 - Text delimiter (double quote), this is the ascii value.
# 0 - Character set (system).
# 1 - First line number to export.
#
# For more information see:
# http://wiki.services.openoffice.org/wiki/Documentation/DevGuide/Spreadsheets/Filter_Options
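# For instance (illustrative only, not used below): a comma-separated,
# UTF-8 encoded export starting at row 1 could be requested with
# props = loutils.lo_properties(
# FilterName="Text - txt - csv (StarCalc)",
# FilterOptions="44,34,76,1")
# where 44 is ',' and 34 is '"' as ASCII values, and 76 is assumed to be the
# UTF-8 character-set token described in the documentation linked above.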
# To convert a particular sheet, the sheet needs to be active.
# To activate a sheet we need the spreadsheet-view, to get the spreadsheet-view
# we need the spreadsheet-controller, to get the spreadsheet-controller
# we need the spreadsheet-model.
#
# The spreadsheet-model interface is available from the document object.
# The spreadsheet-view interface is available from the controller.
#
controller = document.getCurrentController()
sheets = document.getSheets()
# If the output file name contains a %d or %s format specifier, convert all sheets.
# Use the sheet number if the format is %d, otherwise the sheet name.
dfmt = re.search(r'%[0-9]*d', outputFile)
sfmt = re.search(r'%s', outputFile)
if dfmt or sfmt:
i = 0
while i < sheets.getCount():
# Activate the sheet.
sheet = sheets.getByIndex(i)
controller.setActiveSheet(sheet)
# Create output file name.
if dfmt:
ofile = outputFile % (i+1)
else:
ofile = outputFile % sheet.getName().replace(' ', '_')
if verbose: print( " %s" % ofile)
# Save the sheet to the output file.
outputUrl = uno.systemPathToFileUrl(os.path.abspath(ofile))
document.storeToURL(outputUrl, props)
i += 1
else:
# Activate the sheet to be converted.
if re.search(r'^\d+$', inputSheet):
sheet = sheets.getByIndex(int(inputSheet)-1)
else:
sheet = sheets.getByName(inputSheet)
controller.setActiveSheet(sheet)
outputUrl = uno.systemPathToFileUrl(os.path.abspath(outputFile))
document.storeToURL(outputUrl, props)
finally:
if document: document.close(True)
if __name__ == "__main__":
from sys import argv
from os.path import isfile
if len(argv) == 2 and argv[1] == '--shutdown':
loutils.lo_shutdown_if_running()
else:
if len(argv) < 3 or len(argv) % 2 != 1:
print("USAGE:")
print(" python %s INPUT-FILE[:SHEET] OUTPUT-FILE ..." % argv[0])
print("OR")
print(" python %s --shutdown" % argv[0])
exit(255)
try:
i = 1
converter = LibreConverter()
while i+1 < len(argv):
print('%s => %s' % (argv[i], argv[i+1]))
converter.convert(argv[i], argv[i+1], True)
i += 2
except ErrorCodeIOException as exception:
print("ERROR! ErrorCodeIOException %d" % exception.ErrCode)
exit(1)
| lgpl-2.1 | 6,492,071,722,650,483,000 | 37.017442 | 114 | 0.577764 | false |
wait4pumpkin/tmall | solution/analysis/single_repeat.py | 1 | 5277 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import random
import glob
import os
import sys
import time
import math
import numpy
import pylab
from collections import Counter
from svmutil import *
N_MONTH = 4
N_DAY_PER_MONTH = 31
BASE_MONTH = 4
TYPE_LENGTH = 4
class User(object):
def __init__(self, id, info):
self.id = id;
self.brands = info.keys()
self.data = dict()
self.day = dict()
self.label = set()
for brandID in self.brands:
brand = info[brandID]
for month, day, action in brand:
p = (month - BASE_MONTH) * 12
if day > 10:
p += 4
elif day > 20:
p += 8
if action == 1:
if month >= BASE_MONTH + N_MONTH - 1:
self.label.add(brandID)
else:
if brandID not in self.data:
self.data[brandID] = 0
self.day[brandID] = []
self.data[brandID] += 1
self.day[brandID].append(day + (month - BASE_MONTH) * N_DAY_PER_MONTH)
self.data = sorted(self.data.items(), key=lambda e: e[1], reverse=True)
self.period_brand = set()
for brand, days in self.day.items():
days.sort()
wait = [days[idx+1] - days[idx] for idx in range(len(days)-1)]
repeat = [num for num in wait if num > 0]
if len(repeat) > 0:
if days[-1] < (N_MONTH - 2) * N_DAY_PER_MONTH:
if len(repeat) > 2 or sum(repeat) > 10:
self.period_brand.add(brand)
print repeat
else:
self.period_brand.add(brand)
print '!', repeat
def __str__(self):
return str(self.id) + ' ' + str(len(self.bands))
if __name__ == '__main__':
userInfo = dict()
with open('/home/pumpkin/Documents/project/tmall/dataset/t_alibaba_data.csv', 'rb') as csvfile:
user_table = dict()
brand_table = dict()
user_counter = 0
brand_counter = 0
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
userID, brandID, actionType, month, day = [int(field) for field in row]
if not userID in user_table:
user_table[userID] = user_counter
user_counter += 1
if not brandID in brand_table:
brand_table[brandID] = brand_counter
brand_counter += 1
userID = user_table[userID]
brandID = brand_table[brandID]
if not userID in userInfo:
userInfo[userID] = dict()
user = userInfo[userID]
if brandID not in user:
user[brandID] = []
if month in (4, 5, 6):
day = day - 14
else:
day = day - 15
if day <= 0:
month -= 1
day += 31
band = user[brandID]
band.append((month, day, actionType))
users = []
for (userID, info) in userInfo.iteritems():
users.append(User(userID, info))
counter = 0
for user in users:
if len(user.data) <= 0:
continue
if user.data[0][1] > 1:
counter += 1
print counter, '{:.2f}%'.format(float(counter) / len(users) * 100)
# counter = 0
# for user in users:
# if len(user.data) <= 0 or user.data[0][1] < 2:
# continue
# flag = False
# for brand, time in user.data:
# if time < 2:
# break
# day = sorted(user.day[brand])
# wait = [day[idx+1] - day[idx] for idx in range(len(day)-1)]
# if len([num for num in wait if num > 0]) > 0:
# flag = True
# repeat = [num for num in wait if num > 0]
# if day[-1] < (N_MONTH - 1) * N_DAY_PER_MONTH:
# if len(repeat) < 3 and sum(repeat) < 10:
# flag = False
# else:
# print repeat
# if flag:
# counter += 1
# print '================================================================'
# print counter, '{:.2f}%'.format(float(counter) / len(users) * 100)
pBands = []
bBands = []
hitBands = []
for user in users:
bBands.append(len(user.label))
hit = 0
total = len(user.period_brand)
for predict in user.period_brand:
if predict in user.label:
hit += 1
hitBands.append(hit)
pBands.append(total)
print sum(hitBands), ' ', sum(pBands), ' ', sum(bBands)
precision = float(sum(hitBands)) / sum(pBands) if not sum(pBands) == 0 else 0
recall = float(sum(hitBands)) / sum(bBands) if not sum(bBands) == 0 else 0
f1 = (2 * precision * recall) / (precision + recall) if not precision + recall == 0 else 0
print 'All: %.02f%% (Precision) %.02f%% (Recall) %.02f%% (F1)' % (precision * 100, recall * 100, f1 * 100) | mit | -2,505,034,783,997,640,000 | 29.686047 | 111 | 0.465795 | false |
listyque/TACTIC-Handler | thlib/tactic_server.py | 1 | 2467 | # tactic_api_client.py
# Start here to run client for tactic api
import sys
import datetime
from thlib.side.Qt import QtWidgets as QtGui
from thlib.side.Qt import QtCore as QtCore
from thlib.side.Qt import QtNetwork as QtNetwork
import main_standalone
import thlib.global_functions as gf
from thlib.environment import env_mode, env_inst, dl
import thlib.ui_classes.ui_tactic_server_classes as ui_tactic_server_classes
class QSingleApplication(QtGui.QApplication):
def start_single(self, main_window):
self.main_window = main_window
# Creating local Socket
self.socket = QtNetwork.QLocalSocket()
# socket Actions
self.socket.connected.connect(self.connected_to_server)
self.socket.error.connect(self.start_app)
# Trying to connect to an existing, previously executed server
self.socket.connectToServer(self.applicationName(), QtCore.QIODevice.ReadOnly)
def connected_to_server(self):
sys.exit()
def start_app(self):
self.server = QtNetwork.QLocalServer()
listen = self.server.listen(self.applicationName())
if listen:
self.server.newConnection.connect(self.handle_new_connections)
else:
QtGui.QMessageBox.critical(None, self.tr('Error'), self.tr('Error listening the socket.'))
self.main_window.create_ui()
def handle_new_connections(self):
print('Checking that the server is up')
incom_socket = self.server.nextPendingConnection()
incom_socket.readyRead.connect(lambda: self.readSocket(incom_socket))
def readSocket(self, new_socket):
new_socket.waitForReadyRead(20000)
new_socket.readAll()
@gf.catch_error
def startup():
env_inst.ui_super = QSingleApplication(sys.argv)
env_inst.ui_super.setApplicationName('TacticHandler_TacticApiServer')
if env_mode.qt5:
env_inst.ui_super.setStyle('fusion')
else:
env_inst.ui_super.setStyle('plastique')
env_mode.set_mode('api_server')
date_str = datetime.date.strftime(dl.session_start, '%d_%m_%Y_%H_%M_%S')
stdout_path = u'{0}/log/api_server_stdout_{1}.log'.format(env_mode.get_current_path(), date_str)
sys.stdout = open(stdout_path, 'w')
main_standalone.setPaletteFromDct(main_standalone.palette)
env_inst.ui_super.start_single(ui_tactic_server_classes.Ui_TacticServer())
sys.exit(env_inst.ui_super.exec_())
if __name__ == '__main__':
startup()
| epl-1.0 | 4,327,361,219,604,347,400 | 29.45679 | 102 | 0.691528 | false |
srio/shadow3-scripts | HIGHLIGHTS/occupation.py | 1 | 3634 | from orangecontrib.comsyl.util.CompactAFReader import CompactAFReader
# from CompactAFReader import CompactAFReader
import numpy
from srxraylib.plot.gol import plot_image, plot
# from plot_color import plot_with_transparency_one
import pylab as plt
from matplotlib.colors import Normalize, ListedColormap
import matplotlib.patches as patches
def convert_to_h5(file_from,file_to):
af = CompactAFReader.initialize_from_file(file_from)
af.write_h5(file_to)
print("File written to disk: ",file_to)
if __name__ == "__main__":
# filename_ebs = "/scisoft/data/srio/COMSYL/ID16/id16s_ebs_u18_1400mm_1h_new_s1.0.npy"
# filename_ebs = "/scisoft/data/srio/COMSYL/CALCULATIONS/cs_new_u18_2m_1h_s2.5.h5" # NOT GOOD
# convert_to_h5("/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz",
# "cs_new_u18_2m_1h_s2.5.h5")
# convert_to_h5("/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_low_beta_u18_2m_1h_s6.5.npy",
# "cl_low_beta_u18_2m_1h_s6.5.h5")
# filename_ebs = "cs_new_u18_2m_1h_s2.5.h5"
# filename_ebs = "cl_low_beta_u18_2m_1h_s6.5.h5"
# filename_ebs = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/new_u18_2m_1h_ts_s2.0.npz"
filename_ebs = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz" # OK EBS
filename_lb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_low_beta_u18_2m_1h_s6.5.npy" # OK LB
filename_hb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_high_beta_u18_2m_1h_s2.0.npy"
#
# load CSD
#
af_ebs = CompactAFReader.initialize_from_file(filename_ebs)
cumulated_occupation_ebs = af_ebs.cumulated_occupation_array()
occupation_ebs = af_ebs.occupation_array()
af_lb = CompactAFReader.initialize_from_file(filename_lb)
cumulated_occupation_lb = af_lb.cumulated_occupation_array()
occupation_lb = af_lb.occupation_array()
af_hb = CompactAFReader.initialize_from_file(filename_hb)
cumulated_occupation_hb = af_hb.cumulated_occupation_array()
occupation_hb = af_hb.occupation_array()
#
print("Coherent fraction EBS: ",cumulated_occupation_ebs[0])
print("Coherent fraction LB: ",cumulated_occupation_lb[0])
print("Coherent fraction HB: ",cumulated_occupation_hb[0])
extensions = ["ebs","lb","hb"]
data = [cumulated_occupation_ebs,cumulated_occupation_lb,cumulated_occupation_hb]
data_occ = [occupation_ebs,occupation_lb,occupation_hb]
plot(numpy.arange(cumulated_occupation_ebs.size),cumulated_occupation_ebs,
numpy.arange(cumulated_occupation_lb.size),cumulated_occupation_lb,
numpy.arange(cumulated_occupation_hb.size),cumulated_occupation_hb,
legend=extensions)
for i,extension in enumerate(extensions):
f = open("cumulated_occupation_%s.dat"%extension,'w')
data_i = data[i]
for j in range(data_i.size):
f.write("%d %g \n"%(j,data_i[j]))
f.close()
print("File written to disk: cumulated_occupation_%s.dat"%extension)
f = open("occupation_%s.dat"%extension,'w')
data_i = data_occ[i]
for j in range(data_i.size):
f.write("%d %g \n"%(j,data_i[j]))
f.close()
print("File written to disk: occupation_%s.dat"%extension)
#
# get indices
#
# first propagate a few modes only to check there are no errors
# afp = AFpropagated.propagate(af,distance=distance,index_max=1,zoom=zoom)
| mit | 2,380,184,108,273,730,000 | 34.281553 | 128 | 0.682168 | false |
erilyth/PyGame-Learning-Environment | ple/games/flappybird/__init__.py | 1 | 13533 | import os
import sys
import numpy as np
import pygame
from pygame.constants import K_w
from .. import base
class BirdPlayer(pygame.sprite.Sprite):
def __init__(self,
SCREEN_WIDTH, SCREEN_HEIGHT, init_pos,
image_assets, rng, color="red", scale=1.0):
self.SCREEN_WIDTH = SCREEN_WIDTH
self.SCREEN_HEIGHT = SCREEN_HEIGHT
self.image_order = [0, 1, 2, 1]
#done image stuff
pygame.sprite.Sprite.__init__(self)
self.image_assets = image_assets
self.init(init_pos, color)
self.height = self.image.get_height()
self.scale = scale
#all in terms of y
self.vel = 0
self.FLAP_POWER = 9*self.scale
self.MAX_DROP_SPEED = 10.0
self.GRAVITY = 1.0*self.scale
self.rng = rng
self._oscillateStartPos() #makes the direction and position random
self.rect.center = (self.pos_x, self.pos_y) #could be done better
def init(self, init_pos, color):
#set up the surface we draw the bird too
self.flapped = True #start off w/ a flap
self.current_image = 0
self.color = color
self.image = self.image_assets[self.color][self.current_image]
self.rect = self.image.get_rect()
self.thrust_time = 0.0
self.tick = 0
self.pos_x = init_pos[0]
self.pos_y = init_pos[1]
def _oscillateStartPos(self):
offset = 8*np.sin( self.rng.rand() * np.pi )
self.pos_y += offset
def flap(self):
if self.pos_y > -2.0*self.image.get_height():
self.vel = 0.0
self.flapped = True
def update(self, dt):
self.tick += 1
#image cycle
if (self.tick + 1) % 15 == 0:
self.current_image += 1
if self.current_image >= 3:
self.current_image = 0
#set the image to draw with.
self.image = self.image_assets[self.color][self.current_image]
self.rect = self.image.get_rect()
if self.vel < self.MAX_DROP_SPEED and self.thrust_time == 0.0:
self.vel += self.GRAVITY
#the whole point is to spread this out over the same time it takes in 30fps.
if self.thrust_time+dt <= (1.0/30.0) and self.flapped:
self.thrust_time += dt
self.vel += -1.0*self.FLAP_POWER
else:
self.thrust_time = 0.0
self.flapped = False
self.pos_y += self.vel
self.rect.center = (self.pos_x, self.pos_y)
def draw(self, screen):
screen.blit(self.image, self.rect.center)
class Pipe(pygame.sprite.Sprite):
def __init__(self,
SCREEN_WIDTH, SCREEN_HEIGHT, gap_start, gap_size, image_assets, scale,
offset=0, color="green"):
self.speed = 4.0*scale
self.SCREEN_WIDTH = SCREEN_WIDTH
self.SCREEN_HEIGHT = SCREEN_HEIGHT
self.image_assets = image_assets
#done image stuff
self.width = self.image_assets["green"]["lower"].get_width()
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((self.width, self.SCREEN_HEIGHT))
self.image.set_colorkey((0,0,0))
self.init(gap_start, gap_size, offset, color)
def init(self, gap_start, gap_size, offset, color):
self.image.fill((0,0,0))
self.gap_start = gap_start
self.x = self.SCREEN_WIDTH+self.width+offset
self.lower_pipe = self.image_assets[color]["lower"]
self.upper_pipe = self.image_assets[color]["upper"]
top_bottom = gap_start-self.upper_pipe.get_height()
bottom_top = gap_start+gap_size
self.image.blit(self.upper_pipe, (0, top_bottom ))
self.image.blit(self.lower_pipe, (0, bottom_top ))
self.rect = self.image.get_rect()
self.rect.center = (self.x, self.SCREEN_HEIGHT/2)
def update(self, dt):
self.x -= self.speed
self.rect.center = (self.x, self.SCREEN_HEIGHT/2)
class Backdrop():
def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT, image_background, image_base, scale):
self.SCREEN_WIDTH = SCREEN_WIDTH
self.SCREEN_HEIGHT = SCREEN_HEIGHT
self.background_image = image_background
self.base_image = image_base
self.x = 0
self.speed = 4.0*scale
self.max_move = self.base_image.get_width() - self.background_image.get_width()
def update_draw_base(self, screen, dt):
#the extra is on the right
if self.x > -1*self.max_move:
self.x -= self.speed
else:
self.x = 0
screen.blit(self.base_image, (self.x, self.SCREEN_HEIGHT*0.79))
def draw_background(self, screen):
screen.blit(self.background_image, (0,0))
class FlappyBird(base.Game):
"""
Used physics values from sourabhv's `clone`_.
.. _clone: https://github.com/sourabhv/FlapPyBird
Parameters
----------
width : int (default: 288)
Screen width. Consistent gameplay is not promised for different widths or heights, therefore the width and height should not be altered.
    height : int (default: 512)
Screen height.
pipe_gap : int (default: 100)
The gap in pixels left between the top and bottom pipes.
"""
def __init__(self, width=288, height=512, pipe_gap=100):
actions = {
"up": K_w
}
fps = 30
base.Game.__init__(self, width, height, actions=actions)
self.scale = 30.0/fps
self.allowed_fps = 30 #restrict the fps
        self.pipe_gap = pipe_gap
self.pipe_color = "red"
self.images = {}
#so we can preload images
pygame.display.set_mode((1,1), pygame.NOFRAME)
self._dir_ = os.path.dirname(os.path.abspath(__file__))
self._asset_dir = os.path.join( self._dir_, "assets/" )
self._load_images()
self.pipe_offsets = [0, self.width*0.5, self.width]
self.init_pos = (
int( self.width * 0.2),
int( self.height / 2 )
)
self.pipe_min = int(self.pipe_gap/4)
self.pipe_max = int(self.height*0.79*0.6 - self.pipe_gap/2)
self.backdrop = None
self.player = None
self.pipe_group = None
def _load_images(self):
#preload and convert all the images so its faster when we reset
self.images["player"] = {}
for c in ["red", "blue", "yellow"]:
image_assets = [
os.path.join( self._asset_dir, "%sbird-upflap.png" % c ),
os.path.join( self._asset_dir, "%sbird-midflap.png" % c ),
os.path.join( self._asset_dir, "%sbird-downflap.png" % c ),
]
self.images["player"][c] = [ pygame.image.load(im).convert_alpha() for im in image_assets ]
self.images["background"] = {}
for b in ["day", "night"]:
path = os.path.join( self._asset_dir, "background-%s.png" % b )
self.images["background"][b] = pygame.image.load(path).convert()
self.images["pipes"] = {}
for c in ["red", "green"]:
path = os.path.join( self._asset_dir, "pipe-%s.png" % c )
self.images["pipes"][c] = {}
self.images["pipes"][c]["lower"] = pygame.image.load(path).convert_alpha()
self.images["pipes"][c]["upper"] = pygame.transform.rotate(self.images["pipes"][c]["lower"], 180)
path = os.path.join( self._asset_dir, "base.png" )
self.images["base"] = pygame.image.load(path).convert()
def init(self):
if self.backdrop is None:
self.backdrop = Backdrop(
self.width,
self.height,
self.images["background"]["day"],
self.images["base"],
self.scale
)
if self.player is None:
self.player = BirdPlayer(
self.width,
self.height,
self.init_pos,
self.images["player"],
self.rng,
color="red",
scale=self.scale
)
if self.pipe_group is None:
self.pipe_group = pygame.sprite.Group([
self._generatePipes(offset=-75),
self._generatePipes(offset=-75+self.width/2),
self._generatePipes(offset=-75+self.width*1.5)
])
color = self.rng.choice(["day", "night"])
self.backdrop.background_image = self.images["background"][color]
#instead of recreating
color = self.rng.choice(["red", "blue", "yellow"])
self.player.init(self.init_pos, color)
self.pipe_color = self.rng.choice(["red", "green"])
for i,p in enumerate(self.pipe_group):
self._generatePipes(offset=self.pipe_offsets[i], pipe=p)
self.score = 0.0
self.lives = 1
self.tick = 0
def getGameState(self):
"""
Gets a non-visual state representation of the game.
Returns
-------
dict
* player y position.
* players velocity.
* next pipe distance to player
* next pipe top y position
* next pipe bottom y position
* next next pipe distance to player
* next next pipe top y position
* next next pipe bottom y position
See code for structure.
"""
pipes = []
for p in self.pipe_group:
if p.x > self.player.pos_x:
pipes.append((p, p.x - self.player.pos_x))
        # sort by distance to the player (nearest first); a bare sorted() call
        # here would discard its result
        pipes.sort(key=lambda p: p[1])
next_pipe = pipes[1][0]
next_next_pipe = pipes[0][0]
if next_next_pipe.x < next_pipe.x:
next_pipe, next_next_pipe = next_next_pipe, next_pipe
state = {
"player_y": self.player.pos_y,
"player_vel": self.player.vel,
"next_pipe_dist_to_player": next_pipe.x - self.player.pos_x,
"next_pipe_top_y": next_pipe.gap_start,
"next_pipe_bottom_y": next_pipe.gap_start+self.pipe_gap,
"next_next_pipe_dist_to_player": next_next_pipe.x - self.player.pos_x,
"next_next_pipe_top_y": next_next_pipe.gap_start,
"next_next_pipe_bottom_y": next_next_pipe.gap_start+self.pipe_gap
}
return state
def getScore(self):
return self.score
def _generatePipes(self, offset=0, pipe=None):
start_gap = self.rng.random_integers(
self.pipe_min,
self.pipe_max
)
        if pipe is None:
pipe = Pipe(
self.width,
self.height,
start_gap,
self.pipe_gap,
self.images["pipes"],
self.scale,
color=self.pipe_color,
offset=offset
)
return pipe
else:
pipe.init(start_gap, self.pipe_gap, offset, self.pipe_color)
def _handle_player_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
key = event.key
if key == self.actions['up']:
self.player.flap()
def game_over(self):
return self.lives <= 0
def step(self, dt):
self.tick += 1
dt = dt / 1000.0
self.score += self.rewards["tick"]
#handle player movement
self._handle_player_events()
for p in self.pipe_group:
hit = pygame.sprite.spritecollide(self.player, self.pipe_group, False)
for h in hit: #do check to see if its within the gap.
top_pipe_check = ((self.player.pos_y - self.player.height/2) <= h.gap_start)
bot_pipe_check = ((self.player.pos_y + self.player.height) > h.gap_start+self.pipe_gap)
if top_pipe_check:
self.lives -= 1
if bot_pipe_check:
self.lives -= 1
#is it past the player?
if (p.x - p.width/2) <= self.player.pos_x < (p.x - p.width/2 + 4):
self.score += self.rewards["positive"]
#is out out of the screen?
if p.x < -p.width:
self._generatePipes(offset=self.width*0.2, pipe=p)
#fell on the ground
if self.player.pos_y >= 0.79*self.height - self.player.height:
self.lives -= 1
#went above the screen
if self.player.pos_y < -self.player.height:
self.lives -= 1
self.player.update(dt)
self.pipe_group.update(dt)
if self.lives <= 0:
self.score += self.rewards["loss"]
self.backdrop.draw_background(self.screen)
self.pipe_group.draw(self.screen)
self.backdrop.update_draw_base(self.screen, dt)
self.player.draw(self.screen)
| mit | -4,636,007,687,599,830,000 | 30.767606 | 144 | 0.522205 | false |
12425/pac-maker | pac-maker.py | 1 | 5031 | #!/usr/bin/env python3
# vim: fileencoding=utf-8
import os
import re
from base64 import standard_b64decode as b64decode
from os.path import dirname, isfile, expanduser
from configparser import ConfigParser
from urllib.request import urlopen
HOST_PAT = re.compile(r'^[\w-]+(\.[\w-]+)+$')
PORT_PAT = re.compile(r':\d+$')
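# The parsers below understand AutoProxy/Adblock-style rules.  Illustrative
# examples (not an exhaustive spec):
#   ! comment line          -> ignored
#   ||example.com^          -> host "example.com" added to the include set
#   |https://example.com/   -> host "example.com" added to the include set
#   @@||good.example.com^   -> exception rule; host goes to the exclude set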
def fetch_list(path, decode=False):
if path.startswith('http'):
with urlopen(path) as res:
content = res.read()
else:
with open(expanduser(path), 'rb') as f:
content = f.read()
if decode:
content = b64decode(content)
return content.decode('utf8')
def parse_list(content, gfw=True):
exc = set()
inc = set()
for line in content.split('\n'):
line = line.strip()
if not line:
continue
add_line(line, gfw, inc, exc)
inc -= exc
return inc, exc
def merge_list(inc1, exc1, inc2, exc2):
inc = ((inc1 - exc1) | inc2) - exc2
exc = (exc1 - inc2) | exc2
inc -= exc
return inc, exc
def add_line(line, gfw, inc, exc):
if line.startswith('!'):
return
negative = False
if line.startswith('@@'):
negative = True
line = line[2:]
if line.startswith('||'):
parse_double_pipe(line[2:], negative, gfw, inc, exc)
elif line.startswith('|'):
parse_single_pipe(line[1:], negative, gfw, inc, exc)
else:
parse_normal(line, negative, gfw, inc, exc)
def parse_double_pipe(line, negative, gfw, inc, exc):
line = line.replace('*', '')
if line.startswith('.'):
line = line[1:]
if line.endswith('.^'):
return
if line.endswith('/') or line.endswith('^') or line.endswith('.'):
line = line[:-1]
if '..' in line:
return
if not gfw:
if '/' in line or '^' in line or '$' in line:
return
if PORT_PAT.search(line):
return
if HOST_PAT.match(line):
if negative:
exc.add(line)
else:
inc.add(line)
return
print('|| Format not recognized:', line)
def parse_single_pipe(line, negative, gfw, inc, exc):
if line.startswith('http://'):
line = line[7:]
elif line.startswith('https://'):
line = line[8:]
if not gfw:
if '$' in line:
return
line = line.replace('*', '')
if line.startswith('/') or '..' in line:
return
if line.startswith('.'):
line = line[1:]
if line.endswith('.'):
line = line[:-1]
if line.endswith('/') or line.endswith('^'):
line = line[:-1]
if gfw:
line = line.split('/', 1)[0]
else:
if '/' in line:
return
if '.' not in line:
return
if HOST_PAT.match(line):
if negative:
exc.add(line)
else:
inc.add(line)
return
print('| Format not recognized:', line)
def parse_normal(line, negative, gfw, inc, exc):
line = line.replace('*', '')
if line.startswith('.'):
line = line[1:]
if line.endswith('/') or line.endswith('^'):
line = line[:-1]
elif line.endswith('%2F'):
line = line[:-3]
if line.startswith('?') or '&' in line:
return
if gfw:
line = line.split('/', 1)[0]
else:
if line.endswith('.'):
line = line[:-1]
if '/' in line or '#' in line or '$' in line or '?' in line:
return
if HOST_PAT.match(line):
if negative:
exc.add(line)
else:
inc.add(line)
return
if line == 'http:':
return
if line.startswith('[AutoProxy ') or line.startswith('[Adblock Plus '):
return
if '.' not in line:
return
if PORT_PAT.search(line):
return
print('Format not recognized:', line)
def generate_pac_file(adblist, gfwlist):
global conf
pacfile = next(iter(conf['pac_file']))
with open('pac.js', encoding='utf8') as fi:
pac = fi.read()
pac = pac.replace('$ADBLIST', dict_to_object(adblist))
pac = pac.replace('$GFWLIST', dict_to_object(gfwlist))
with open(expanduser(pacfile), 'w', encoding='utf8') as fo:
fo.write(pac)
print(pacfile, 'generated.')
def dict_to_object(l):
return ',\n '.join(('"%s":1' % x for x in l))
def load_conf(ini):
if not isfile(ini):
        print('Config file does not exist: %s' % ini)
return
conf = ConfigParser(delimiters=('='), allow_no_value=True)
conf.read(ini, 'utf8')
return dict(conf)
if __name__ == '__main__':
os.chdir(dirname(__file__))
global conf
conf = load_conf('conf-pac-maker.ini')
if not conf:
exit()
# gfwlist
inc = set()
exc = set()
for f in conf['gfwlist'].keys():
l = fetch_list(f, decode=True)
inc2, exc2 = parse_list(l, True)
inc, exc = merge_list(inc, exc, inc2, exc2)
for f in conf['my_gfwlist'].keys():
l = fetch_list(f)
inc2, exc2 = parse_list(l, True)
inc, exc = merge_list(inc, exc, inc2, exc2)
gfwlist = inc - exc
# adblocklist
inc.clear()
exc.clear()
for f in conf['adblocklist'].keys():
l = fetch_list(f)
inc2, exc2 = parse_list(l, False)
inc, exc = merge_list(inc, exc, inc2, exc2)
for f in conf['my_adblocklist'].keys():
l = fetch_list(f)
inc2, exc2 = parse_list(l, False)
inc, exc = merge_list(inc, exc, inc2, exc2)
adblist = inc - exc
generate_pac_file(adblist, gfwlist - adblist)
| bsd-3-clause | -103,067,667,032,438,510 | 24.538071 | 73 | 0.595309 | false |
mjs/juju | acceptancetests/update_lxc_cache.py | 1 | 7813 | #!/usr/bin/python
"""Update the lxc 'download' template cache for hosts on closed networks."""
from __future__ import print_function
from argparse import ArgumentParser
from collections import namedtuple
import errno
import os
import sys
import traceback
import shutil
import subprocess
import urllib2
SITE = 'https://images.linuxcontainers.org'
INDEX_PATH = 'meta/1.0'
INDEX = 'index-system'
ROOTFS = 'rootfs.tar.xz'
META = 'meta.tar.xz'
LXC_CACHE = '/var/cache/lxc/download'
System = namedtuple(
'System', ['dist', 'release', 'arch', 'variant', 'version', 'path'])
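# Each line of the index file is a semicolon-separated record, e.g. (values
# illustrative):
#   ubuntu;trusty;amd64;default;20160415_03:49;/images/ubuntu/trusty/amd64/default/20160415_03:49/
# The fields map one-to-one onto the System namedtuple above.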
PUT_SCRIPT = """\
scp {rootfs_path} {meta_path} {user_host}:~/
"""
INSTALL_SCRIPT = """\
ssh {user_host} bash <<"EOT"
sudo mkdir -p {lxc_cache}
sudo mv ~/{rootfs} ~/{meta} {lxc_cache}
sudo chown -R root:root {lxc_cache}
sudo tar -C {lxc_cache} -xf {lxc_cache}/meta.tar.xz
EOT
"""
class LxcCache:
"""Manage the LXC download template cache."""
def __init__(self, workspace, verbose=False, dry_run=False):
"""Set the workspace for the local cache."""
self.workspace = os.path.abspath(workspace)
self.verbose = verbose
self.dry_run = dry_run
local_path = os.path.join(self.workspace, INDEX_PATH, INDEX)
self.systems, ignore = self.init_systems(local_path)
def init_systems(self, location):
"""Return a tuple of the dict of lxc Systems and the source data.
A System has these attributes: 'dist', 'release', 'arch', 'variant',
'version', and 'path'. The dict keys are a tuple of
(dist, release, arch, variant).
"""
systems = {}
if location.startswith('http'):
request = urllib2.Request(location)
response = urllib2.urlopen(request)
data = response.read()
else:
try:
with open(location) as f:
data = f.read()
except IOError as e:
if e.errno == errno.ENOENT:
if self.verbose:
print('Local cache is empty.')
return systems, None
for line in data.splitlines():
system = System(*line.split(';'))
key = (system.dist, system.release, system.arch, system.variant)
systems[key] = system
return systems, data
def get_updates(self, dist, release, arch, variant):
"""Return a tuple of the new system and the source data that match.
The new system and source data will be None when there are
no updates. The dist, release, arch, and variant args identify the
system to return.
"""
key = (dist, release, arch, variant)
old_system = self.systems.get(key)
url = '%s/%s/%s' % (SITE, INDEX_PATH, INDEX)
new_systems, data = self.init_systems(url)
new_system = new_systems[key]
if not old_system or new_system.version > old_system.version:
if self.verbose:
print('Found new version for %s' % str(key))
print(new_system.version)
return new_system, data
if self.verbose:
print('Version is current for %s' % str(key))
print(old_system.version)
return None, None
def get_lxc_data(self, system):
"""Download the system image and meta data.
Return a tuple of the image and meta data paths.
"""
image_path = os.path.join(self.workspace, system.path[1:])
if not self.dry_run:
if self.verbose:
print('creating %s' % image_path)
if not os.path.isdir(image_path):
os.makedirs(image_path)
rootfs_path = os.path.join(image_path, ROOTFS)
rootfs_url = '%s%s%s' % (SITE, system.path, ROOTFS)
self.download(rootfs_url, rootfs_path)
meta_path = os.path.join(image_path, META)
meta_url = '%s%s%s' % (SITE, system.path, META)
self.download(meta_url, meta_path)
return rootfs_path, meta_path
def download(self, location, path):
"""Download a large binary from location to the specified path."""
chunk = 16 * 1024
if not self.dry_run:
request = urllib2.Request(location)
response = urllib2.urlopen(request)
if response.getcode() == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(response, f, chunk)
if self.verbose:
print('Downloaded %s' % location)
def put_lxc_data(self, user_host, system, rootfs_path, meta_path):
"""Install the lxc image and meta data on the host.
The user on the host must have password-less sudo.
"""
lxc_cache = os.path.join(
LXC_CACHE, system.dist, system.release, system.arch,
system.variant)
put_script = PUT_SCRIPT.format(
user_host=user_host, rootfs_path=rootfs_path, meta_path=meta_path)
if not self.dry_run:
subprocess.check_call([put_script], shell=True)
if self.verbose:
print("Uploaded %s and %s" % (ROOTFS, META))
install_script = INSTALL_SCRIPT.format(
user_host=user_host, lxc_cache=lxc_cache, rootfs=ROOTFS, meta=META)
if not self.dry_run:
subprocess.check_call([install_script], shell=True)
if self.verbose:
print("Installed %s and %s" % (ROOTFS, META))
def save_index(self, data):
"Save the (current) index data for future calls to get_updates()."
index_dir = os.path.join(self.workspace, INDEX_PATH)
if not os.path.isdir(index_dir):
os.makedirs(index_dir)
index_path = os.path.join(self.workspace, INDEX_PATH, INDEX)
with open(index_path, 'w') as f:
f.write(data)
if self.verbose:
print('saved index: %s' % INDEX)
def parse_args(argv=None):
"""Return the argument parser for this program."""
parser = ArgumentParser(
"Update a remote host's download lxc template cache.")
parser.add_argument(
'-d', '--dry-run', action='store_true', default=False,
help='Do not make changes.')
parser.add_argument(
'-v', '--verbose', action='store_true', default=False,
help='Increase verbosity.')
parser.add_argument(
'--dist', default="ubuntu", help="The distribution to update.")
parser.add_argument(
'--variant', default="default", help="The variant to update.")
parser.add_argument(
'user_host', help='The user@host to update.')
parser.add_argument(
'release', help='The release to update.')
parser.add_argument(
'arch', help='The architecture of the remote host')
parser.add_argument(
'workspace', help='The path to the local dir to stage the update.')
args = parser.parse_args(argv)
return args
def main(argv):
"""Update the lxc download template cache for hosts on closed networks."""
args = parse_args(argv)
try:
lxc_cache = LxcCache(
args.workspace, verbose=args.verbose, dry_run=args.dry_run)
new_system, data = lxc_cache.get_updates(
args.dist, args.release, args.arch, args.variant)
if new_system:
rootfs_path, meta_path = lxc_cache.get_lxc_data(new_system)
lxc_cache.put_lxc_data(
args.user_host, new_system, rootfs_path, meta_path)
lxc_cache.save_index(data)
except Exception as e:
print(e)
print(getattr(e, 'output', ''))
if args.verbose:
traceback.print_tb(sys.exc_info()[2])
return 2
if args.verbose:
print("Done.")
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| agpl-3.0 | -5,232,036,418,743,799,000 | 34.83945 | 79 | 0.588634 | false |
fjacob21/MAX | service/src/features/tv/eg_tv_feature.py | 1 | 1266 | from eg_networksender import Send
class eg_tv_feature(object):
def __init__(self, device):
self._device = device
@property
def name(self):
return 'tv'
@property
def version(self):
return 1
@property
def description(self):
return "Control TV using evenghost receiver"
@property
def functions(self):
return ['open', 'close', 'state']
def execute(self, cmd, params):
if cmd == 'open':
return self.open(params)
if cmd == 'close':
return self.close(params)
if cmd == 'state':
return self.state(params)
def open(self, params):
return {'device':self._device.json, 'feature':self.name, 'result': Send('OpenTV', self._device.ip)}
def close(self, params):
return {'device':self._device.json, 'feature':self.name, 'result': Send('CloseTV', self._device.ip)}
def state(self, params):
if self._device.is_online()['isonline'] == False:
return {'device':self._device.json, 'feature':self.name, 'result': True, 'state': 0}
result = Send('GetState', self._device.ip)
        return {'device': self._device.json, 'feature': self.name, 'result': result}
| mit | 1,546,015,063,897,016,300 | 29.142857 | 109 | 0.587678 | false |
mabuchilab/QNET | tests/algebra/test_substitute.py | 1 | 5025 | from sympy import symbols
import pytest
from qnet.algebra.core.abstract_algebra import substitute
from qnet.algebra.core.exceptions import BasisNotSetError
from qnet.algebra.core.matrix_algebra import Matrix
from qnet.algebra.core.operator_algebra import (
IdentityOperator, II, OperatorSymbol)
from qnet.algebra.library.fock_operators import Destroy
from qnet.algebra.core.hilbert_space_algebra import LocalSpace
@pytest.fixture
def H_JC():
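    # Two coupled bosonic modes with a beam-splitter-style interaction
    # 2*g*(a^dag b + b^dag a); used as the shared test Hamiltonian below.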
hil_a = LocalSpace('A')
hil_b = LocalSpace('B')
a = Destroy(hs=hil_a)
a_dag = a.dag()
b = Destroy(hs=hil_b)
b_dag = b.dag()
omega_a, omega_b, g = symbols('omega_a, omega_b, g')
H = (omega_a * a_dag * a + omega_b * b_dag * b +
2 * g * (a_dag * b + b_dag * a))
return H
def test_substitute_basis(H_JC):
""""Test that we can assign an expression's Hilbert space a basis"""
H = H_JC
with pytest.raises(BasisNotSetError):
H.space.dimension
hs_mapping = {
LocalSpace('A'): LocalSpace('A', basis=('g', 'e')),
LocalSpace('B'): LocalSpace('B', dimension=10),
}
H2 = H.substitute(hs_mapping)
assert H2.space.dimension == 20
H2 = substitute(H, hs_mapping)
assert H2.space.dimension == 20
def test_substitute_numvals(H_JC):
"""Test that we can substitute in numbers for scalar coefficients"""
omega_a, omega_b, g = symbols('omega_a, omega_b, g')
num_vals = {
omega_a: 0.2,
omega_b: 0,
g: 1,
}
hil_a = LocalSpace('A')
hil_b = LocalSpace('B')
a = Destroy(hs=hil_a)
a_dag = a.dag()
b = Destroy(hs=hil_b)
b_dag = b.dag()
H2_expected = 0.2 * a_dag * a + 2 * (a_dag * b + b_dag * a)
H2 = H_JC.substitute(num_vals)
assert H2 == H2_expected
H2 = substitute(H_JC, num_vals)
assert H2 == H2_expected
def test_substitute_str(H_JC):
"""Test that we can substitute e.g. label strings"""
H2 = H_JC.substitute({'A': '1', 'B': '2'})
hs_mapping = {
LocalSpace('A'): LocalSpace('1'),
LocalSpace('B'): LocalSpace('2'),
}
assert H2 == H_JC.substitute(hs_mapping)
def test_substitute_sympy_formula(H_JC):
"""Test that we can replace sympy symbols with other sympy formulas"""
omega_a, omega_b, g = symbols('omega_a, omega_b, g')
Delta_a, Delta_b, delta, kappa = symbols('Delta_a, Delta_b, delta, kappa')
hil_a = LocalSpace('A')
hil_b = LocalSpace('B')
a = Destroy(hs=hil_a)
a_dag = a.dag()
b = Destroy(hs=hil_b)
b_dag = b.dag()
mapping = {
omega_a: Delta_a,
omega_b: Delta_b,
g: kappa / (2 * delta)
}
H2_expected = (
Delta_a * a_dag * a + Delta_b * b_dag * b +
(kappa / delta) * (a_dag * b + b_dag * a))
H2 = H_JC.substitute(mapping)
assert H2 == H2_expected
H2 = substitute(H_JC, mapping)
assert H2 == H2_expected
def test_substitute_total_expression(H_JC):
"""Test that we can replace the entire expr with another expression"""
C = OperatorSymbol('C', hs=H_JC.space)
assert H_JC.substitute({H_JC: C}) == C
assert substitute(H_JC, {H_JC: C}) == C
def test_substitute_symbol_not_in_expr(H_JC):
"""Test that we if a symbol in the mapping dict does not occur in the expr,
we don't get an error, but leaves the expr unchanged"""
x = symbols('x')
assert H_JC.substitute({x: 0}) == H_JC
assert substitute(H_JC, {x: 0}) == H_JC
def test_substitute_sub_expr(H_JC):
"""Test that we can replace non-atomic sub-expressions"""
hil_a = LocalSpace('A')
hil_b = LocalSpace('B')
omega_a, omega_b, g = symbols('omega_a, omega_b, g')
a = Destroy(hs=hil_a)
a_dag = a.dag()
b = Destroy(hs=hil_b)
b_dag = b.dag()
n_op_a = OperatorSymbol('n', hs=hil_a)
n_op_b = OperatorSymbol('n', hs=hil_b)
x_op = OperatorSymbol('x', hs=H_JC.space)
mapping = {
a_dag * a: n_op_a,
b_dag * b: n_op_b,
(a_dag * b + b_dag * a): x_op + x_op.dag()
}
H2_expected = (omega_a * n_op_a + omega_b * n_op_b +
2 * g * (x_op + x_op.dag()))
H2 = H_JC.substitute(mapping)
assert H2 == H2_expected
H2 = substitute(H_JC, mapping)
assert H2 == H2_expected
def test_substitute_matrix(H_JC):
"""Test that we can substitute in a Matrix (element-wise)"""
M = Matrix([[H_JC, IdentityOperator], [IdentityOperator, H_JC]])
IM = Matrix([[IdentityOperator, IdentityOperator],
[IdentityOperator, IdentityOperator]])
assert M.substitute({H_JC: IdentityOperator}) == M.substitute({M: IM})
assert substitute(M, {H_JC: IdentityOperator}) == substitute(M, {M: IM})
def test_substitute_sympy():
"""Test that the sustitute function can directly modify sympy
expressions"""
g, kappa = symbols('g, kappa')
assert substitute(g**2/2, {g**2: kappa}) == kappa / 2
def test_singleton_substitute():
"""Test that calling the substitute method on a Singleton returns the
Singleton"""
assert II.substitute({}) is II
| mit | -7,784,058,673,838,910,000 | 29.08982 | 79 | 0.6 | false |
dvoraka/webrt | djrt/djrt/wsgi.py | 1 | 1130 | """
WSGI config for djrt project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djrt.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| gpl-3.0 | -8,924,795,436,346,122,000 | 39.357143 | 79 | 0.799115 | false |
TheAlgorithms/Python | graphs/minimum_spanning_tree_kruskal.py | 1 | 1393 | from typing import List, Tuple
def kruskal(
    num_nodes: int, num_edges: int, edges: List[Tuple[int, int, int]]
) -> List[Tuple[int, int, int]]:
"""
>>> kruskal(4, 3, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
[(2, 3, 1), (0, 1, 3), (1, 2, 5)]
>>> kruskal(4, 5, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
[(2, 3, 1), (0, 2, 1), (0, 1, 3)]
>>> kruskal(4, 6, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2),
... (2, 1, 1)])
[(2, 3, 1), (0, 2, 1), (2, 1, 1)]
"""
edges = sorted(edges, key=lambda edge: edge[2])
parent = list(range(num_nodes))
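    # disjoint-set (union-find) "find" with path compression: parent pointers
    # are rewritten to point at the root, keeping later lookups near O(1)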
def find_parent(i):
if i != parent[i]:
parent[i] = find_parent(parent[i])
return parent[i]
minimum_spanning_tree_cost = 0
minimum_spanning_tree = []
for edge in edges:
parent_a = find_parent(edge[0])
parent_b = find_parent(edge[1])
if parent_a != parent_b:
minimum_spanning_tree_cost += edge[2]
minimum_spanning_tree.append(edge)
parent[parent_a] = parent_b
return minimum_spanning_tree
if __name__ == "__main__": # pragma: no cover
num_nodes, num_edges = list(map(int, input().strip().split()))
edges = []
for _ in range(num_edges):
node1, node2, cost = [int(x) for x in input().strip().split()]
edges.append((node1, node2, cost))
kruskal(num_nodes, num_edges, edges)
| mit | 2,064,725,714,210,795,800 | 28.638298 | 86 | 0.498923 | false |
andela-ooshodi/django-bucketlist-application | djangobucketlist/djangobucketlist/settings/base.py | 1 | 4073 | """
Django settings for djangobucketlist project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.contrib.messages import constants as message_constants
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm(%x1m*2!qs9(l(s&n0nft&$9%3dbpcrc_v#*3cxd7#thj0zbb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bucketlist',
'apiv1',
'bootstrapform',
'djangobower',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'djangobucketlist.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangobucketlist.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
APPEND_SLASH = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, '..', 'bucketlist/static'),
)
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder'
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# custom message tag for django messaging middleware
MESSAGE_TAGS = {
message_constants.ERROR: 'danger'
}
# Django REST_FRAMEWORK global settings
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10,
'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
# Swagger settings
SWAGGER_SETTINGS = {
'exclude_namespaces': [],
'api_version': 'version 1',
}
# Bower configurations
BOWER_INSTALLED_APPS = (
'mdi',
'jquery',
'bootstrap',
)
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, '..', 'bucketlist/static')
# Default database configuration
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'bucketlist-db',
}
}
| gpl-2.0 | -2,429,033,043,398,538,000 | 24.45625 | 81 | 0.69752 | false |
dhongu/l10n-romania | l10n_ro_stock_account_voucher/__manifest__.py | 1 | 1346 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: dataERP - Vlad Nafureanu ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Valuation from Account Voucher',
'version': '11.0.1.0.0',
'author': 'dataERP, Vlad Nafureanu',
'website': 'http://www.forbiom.eu',
'category': 'Purchase, Warehouse, Accounting',
'depends': [
'account_voucher',
'l10n_ro_stock_account'],
'data': [
'views/account_voucher.xml',
'views/purchase_order.xml'
],
'installable': True,
'active': False,
}
| agpl-3.0 | 6,313,241,797,806,392,000 | 37.457143 | 78 | 0.580981 | false |
ingrammicro/apsconnect-cli | tests/test_apsconnect_internals.py | 1 | 4800 | import os
import sys
from unittest import TestCase
from pkg_resources import DistributionNotFound
from apsconnectcli.apsconnect import (
GITHUB_RELEASES_PAGE,
bin_version,
get_version,
get_latest_version,
main,
APSConnectUtil,
)
if sys.version_info >= (3,):
from unittest.mock import patch
_BUILTINS_OPEN = 'builtins.open'
_BUILTINS_PRINT = 'builtins.print'
else:
from mock import patch
_BUILTINS_OPEN = 'apsconnectcli.apsconnect.open'
_BUILTINS_PRINT = 'apsconnectcli.apsconnect.print'
class TestVersion(TestCase):
def test_latest_version(self):
with patch('apsconnectcli.apsconnect.get_version') as version_mock, \
patch('apsconnectcli.apsconnect.get_latest_version') as latest_version_mock, \
patch(_BUILTINS_PRINT) as print_mock:
version_mock.return_value = '1.2.3'
latest_version_mock.return_value = '1.2.3'
APSConnectUtil().version()
self.assertEqual(print_mock.call_count, 1)
self.assertTrue('1.2.3' in print_mock.call_args[0][0])
def test_outdated_version(self):
with patch('apsconnectcli.apsconnect.get_version') as version_mock, \
patch('apsconnectcli.apsconnect.get_latest_version') as latest_version_mock, \
patch(_BUILTINS_PRINT) as print_mock:
version_mock.return_value = '1.2.3'
latest_version_mock.return_value = '1.2.4'
APSConnectUtil().version()
self.assertEqual(print_mock.call_count, 2)
self.assertTrue('1.2.4' in print_mock.call_args[0][0])
def test_unknown_version(self):
with patch('apsconnectcli.apsconnect.get_version') as version_mock, \
patch('apsconnectcli.apsconnect.get_latest_version'), \
patch(_BUILTINS_PRINT) as print_mock:
version_mock.return_value = None
APSConnectUtil().version()
self.assertEqual(print_mock.call_count, 1)
self.assertTrue(GITHUB_RELEASES_PAGE in print_mock.call_args[0][0])
class TestHelpers(TestCase):
def test_bin_version_ok(self):
with patch('apsconnectcli.apsconnect.sys') as sys_mock, \
patch(_BUILTINS_OPEN) as open_mock:
open_mock.return_value.__enter__.return_value.read.return_value = 'v100500'
sys_mock._MEIPASS = 'pyinstaller_data_dir'
result = bin_version()
open_mock.assert_called_once_with(os.path.join(sys_mock._MEIPASS, 'VERSION'))
self.assertEqual(result, 'v100500')
def test_bin_version_exception(self):
self.assertEqual(bin_version(), None)
def test_get_version_from_package_ok(self):
with patch('apsconnectcli.apsconnect.pkg_resources') as pkg_mock:
pkg_mock.get_distribution.return_value.version = 'v100500'
result = get_version()
self.assertEqual(result, 'v100500')
def test_get_version_from_package_error(self):
with patch('apsconnectcli.apsconnect.pkg_resources') as pkg_mock, \
patch('apsconnectcli.apsconnect.bin_version') as bin_mock:
bin_mock.return_value = 'v100500'
pkg_mock.DistributionNotFound = DistributionNotFound
pkg_mock.get_distribution.side_effect = DistributionNotFound()
result = get_version()
self.assertEqual(result, 'v100500')
def test_get_latest_version_ok(self):
with patch('apsconnectcli.apsconnect.get') as get_mock:
get_mock.return_value.json.return_value = {'tag_name': 'v123'}
result = get_latest_version()
self.assertEqual(result, '123')
def test_get_latest_version_error(self):
with patch('apsconnectcli.apsconnect.get') as get_mock:
get_mock.return_value = 'Definitely not JSON'
result = get_latest_version()
self.assertIsNone(result)
def test_main_prints_version(self):
with patch('apsconnectcli.apsconnect.fire'), \
patch('apsconnectcli.apsconnect.get_version') as get_version_mock, \
patch(_BUILTINS_PRINT) as print_mock:
get_version_mock.return_value = '100.500'
main()
self.assertTrue('100.500' in print_mock.call_args[0][0])
def test_main_prints_error_and_exists_if_there_are_problems(self):
with patch('apsconnectcli.apsconnect.fire') as fire_mock, \
patch('apsconnectcli.apsconnect.get_version'), \
patch(_BUILTINS_PRINT) as print_mock, \
patch('apsconnectcli.apsconnect.sys') as sys_mock:
fire_mock.Fire.side_effect = Exception('All is lost')
main()
self.assertTrue('All is lost' in print_mock.call_args[0][0])
sys_mock.exit.assert_called_once_with(1)
| apache-2.0 | 3,231,210,942,872,284,000 | 35.641221 | 90 | 0.637292 | false |
npdata/Scikit-qfit | tests/test_synthmap.py | 1 | 4688 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test the Q spectrum processing using a synthesised image as specified
in Section 5 of the "Fitting freeform shapes with orthogonal bases" document.
"""
from __future__ import print_function, absolute_import, division
__author__ = "npdata"
__copyright__ = "npdata"
__license__ = "mit"
import math
import time
import numpy as np
from numpy.polynomial.polynomial import polyval2d
from skqfit.qspectre import QSpectrum
def eval_synthmap(as_map=False, inverse=True):
def sag_fn(rhov, thetav):
x = rhov*np.cos(thetav)
y = rhov*np.sin(thetav)
return sag_xy(x, y)
def sag_xy(x, y):
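        # biconic base surface (curvatures cx, cy; conic constants kx, ky)
        # plus the polynomial correction defined by sag_xy.coeff below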
cx = -1/452.62713
cy = -1/443.43539
kx = ky = 0.0
x2 = x*x
y2 = y*y
z = (cx*x2 + cy*y2)/(1 + np.sqrt(1-((1+kx)*cx*cx)*x2 - ((1+ky)*cy*cy)*y2))
return z + polyval2d(x, y, sag_xy.coeff)
sag_xy.rmax = 174.2
sag_xy.curv = -1/478.12597
sag_xy.coeff = np.zeros((11,11), dtype=np.float)
sag_xy.coeff[0,:] = [0, 0, 8.446692E-05, -1.773111E-08, 2.103339E-10, -4.450410E-14, 1.204820E-15, -3.751270E-18, 1.243271E-20, -1.671689E-23, 2.740074E-26]
sag_xy.coeff[2,:9] = [6.086975E-05, -9.657166E-08, 3.881972E-10, -5.340721E-13, 1.962740E-15, -3.972902E-18, 2.276418E-20, -6.515923E-23, 1.259617E-25]
sag_xy.coeff[4,:7] = [1.345443E-10, -4.424293E-13, 1.672236E-15, -4.286471E-18, 1.613314E-20, -4.548523E-23, 9.938038E-26]
sag_xy.coeff[6,:5] = [3.310262E-16, -1.749391E-18, 7.515349E-21, -2.305324E-23, 1.939290E-26]
sag_xy.coeff[8,:3] = [1.020537E-21, -6.739667E-24, -3.800397E-27]
sag_xy.coeff[10,0] = 1.653756E-28
def build_map(pts, slice=False):
x = np.linspace(-1.02*sag_xy.rmax, 1.02*sag_xy.rmax, pts)
if slice:
y = np.linspace(0.0, 0.0, 1)
else:
y = np.linspace(-1.02*sag_xy.rmax, 1.02*sag_xy.rmax, pts)
xv, yv = np.meshgrid(x, y, indexing='ij')
z = sag_xy(xv, yv)
return x, y, z.reshape((x.size, y.size))
def test_xy_gradient(zmap, dfdx, dfdy, x, y):
grad = np.gradient(zmap)
gx = grad[0] / (x[1] - x[0])
dy = grad[1] / (y[1] - y[0])
err_dx = dfdx - gx
gx_err = np.nanmax(err_dx[1:-1,1:-1]) - np.nanmin(err_dx[1:-1,1:-1])
err_dy = dfdy - dy
gy_err = np.nanmax(err_dy[1:-1,1:-1]) - np.nanmin(err_dy[1:-1,1:-1])
#display_map(err_dx[1:-1,1:-1])
#display_map(err_dy[1:-1,1:-1])
return max(gx_err, gy_err)
exp_ispec = np.array([[70531, 225291, 25895, 199399, 3583, 2651, 1886, 339, 55, 41, 5],
[43, 223995, 11377, 198, 2604, 801, 46, 37, 5, 0, 0],
[82, 12916, 3592, 994, 158, 10, 5, 0, 0, 0, 0],
[10, 1568, 256, 10, 2, 0, 0, 0, 0, 0, 0],
[1, 20, 3, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int)
bfs_c = sag_xy.curv
points = 501
if False:
m_max = 200
n_max = 200
else:
m_max = 10
n_max = 9
qfit = QSpectrum(m_max, n_max)
if as_map:
x, y, zmap = build_map(points)
qfit.data_map(x, y, zmap, centre=(0.,0.), radius=sag_xy.rmax)
#display_map(zmap)
else:
qfit.set_sag_fn(sag_fn, sag_xy.rmax, bfs_c)
start = time.time()
a_nm, b_nm = qfit.q_fit(m_max, n_max)
print('fit done, time %.3fs' % (time.time() - start))
qspec = np.sqrt(np.square(a_nm) + np.square(b_nm))
#disp_qspec(qspec)
ispec = np.round(1e6*qspec).astype(int)
idiff = ispec[:5,:11] - exp_ispec
errors = np.count_nonzero(idiff)
inv_err, grad_err = 0.0, 0.0
if inverse:
if not as_map:
x, y, zmap = build_map(points)
start = time.time()
zinv, dfdx, dfdy = qfit.build_map(x, y, radius=sag_xy.rmax, centre=(0.0,0.0), a_nm=a_nm, b_nm=b_nm, interpolated=True, inc_deriv=True)
print('inverse done, time %.3fs' % (time.time() - start))
grad_err = test_xy_gradient(zinv, dfdx, dfdy, x, y)
cond = zinv != 0.0
diff = np.extract(cond, zmap - zinv)
inv_err = max(math.fabs(np.nanmax(diff)), math.fabs(np.nanmin(diff)))
assert errors == 0 and inv_err < 1.0e-7 and grad_err < 1.0e-5
def test_as_map():
eval_synthmap(as_map=True, inverse=True)
def test_as_sagfn():
eval_synthmap(as_map=False, inverse=True)
if __name__ == "__main__":
test_as_sagfn()
test_as_map()
| mit | -8,850,606,060,119,817,000 | 34.913386 | 161 | 0.524744 | false |
spktklr/kansalaisrajoite | python/vote.py | 1 | 1105 | # coding=utf-8
from bottle import Bottle, HTTPError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import joinedload
import model
from utils import jsonplugin
import auth
app = Bottle()
app.install(model.plugin)
app.install(jsonplugin)
@app.get('/<id:int>')
@auth.require_login
def read_one(db, user, id):
try:
item = db.query(model.Restriction) \
.options(joinedload(model.Restriction.voters)) \
.filter_by(id=id).one()
return {'voted': user in item.voters}
except NoResultFound:
return HTTPError(404, 'Not found')
@app.post('/<id:int>')
@auth.require_login
def create(db, user, id):
try:
item = db.query(model.Restriction).filter_by(id=id).one()
item.voters.add(user)
except NoResultFound:
return HTTPError(404, 'Not found')
# Disabled
# @app.delete('/<id:int>')
# @auth.require_login
def delete(db, user, id):
try:
item = db.query(model.Restriction).filter_by(id=id).one()
item.voters.remove(user)
except NoResultFound:
return HTTPError(404, 'Not found')
| agpl-3.0 | -5,562,420,966,119,401,000 | 23.021739 | 65 | 0.657919 | false |
michaelneuder/image_quality_analysis | bin/nets/old/pixel_diff_conv_net_double_feed.py | 1 | 6455 | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
np.set_printoptions(threshold=np.nan)
import tensorflow as tf
import time
def convolve_inner_layers(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='SAME')
y = tf.nn.bias_add(y, b)
return tf.nn.tanh(y)
def convolve_ouput_layer(x, W, b):
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='SAME')
y = tf.nn.bias_add(y, b)
return y
def conv_net(x, W, b):
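    # three stacked conv layers whose feature maps are concatenated and then
    # reduced by a final convolution to the single-channel output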
conv1 = convolve_inner_layers(x, W['weights1'], b['bias1'])
conv2 = convolve_inner_layers(conv1, W['weights2'], b['bias2'])
conv3 = convolve_inner_layers(conv2, W['weights3'], b['bias3'])
output_feed = tf.concat([conv1, conv2, conv3],3)
output = convolve_ouput_layer(output_feed, W['weights_out'], b['bias_out'])
return output
def get_variance(training_target):
all_pixels = training_target.flatten()
return all_pixels.var()
def get_epoch(x, y, n):
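    # shuffle the sample indices and split them into batches of size n; any
    # leftover samples form one final, smaller batch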
input_size = x.shape[0]
number_batches = input_size // n
extra_examples = input_size % n
batches = {}
batch_indices = np.arange(input_size)
np.random.shuffle(batch_indices)
for i in range(number_batches):
temp_indices = batch_indices[n*i:n*(i+1)]
temp_x = []
temp_y = []
for j in temp_indices:
temp_x.append(x[j])
temp_y.append(y[j])
batches[i] = [np.asarray(temp_x), np.asarray(temp_y)]
if extra_examples != 0:
extra_indices = batch_indices[input_size-extra_examples:input_size]
temp_x = []
temp_y = []
for k in extra_indices:
temp_x.append(x[k])
temp_y.append(y[k])
batches[i+1] = [np.asarray(temp_x), np.asarray(temp_y)]
return batches
def main():
# parameters
filter_dim = 7
filter_dim2 = 1
number_images = 100
batch_size = 4
image_dim = 96
input_layer = 2
first_layer = 50
second_layer = 25
third_layer = 10
output_layer = 1
initializer_scale = 10.0
learning_rate = .001
epochs = 130
    # seeding for debug purposes --- don't forget to remove
SEED = 12345
np.random.seed(SEED)
tf.set_random_seed(SEED)
print('generating random images ... ')
# train images
rand_img_train_1 = np.random.random_sample((number_images,image_dim**2))
rand_img_train_2 = np.random.random_sample((number_images,image_dim**2))
difference_train = abs(rand_img_train_1 - rand_img_train_2)
# test image
rand_img_test_1 = np.random.random_sample((number_images,image_dim**2))
rand_img_test_2 = np.random.random_sample((number_images,image_dim**2))
difference_test = abs(rand_img_test_1 - rand_img_test_2)
# stacking & reshaping images
train_data = np.reshape(np.dstack((rand_img_train_1, rand_img_train_2)), [number_images,image_dim,image_dim,2])
test_data = np.reshape(np.dstack((rand_img_test_1, rand_img_test_2)), [number_images,image_dim,image_dim,2])
target_data_train = np.reshape(difference_train, [number_images,image_dim,image_dim,1])
target_data_test = np.reshape(difference_test, [number_images,image_dim,image_dim,1])
# initializing variables --- fan in
weights = {
'weights1': tf.Variable(tf.random_normal([filter_dim,filter_dim,input_layer,first_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*input_layer)))),
'weights2': tf.Variable(tf.random_normal([filter_dim2,filter_dim2,first_layer,second_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*first_layer)))),
'weights3': tf.Variable(tf.random_normal([filter_dim2,filter_dim2,second_layer,third_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*second_layer)))),
'weights_out': tf.Variable(tf.random_normal([filter_dim2,filter_dim2,third_layer+second_layer+first_layer,output_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*third_layer))))
}
biases = {
'bias1': tf.Variable(tf.random_normal([first_layer],stddev=(1.0/(initializer_scale*filter_dim*filter_dim*input_layer)))),
'bias2': tf.Variable(tf.random_normal([second_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*first_layer)))),
'bias3': tf.Variable(tf.random_normal([third_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*second_layer)))),
'bias_out': tf.Variable(tf.random_normal([output_layer],stddev=(1.0/(initializer_scale*filter_dim2*filter_dim2*third_layer))))
}
# tf Graph input
x = tf.placeholder(tf.float32, [None, image_dim, image_dim, 2])
y = tf.placeholder(tf.float32, [None, image_dim, image_dim, 1])
# model
prediction = conv_net(x, weights, biases)
# get variance to normalize error terms during training
variance = get_variance(difference_train)
# loss and optimization
cost = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# session
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
epoch_count = 0
global_step = 0
start_time = time.time()
print("starting training ... ")
while epoch_count < epochs:
print('---------------------------------------------------------')
print('beginning epoch {} ...'.format(epoch_count))
epoch = get_epoch(train_data, target_data_train, batch_size)
for i in epoch:
x_data_train, y_data_train = np.asarray(epoch[i][0]), np.asarray(epoch[i][1])
sess.run(optimizer, feed_dict={x : x_data_train, y : y_data_train})
loss = sess.run(cost, feed_dict={x : x_data_train, y : y_data_train})
percent_error = 100*loss/variance
print(" - training global_step {0:4d} error: {1:8.4f} {2:8.2f}%".format(global_step, loss, percent_error))
global_step += 1
epoch_count+=1
print('optimization finished!')
print('\nstarting testing...')
score = sess.run(cost, feed_dict={x: test_data, y: target_data_test})
pred = sess.run(prediction, feed_dict={x: test_data})
for i in range(image_dim):
print(rand_img_test_1[0][i],rand_img_test_2[0][i], pred[0][0][i], difference_test[0][i])
print('---- score : {} ----'.format(score))
if __name__ == '__main__':
main()
| mit | 1,550,266,799,215,808,500 | 41.748344 | 198 | 0.623857 | false |
gimli-org/gimli | doc/tutorials/dev/plot_XX_mod_fv_laplace-2d.py | 1 | 2873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import pygimli as pg
import pygimli.solver as solver
from pygimli.viewer import showMesh
from pygimli.viewer.mpl import drawMesh, drawStreams
from pygimli.meshtools import createMesh
import matplotlib.pyplot as plt
import numpy as np
from solverFVM import solveFiniteVolume, createFVPostProzessMesh
# build domain
nSteps = 20
dPhi = (0.6 * np.pi)/nSteps
boundaries = []
for i in range(1, nSteps+1):
boundaries.append([np.cos(dPhi*i), np.sin(dPhi*i)])
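# the points above trace a 0.6*pi arc of radius 1; scaled copies at radius 0.1
# form the inner edge, so the meshed domain is an annular segment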
poly = pg.Mesh(2)
nodes = []
for b in boundaries:
nodes.append(poly.createNode(b))
for b in boundaries[::-1]:
nodes.append(poly.createNode(pg.RVector3(b)*0.1))
for i in range(len(nodes)):
poly.createEdge(nodes[i], nodes[(i+1)%len(nodes)], 1)
mesh = createMesh(poly, quality=34, area=0.001, smooth=[0,10])
f = pg.Vector(mesh.cellCount(), 10)
a = pg.Vector(mesh.cellCount(), 0.1)
#Start FEM solution
swatch = pg.core.Stopwatch(True)
uDirichlet = [1, lambda p_: np.sin(np.arctan2(p_.center()[1],
p_.center()[0]))/p_.center().abs()]
uFEM = solver.solvePoisson(mesh, a=a, f=f, uBoundary=uDirichlet)
print('FEM:', swatch.duration(True))
ax1, cbar = showMesh(mesh, data=uFEM,
nLevs=12, cMin=0, cMax=10, colorBar=True,
showLater=True)
drawMesh(ax1, mesh)
#print(min(u), max(u))
uAna = np.array(list(map(lambda p_: np.sin(np.arctan2(p_[1],
p_[0]))/p_.abs(),
mesh.positions())))
#drawStreamLines2(ax1, mesh, data=u)
#ax2,cbar = showMesh(mesh, data=(u+1e-6)/(ua+1e-6), filled=True, colorBar=True, showLater=True)
#showMesh(amesh)
print('---:', swatch.duration(True))
uFV = solveFiniteVolume(mesh, a=a, f=f, uBoundary=uDirichlet)
print('FVM:', swatch.duration(True))
ax2, cbar = showMesh(mesh,
data=uFV,
cMin=0, cMax=10, logScale=False,
interpolate=False, shading='gouraud',
tri=1,
nLevs=12,
colorBar=True, showLater=True)
drawMesh(ax2, mesh)
#allBounds = pg.solver.parseArgToBoundaries(uDirichlet, mesh)
#bounds, vals = zip(*allBounds)
#uDirVals = pg.solver.generateBoundaryValue(bounds, vals)
mesh2, u2 = createFVPostProzessMesh(mesh, uFV, uDirichlet)
print('---:', swatch.duration(True))
ax3, cbar = showMesh(mesh2, data=u2,
nLevs=12, cMin=0, cMax=10, colorBar=True,
showLater=True)
drawMesh(ax3, mesh2)
#ax3,cbar = showMesh(mesh,
#data=np.array(list(map(lambda p_: np.sin(np.arctan2(p_[1],p_[0]))/p_.abs(), mesh.cellCenter()))),
#cMin=0, cMax=10, logScale=False,
#showLater=True)
#drawMesh(ax3, mesh)
plt.show()
#drawMesh(ax, grid) | apache-2.0 | -7,926,348,419,567,565,000 | 28.030303 | 118 | 0.604595 | false |
wfnex/openbras | src/VPP/test/test_mpls.py | 1 | 52659 | #!/usr/bin/env python
import unittest
import socket
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
MRouteItfFlags, MRouteEntryFlags
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
from scapy.contrib.mpls import MPLS
class TestMPLS(VppTestCase):
""" MPLS Test Case """
def setUp(self):
super(TestMPLS, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(4))
# setup both interfaces
# assign them different tables.
table_id = 0
for i in self.pg_interfaces:
i.admin_up()
i.set_table_ip4(table_id)
i.set_table_ip6(table_id)
i.config_ip4()
i.resolve_arp()
i.config_ip6()
i.resolve_ndp()
i.enable_mpls()
table_id += 1
def tearDown(self):
super(TestMPLS, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.ip6_disable()
i.admin_down()
# the default of 64 matches the IP packet TTL default
def create_stream_labelled_ip4(
self,
src_if,
mpls_labels,
mpls_ttl=255,
ping=0,
ip_itf=None,
dst_ip=None,
n=257):
self.reset_packet_infos()
pkts = []
for i in range(0, n):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
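            # push the label stack outermost first; only the last (innermost)
            # label carries the bottom-of-stack bit (s=1)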
for ii in range(len(mpls_labels)):
if ii == len(mpls_labels) - 1:
p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=1)
else:
p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=0)
if not ping:
if not dst_ip:
p = (p / IP(src=src_if.local_ip4, dst=src_if.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw(payload))
else:
p = (p / IP(src=src_if.local_ip4, dst=dst_ip) /
UDP(sport=1234, dport=1234) /
Raw(payload))
else:
p = (p / IP(src=ip_itf.remote_ip4,
dst=ip_itf.local_ip4) /
ICMP())
info.data = p.copy()
pkts.append(p)
return pkts
def create_stream_ip4(self, src_if, dst_ip):
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IP(src=src_if.remote_ip4, dst=dst_ip) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl,
dst_ip=None):
if dst_ip is None:
dst_ip = src_if.remote_ip6
self.reset_packet_infos()
pkts = []
for i in range(0, 257):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
MPLS(label=mpls_label, ttl=mpls_ttl) /
IPv6(src=src_if.remote_ip6, dst=dst_ip) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
@staticmethod
def verify_filter(capture, sent):
if not len(capture) == len(sent):
# filter out any IPv6 RAs from the capture
for p in capture:
if p.haslayer(IPv6):
capture.remove(p)
return capture
def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0):
try:
capture = self.verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
self.assertEqual(eth.type, 0x800)
tx_ip = tx[IP]
rx_ip = rx[IP]
if not ping_resp:
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
else:
self.assertEqual(rx_ip.src, tx_ip.dst)
self.assertEqual(rx_ip.dst, tx_ip.src)
except:
raise
def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0):
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
self.assertEqual(eth.type, 0x8847)
rx_mpls = rx[MPLS]
for ii in range(len(mpls_labels)):
self.assertEqual(rx_mpls.label, mpls_labels[ii])
self.assertEqual(rx_mpls.cos, 0)
if ii == num:
self.assertEqual(rx_mpls.ttl, ttl)
else:
self.assertEqual(rx_mpls.ttl, 255)
if ii == len(mpls_labels) - 1:
self.assertEqual(rx_mpls.s, 1)
else:
# not end of stack
self.assertEqual(rx_mpls.s, 0)
# pop the label to expose the next
rx_mpls = rx_mpls[MPLS].payload
def verify_capture_labelled_ip4(self, src_if, capture, sent,
mpls_labels):
try:
capture = self.verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
tx_ip = tx[IP]
rx_ip = rx[IP]
# the MPLS TTL is copied from the IP
self.verify_mpls_stack(
rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1)
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
except:
raise
def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels,
ttl=255, top=None):
if top is None:
top = len(mpls_labels) - 1
try:
capture = self.verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
tx_ip = tx[IP]
rx_ip = rx[IP]
# the MPLS TTL is 255 since it enters a new tunnel
self.verify_mpls_stack(
rx, mpls_labels, ttl, top)
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
except:
raise
def verify_capture_labelled(self, src_if, capture, sent,
mpls_labels, ttl=254, num=0):
try:
capture = self.verify_filter(capture, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
rx = capture[i]
self.verify_mpls_stack(rx, mpls_labels, ttl, num)
except:
raise
def verify_capture_ip6(self, src_if, capture, sent):
try:
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
# the rx'd packet has the MPLS label popped
eth = rx[Ether]
self.assertEqual(eth.type, 0x86DD)
tx_ip = tx[IPv6]
rx_ip = rx[IPv6]
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
except:
raise
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for i in self.pg_interfaces:
i.assert_nothing_captured(remark=remark)
def test_swap(self):
""" MPLS label swap tests """
#
# A simple MPLS xconnect - eos label in label out
#
route_32_eos = VppMplsRoute(self, 32, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_eos.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [32])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled(self.pg0, rx, tx, [33])
#
# A simple MPLS xconnect - non-eos label in label out
#
route_32_neos = VppMplsRoute(self, 32, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_neos.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [32, 99])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled(self.pg0, rx, tx, [33, 99])
#
# An MPLS xconnect - EOS label in IP out
#
route_33_eos = VppMplsRoute(self, 33, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_eos.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [33])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_ip4(self.pg0, rx, tx)
#
# An MPLS xconnect - non-EOS label in IP out - an invalid configuration
# so this traffic should be dropped.
#
route_33_neos = VppMplsRoute(self, 33, 0,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[])])
route_33_neos.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [33, 99])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.assert_nothing_captured(
remark="MPLS non-EOS packets popped and forwarded")
#
# A recursive EOS x-connect, which resolves through another x-connect
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[44, 45])])
route_34_eos.add_vpp_config()
tx = self.create_stream_labelled_ip4(self.pg0, [34])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 45], num=2)
#
# A recursive non-EOS x-connect, which resolves through another
# x-connect
#
route_34_neos = VppMplsRoute(self, 34, 0,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=32,
labels=[44, 46])])
route_34_neos.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [34, 99])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
# it's the 2nd (counting from 0) label in the stack that is swapped
self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 46, 99], num=2)
#
        # a recursive IP route that resolves through the recursive non-eos
# x-connect
#
ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_via_label=34,
labels=[55])])
ip_10_0_0_1.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 46, 55])
ip_10_0_0_1.remove_vpp_config()
route_34_neos.remove_vpp_config()
route_34_eos.remove_vpp_config()
route_33_neos.remove_vpp_config()
route_33_eos.remove_vpp_config()
route_32_neos.remove_vpp_config()
route_32_eos.remove_vpp_config()
def test_bind(self):
""" MPLS Local Label Binding test """
#
# Add a non-recursive route with a single out label
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[45])])
route_10_0_0_1.add_vpp_config()
# bind a local label to the route
binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
binding.add_vpp_config()
# non-EOS stream
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [44, 99])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled(self.pg0, rx, tx, [45, 99])
# EOS stream
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [44])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled(self.pg0, rx, tx, [45])
# IP stream
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled_ip4(self.pg0, rx, tx, [45])
#
# cleanup
#
binding.remove_vpp_config()
route_10_0_0_1.remove_vpp_config()
def test_imposition(self):
""" MPLS label imposition test """
#
# Add a non-recursive route with a single out label
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[32])])
route_10_0_0_1.add_vpp_config()
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32])
#
        # Add a non-recursive route with 3 out labels
#
route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[32, 33, 34])])
route_10_0_0_2.add_vpp_config()
#
        # a stream that matches the route for 10.0.0.2
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.2")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 33, 34])
#
# add a recursive path, with output label, via the 1 label route
#
route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32,
[VppRoutePath("10.0.0.1",
0xffffffff,
labels=[44])])
route_11_0_0_1.add_vpp_config()
#
# a stream that matches the route for 11.0.0.1, should pick up
# the label stack for 11.0.0.1 and 10.0.0.1
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 44])
#
# add a recursive path, with 2 labels, via the 3 label route
#
route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32,
[VppRoutePath("10.0.0.2",
0xffffffff,
labels=[44, 45])])
route_11_0_0_2.add_vpp_config()
#
        # a stream that matches the route for 11.0.0.2, should pick up
        # the label stack for 11.0.0.2 and 10.0.0.2
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_labelled_ip4(
self.pg0, rx, tx, [32, 33, 34, 44, 45])
#
# cleanup
#
route_11_0_0_2.remove_vpp_config()
route_11_0_0_1.remove_vpp_config()
route_10_0_0_2.remove_vpp_config()
route_10_0_0_1.remove_vpp_config()
def test_tunnel(self):
""" MPLS Tunnel Tests """
#
        # Create a tunnel with two out labels
#
mpls_tun = VppMPLSTunnelInterface(self,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[44, 46])])
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
#
# add an unlabelled route through the new tunnel
#
route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index)])
route_10_0_0_3.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46])
#
# add a labelled route through the new tunnel
#
route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index,
labels=[33])])
route_10_0_0_4.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.4")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46, 33],
ttl=63, top=2)
def test_v4_exp_null(self):
""" MPLS V4 Explicit NULL test """
#
# The first test case has an MPLS TTL of 0
        # all packets should be dropped
#
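        # label 0 is the reserved IPv4 Explicit NULL label; the pop exposes
        # an IPv4 payload that is then forwarded by the IP FIB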
tx = self.create_stream_labelled_ip4(self.pg0, [0], 0)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.assert_nothing_captured(remark="MPLS TTL=0 packets forwarded")
#
# a stream with a non-zero MPLS TTL
# PG0 is in the default table
#
tx = self.create_stream_labelled_ip4(self.pg0, [0])
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_ip4(self.pg0, rx, tx)
#
# a stream with a non-zero MPLS TTL
# PG1 is in table 1
# we are ensuring the post-pop lookup occurs in the VRF table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg1, [0])
self.pg1.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture()
self.verify_capture_ip4(self.pg0, rx, tx)
def test_v6_exp_null(self):
""" MPLS V6 Explicit NULL test """
#
# a stream with a non-zero MPLS TTL
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip6(self.pg0, 2, 2)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_ip6(self.pg0, rx, tx)
#
# a stream with a non-zero MPLS TTL
# PG1 is in table 1
# we are ensuring the post-pop lookup occurs in the VRF table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip6(self.pg1, 2, 2)
self.pg1.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture()
self.verify_capture_ip6(self.pg0, rx, tx)
def test_deag(self):
""" MPLS Deagg """
#
# A de-agg route - next-hop lookup in default table
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=0)])
route_34_eos.add_vpp_config()
#
# ping an interface in the default table
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [34], ping=1,
ip_itf=self.pg0)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture()
self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1)
#
# A de-agg route - next-hop lookup in non-default table
#
route_35_eos = VppMplsRoute(self, 35, 1,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=1)])
route_35_eos.add_vpp_config()
#
# ping an interface in the non-default table
        # PG0 is in the default table. Packets arrive labelled in the
# default table and egress unlabelled in the non-default
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(
self.pg0, [35], ping=1, ip_itf=self.pg1)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
packet_count = self.get_packet_count_for_if_idx(self.pg0.sw_if_index)
rx = self.pg1.get_capture(packet_count)
self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
#
# Double pop
#
route_36_neos = VppMplsRoute(self, 36, 0,
[VppRoutePath("0.0.0.0",
0xffffffff)])
route_36_neos.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [36, 35],
ping=1, ip_itf=self.pg1)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(len(tx))
self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
route_36_neos.remove_vpp_config()
route_35_eos.remove_vpp_config()
route_34_eos.remove_vpp_config()
def test_interface_rx(self):
""" MPLS Interface Receive """
#
# Add a non-recursive route that will forward the traffic
# post-interface-rx
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
table_id=1,
paths=[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
route_10_0_0_1.add_vpp_config()
#
# An interface receive label that maps traffic to RX on interface
# pg1
# by injecting the packet in on pg0, which is in table 0
# doing an interface-rx on pg1 and matching a route in table 1
# if the packet egresses, then we must have swapped to pg1
# so as to have matched the route in table 1
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
is_interface_rx=1)])
route_34_eos.add_vpp_config()
#
# ping an interface in the default table
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [34], n=257,
dst_ip="10.0.0.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(257)
self.verify_capture_ip4(self.pg1, rx, tx)
def test_mcast_mid_point(self):
""" MPLS Multicast Mid Point """
#
# Add a non-recursive route that will forward the traffic
# post-interface-rx
#
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
table_id=1,
paths=[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
route_10_0_0_1.add_vpp_config()
#
# Add a mcast entry that replicate to pg2 and pg3
        # and replicate to an interface-rx (like a bud node would)
#
route_3400_eos = VppMplsRoute(self, 3400, 1,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index,
labels=[3401]),
VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index,
labels=[3402]),
VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
is_interface_rx=1)],
is_multicast=1)
route_3400_eos.add_vpp_config()
#
# ping an interface in the default table
# PG0 is in the default table
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [3400], n=257,
dst_ip="10.0.0.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(257)
self.verify_capture_ip4(self.pg1, rx, tx)
rx = self.pg2.get_capture(257)
self.verify_capture_labelled(self.pg2, rx, tx, [3401])
rx = self.pg3.get_capture(257)
self.verify_capture_labelled(self.pg3, rx, tx, [3402])
def test_mcast_head(self):
""" MPLS Multicast Head-end """
#
# Create a multicast tunnel with two replications
#
mpls_tun = VppMPLSTunnelInterface(self,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index,
labels=[42]),
VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index,
labels=[43])],
is_multicast=1)
mpls_tun.add_vpp_config()
mpls_tun.admin_up()
#
# add an unlabelled route through the new tunnel
#
route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
[VppRoutePath("0.0.0.0",
mpls_tun._sw_if_index)])
route_10_0_0_3.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg2.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
rx = self.pg3.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])
#
        # Add an IP multicast route via the tunnel
# A (*,G).
# one accepting interface, pg0, 1 forwarding interface via the tunnel
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(mpls_tun._sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg2.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [42])
rx = self.pg3.get_capture(257)
self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [43])
def test_mcast_ip4_tail(self):
""" MPLS IPv4 Multicast Tail """
#
# Add a multicast route that will forward the traffic
# post-disposition
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
table_id=1,
paths=[VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
#
# An interface receive label that maps traffic to RX on interface
# pg1
# by injecting the packet in on pg0, which is in table 0
# doing an rpf-id and matching a route in table 1
# if the packet egresses, then we must have matched the route in
# table 1
#
route_34_eos = VppMplsRoute(self, 34, 1,
[VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
nh_table_id=1,
rpf_id=55)],
is_multicast=1)
route_34_eos.add_vpp_config()
#
# Drop due to interface lookup miss
#
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [34],
dst_ip="232.1.1.1", n=1)
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")
#
        # set the RPF-ID of the entry to match the input packet's
#
route_232_1_1_1.update_rpf_id(55)
self.vapi.cli("clear trace")
tx = self.create_stream_labelled_ip4(self.pg0, [34],
dst_ip="232.1.1.1", n=257)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(257)
self.verify_capture_ip4(self.pg1, rx, tx)
#
        # set the RPF-ID of the entry to not match the input packet's
#
route_232_1_1_1.update_rpf_id(56)
tx = self.create_stream_labelled_ip4(self.pg0, [34],
dst_ip="232.1.1.1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
def test_mcast_ip6_tail(self):
""" MPLS IPv6 Multicast Tail """
#
# Add a multicast route that will forward the traffic
# post-disposition
#
route_ff = VppIpMRoute(
self,
"::",
"ff01::1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
table_id=1,
paths=[VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_ff.add_vpp_config()
#
# An interface receive label that maps traffic to RX on interface
# pg1
# by injecting the packet in on pg0, which is in table 0
# doing an rpf-id and matching a route in table 1
# if the packet egresses, then we must have matched the route in
# table 1
#
route_34_eos = VppMplsRoute(
self, 34, 1,
[VppRoutePath("::",
self.pg1.sw_if_index,
nh_table_id=1,
rpf_id=55,
is_ip6=1)],
is_multicast=1)
route_34_eos.add_vpp_config()
#
# Drop due to interface lookup miss
#
        tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
                                             dst_ip="ff01::1")
        self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")
#
        # set the RPF-ID of the entry to match the input packet's
#
route_ff.update_rpf_id(55)
tx = self.create_stream_labelled_ip6(self.pg0, [34], 255,
dst_ip="ff01::1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(257)
self.verify_capture_ip6(self.pg1, rx, tx)
#
        # set the RPF-ID of the entry to not match the input packet's
#
route_ff.update_rpf_id(56)
tx = self.create_stream_labelled_ip6(self.pg0, [34], 225,
dst_ip="ff01::1")
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
class TestMPLSDisabled(VppTestCase):
""" MPLS disabled """
def setUp(self):
super(TestMPLSDisabled, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
        # PG0 is MPLS enabled
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
self.pg0.enable_mpls()
# PG 1 is not MPLS enabled
self.pg1.admin_up()
def tearDown(self):
super(TestMPLSDisabled, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for i in self.pg_interfaces:
i.get_capture(0)
i.assert_nothing_captured(remark=remark)
def test_mpls_disabled(self):
""" MPLS Disabled """
tx = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
MPLS(label=32, ttl=64) /
IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# A simple MPLS xconnect - eos label in label out
#
route_32_eos = VppMplsRoute(self, 32, 1,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[33])])
route_32_eos.add_vpp_config()
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, tx, "MPLS disabled")
#
# MPLS enable PG1
#
self.pg1.enable_mpls()
#
# Now we get packets through
#
self.pg1.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
#
# Disable PG1
#
self.pg1.disable_mpls()
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
class TestMPLSPIC(VppTestCase):
""" MPLS PIC edge convergence """
def setUp(self):
super(TestMPLSPIC, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(4))
# core links
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
self.pg0.enable_mpls()
self.pg1.admin_up()
self.pg1.config_ip4()
self.pg1.resolve_arp()
self.pg1.enable_mpls()
# VRF (customer facing) link
self.pg2.admin_up()
self.pg2.set_table_ip4(1)
self.pg2.config_ip4()
self.pg2.resolve_arp()
self.pg2.set_table_ip6(1)
self.pg2.config_ip6()
self.pg2.resolve_ndp()
self.pg3.admin_up()
self.pg3.set_table_ip4(1)
self.pg3.config_ip4()
self.pg3.resolve_arp()
self.pg3.set_table_ip6(1)
self.pg3.config_ip6()
self.pg3.resolve_ndp()
def tearDown(self):
super(TestMPLSPIC, self).tearDown()
self.pg0.disable_mpls()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.set_table_ip4(0)
i.set_table_ip6(0)
i.admin_down()
def test_mpls_ibgp_pic(self):
""" MPLS iBGP PIC edge convergence
1) setup many iBGP VPN routes via a pair of iBGP peers.
        2) Check ECMP forwarding to these peers
3) withdraw the IGP route to one of these peers.
4) check forwarding continues to the remaining peer
"""
#
# IGP+LDP core routes
#
core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
[VppRoutePath(self.pg0.remote_ip4,
self.pg0.sw_if_index,
labels=[45])])
core_10_0_0_45.add_vpp_config()
core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index,
labels=[46])])
core_10_0_0_46.add_vpp_config()
#
        # Lots of VPN routes. We need more than 64 so VPP will build
# the fast convergence indirection
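        # each VPN route resolves recursively via a BGP next-hop, so the
        # routes share a path-list; withdrawing one IGP path updates that
        # single shared object rather than every prefix (the PIC behaviour)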
#
vpn_routes = []
pkts = []
for ii in range(64):
dst = "192.168.1.%d" % ii
vpn_routes.append(VppIpRoute(self, dst, 32,
[VppRoutePath("10.0.0.45",
0xffffffff,
labels=[145],
is_resolve_host=1),
VppRoutePath("10.0.0.46",
0xffffffff,
labels=[146],
is_resolve_host=1)],
table_id=1))
vpn_routes[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg2.local_mac,
src=self.pg2.remote_mac) /
IP(src=self.pg2.remote_ip4, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# Send the packet stream (one pkt to each VPN route)
# - expect a 50-50 split of the traffic
#
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0._get_capture(1)
rx1 = self.pg1._get_capture(1)
        # not testing the LB hashing algorithm so we're not concerned
# with the split ratio, just as long as neither is 0
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
        # us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
#
# Withdraw one of the IGP routes
#
core_10_0_0_46.remove_vpp_config()
#
# now all packets should be forwarded through the remaining peer
#
self.vapi.ppcli("clear trace")
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0.get_capture(len(pkts))
#
# enable the FIB walk process to converge the FIB
#
self.vapi.ppcli("test fib-walk-process enable")
#
# packets should still be forwarded through the remaining peer
#
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0.get_capture(64)
#
# Add the IGP route back and we return to load-balancing
#
core_10_0_0_46.add_vpp_config()
self.pg2.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg0._get_capture(1)
rx1 = self.pg1._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
def test_mpls_ebgp_pic(self):
""" MPLS eBGP PIC edge convergence
1) setup many eBGP VPN routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
3) withdraw one eBGP path - expect LB across remaining eBGP
"""
#
        # Lots of VPN routes. We need more than 64 so VPP will build
# the fast convergence indirection
#
vpn_routes = []
vpn_bindings = []
pkts = []
for ii in range(64):
dst = "192.168.1.%d" % ii
local_label = 1600 + ii
vpn_routes.append(VppIpRoute(self, dst, 32,
[VppRoutePath(self.pg2.remote_ip4,
0xffffffff,
nh_table_id=1,
is_resolve_attached=1),
VppRoutePath(self.pg3.remote_ip4,
0xffffffff,
nh_table_id=1,
is_resolve_attached=1)],
table_id=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
ip_table_id=1))
vpn_bindings[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
MPLS(label=local_label, ttl=64) /
IP(src=self.pg0.remote_ip4, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
        # us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
#
# withdraw the connected prefix on the interface.
#
self.pg2.unconfig_ip4()
#
# now all packets should be forwarded through the remaining peer
#
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# enable the FIB walk process to converge the FIB
#
self.vapi.ppcli("test fib-walk-process enable")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# put the connecteds back
#
self.pg2.config_ip4()
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
def test_mpls_v6_ebgp_pic(self):
""" MPLSv6 eBGP PIC edge convergence
1) setup many eBGP VPNv6 routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
3) withdraw one eBGP path - expect LB across remaining eBGP
"""
#
        # Lots of VPN routes. We need more than 64 so VPP will build
# the fast convergence indirection
#
vpn_routes = []
vpn_bindings = []
pkts = []
for ii in range(64):
dst = "3000::%d" % ii
local_label = 1600 + ii
vpn_routes.append(VppIpRoute(self, dst, 128,
[VppRoutePath(self.pg2.remote_ip6,
0xffffffff,
nh_table_id=1,
is_resolve_attached=1,
is_ip6=1),
VppRoutePath(self.pg3.remote_ip6,
0xffffffff,
nh_table_id=1,
is_ip6=1,
is_resolve_attached=1)],
table_id=1,
is_ip6=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
ip_table_id=1,
is_ip6=1))
vpn_bindings[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg0.local_mac,
src=self.pg0.remote_mac) /
MPLS(label=local_label, ttl=64) /
IPv6(src=self.pg0.remote_ip6, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
        # us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
#
# withdraw the connected prefix on the interface.
# and shutdown the interface so the ND cache is flushed.
#
self.pg2.unconfig_ip6()
self.pg2.admin_down()
#
# now all packets should be forwarded through the remaining peer
#
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# enable the FIB walk process to converge the FIB
#
self.vapi.ppcli("test fib-walk-process enable")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg3.get_capture(len(pkts))
#
# put the connecteds back
#
self.pg2.admin_up()
self.pg2.config_ip6()
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx0 = self.pg2._get_capture(1)
rx1 = self.pg3._get_capture(1)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| bsd-3-clause | -886,170,395,574,342,800 | 33.440157 | 79 | 0.483792 | false |
open-mmlab/mmdetection | mmdet/models/dense_heads/corner_head.py | 1 | 46890 | from logging import warning
from math import ceil, log
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob
from mmcv.ops import CornerPool, batched_nms
from mmcv.runner import BaseModule
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from ..utils import gaussian_radius, gen_gaussian_target
from ..utils.gaussian_target import (gather_feat, get_local_maximum,
get_topk_from_heatmap,
transpose_and_gather_feat)
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
class BiCornerPool(BaseModule):
"""Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)
Args:
in_channels (int): Input channels of module.
out_channels (int): Output channels of module.
feat_channels (int): Feature channels of module.
directions (list[str]): Directions of two CornerPools.
norm_cfg (dict): Dictionary to construct and config norm layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
directions,
feat_channels=128,
out_channels=128,
norm_cfg=dict(type='BN', requires_grad=True),
init_cfg=None):
super(BiCornerPool, self).__init__(init_cfg)
self.direction1_conv = ConvModule(
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
self.direction2_conv = ConvModule(
in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
self.aftpool_conv = ConvModule(
feat_channels,
out_channels,
3,
padding=1,
norm_cfg=norm_cfg,
act_cfg=None)
self.conv1 = ConvModule(
in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
self.conv2 = ConvModule(
in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
self.direction1_pool = CornerPool(directions[0])
self.direction2_pool = CornerPool(directions[1])
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
"""Forward features from the upstream network.
Args:
x (tensor): Input feature of BiCornerPool.
Returns:
conv2 (tensor): Output feature of BiCornerPool.
"""
direction1_conv = self.direction1_conv(x)
direction2_conv = self.direction2_conv(x)
direction1_feat = self.direction1_pool(direction1_conv)
direction2_feat = self.direction2_pool(direction2_conv)
aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
conv1 = self.conv1(x)
relu = self.relu(aftpool_conv + conv1)
conv2 = self.conv2(relu)
return conv2
@HEADS.register_module()
class CornerHead(BaseDenseHead, BBoxTestMixin):
"""Head of CornerNet: Detecting Objects as Paired Keypoints.
Code is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/
kp.py#L73>`_ .
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_ .
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
num_feat_levels (int): Levels of feature from the previous module. 2
for HourglassNet-104 and 1 for HourglassNet-52. Because
HourglassNet-104 outputs the final feature and intermediate
supervision feature and HourglassNet-52 only outputs the final
feature. Default: 2.
corner_emb_channels (int): Channel of embedding vector. Default: 1.
train_cfg (dict | None): Training config. Useless in CornerHead,
but we keep this variable for SingleStageDetector. Default: None.
test_cfg (dict | None): Testing config of CornerHead. Default: None.
loss_heatmap (dict | None): Config of corner heatmap loss. Default:
GaussianFocalLoss.
loss_embedding (dict | None): Config of corner embedding loss. Default:
AssociativeEmbeddingLoss.
loss_offset (dict | None): Config of corner offset loss. Default:
SmoothL1Loss.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_classes,
in_channels,
num_feat_levels=2,
corner_emb_channels=1,
train_cfg=None,
test_cfg=None,
loss_heatmap=dict(
type='GaussianFocalLoss',
alpha=2.0,
gamma=4.0,
loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.25,
push_weight=0.25),
loss_offset=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(CornerHead, self).__init__(init_cfg)
self.num_classes = num_classes
self.in_channels = in_channels
self.corner_emb_channels = corner_emb_channels
self.with_corner_emb = self.corner_emb_channels > 0
self.corner_offset_channels = 2
self.num_feat_levels = num_feat_levels
self.loss_heatmap = build_loss(
loss_heatmap) if loss_heatmap is not None else None
self.loss_embedding = build_loss(
loss_embedding) if loss_embedding is not None else None
self.loss_offset = build_loss(
loss_offset) if loss_offset is not None else None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self._init_layers()
def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
"""Initialize conv sequential for CornerHead."""
return nn.Sequential(
ConvModule(in_channels, feat_channels, 3, padding=1),
ConvModule(
feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))
def _init_corner_kpt_layers(self):
"""Initialize corner keypoint layers.
Including corner heatmap branch and corner offset branch. Each branch
has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
"""
self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_pool.append(
BiCornerPool(
self.in_channels, ['top', 'left'],
out_channels=self.in_channels))
self.br_pool.append(
BiCornerPool(
self.in_channels, ['bottom', 'right'],
out_channels=self.in_channels))
self.tl_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.br_heat.append(
self._make_layers(
out_channels=self.num_classes,
in_channels=self.in_channels))
self.tl_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
self.br_off.append(
self._make_layers(
out_channels=self.corner_offset_channels,
in_channels=self.in_channels))
def _init_corner_emb_layers(self):
"""Initialize corner embedding layers.
Only include corner embedding branch with two parts: prefix `tl_` for
top-left and `br_` for bottom-right.
"""
self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()
for _ in range(self.num_feat_levels):
self.tl_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
self.br_emb.append(
self._make_layers(
out_channels=self.corner_emb_channels,
in_channels=self.in_channels))
def _init_layers(self):
"""Initialize layers for CornerHead.
Including two parts: corner keypoint layers and corner embedding layers
"""
self._init_corner_kpt_layers()
if self.with_corner_emb:
self._init_corner_emb_layers()
def init_weights(self):
super(CornerHead, self).init_weights()
bias_init = bias_init_with_prob(0.1)
for i in range(self.num_feat_levels):
# The initialization of parameters are different between
# nn.Conv2d and ConvModule. Our experiments show that
# using the original initialization of nn.Conv2d increases
# the final mAP by about 0.2%
self.tl_heat[i][-1].conv.reset_parameters()
self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
self.br_heat[i][-1].conv.reset_parameters()
self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
self.tl_off[i][-1].conv.reset_parameters()
self.br_off[i][-1].conv.reset_parameters()
if self.with_corner_emb:
self.tl_emb[i][-1].conv.reset_parameters()
self.br_emb[i][-1].conv.reset_parameters()
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of corner heatmaps, offset heatmaps and
embedding heatmaps.
- tl_heats (list[Tensor]): Top-left corner heatmaps for all
levels, each is a 4D-tensor, the channels number is
num_classes.
- br_heats (list[Tensor]): Bottom-right corner heatmaps for all
levels, each is a 4D-tensor, the channels number is
num_classes.
- tl_embs (list[Tensor] | list[None]): Top-left embedding
heatmaps for all levels, each is a 4D-tensor or None.
If not None, the channels number is corner_emb_channels.
- br_embs (list[Tensor] | list[None]): Bottom-right embedding
heatmaps for all levels, each is a 4D-tensor or None.
If not None, the channels number is corner_emb_channels.
- tl_offs (list[Tensor]): Top-left offset heatmaps for all
levels, each is a 4D-tensor. The channels number is
corner_offset_channels.
- br_offs (list[Tensor]): Bottom-right offset heatmaps for all
levels, each is a 4D-tensor. The channels number is
corner_offset_channels.
"""
lvl_ind = list(range(self.num_feat_levels))
return multi_apply(self.forward_single, feats, lvl_ind)
def forward_single(self, x, lvl_ind, return_pool=False):
"""Forward feature of a single level.
Args:
x (Tensor): Feature of a single level.
lvl_ind (int): Level index of current feature.
return_pool (bool): Return corner pool feature or not.
Returns:
tuple[Tensor]: A tuple of CornerHead's output for current feature
level. Containing the following Tensors:
- tl_heat (Tensor): Predicted top-left corner heatmap.
- br_heat (Tensor): Predicted bottom-right corner heatmap.
- tl_emb (Tensor | None): Predicted top-left embedding heatmap.
None for `self.with_corner_emb == False`.
- br_emb (Tensor | None): Predicted bottom-right embedding
heatmap. None for `self.with_corner_emb == False`.
- tl_off (Tensor): Predicted top-left offset heatmap.
- br_off (Tensor): Predicted bottom-right offset heatmap.
                - tl_pool (Tensor): Top-left corner pool feature. Optional.
                - br_pool (Tensor): Bottom-right corner pool feature.
                  Optional.
"""
tl_pool = self.tl_pool[lvl_ind](x)
tl_heat = self.tl_heat[lvl_ind](tl_pool)
br_pool = self.br_pool[lvl_ind](x)
br_heat = self.br_heat[lvl_ind](br_pool)
tl_emb, br_emb = None, None
if self.with_corner_emb:
tl_emb = self.tl_emb[lvl_ind](tl_pool)
br_emb = self.br_emb[lvl_ind](br_pool)
tl_off = self.tl_off[lvl_ind](tl_pool)
br_off = self.br_off[lvl_ind](br_pool)
result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]
if return_pool:
result_list.append(tl_pool)
result_list.append(br_pool)
return result_list
def get_targets(self,
gt_bboxes,
gt_labels,
feat_shape,
img_shape,
with_corner_emb=False,
with_guiding_shift=False,
with_centripetal_shift=False):
"""Generate corner targets.
Including corner heatmap, corner offset.
Optional: corner embedding, corner guiding shift, centripetal shift.
For CornerNet, we generate corner heatmap, corner offset and corner
embedding from this function.
For CentripetalNet, we generate corner heatmap, corner offset, guiding
shift and centripetal shift from this function.
Args:
gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
has shape (num_gt, 4).
gt_labels (list[Tensor]): Ground truth labels of each box, each has
shape (num_gt,).
feat_shape (list[int]): Shape of output feature,
[batch, channel, height, width].
img_shape (list[int]): Shape of input image,
[height, width, channel].
with_corner_emb (bool): Generate corner embedding target or not.
Default: False.
with_guiding_shift (bool): Generate guiding shift target or not.
Default: False.
with_centripetal_shift (bool): Generate centripetal shift target or
not. Default: False.
Returns:
dict: Ground truth of corner heatmap, corner offset, corner
embedding, guiding shift and centripetal shift. Containing the
following keys:
- topleft_heatmap (Tensor): Ground truth top-left corner
heatmap.
- bottomright_heatmap (Tensor): Ground truth bottom-right
corner heatmap.
- topleft_offset (Tensor): Ground truth top-left corner offset.
- bottomright_offset (Tensor): Ground truth bottom-right corner
offset.
                - corner_embedding (list[list[list[int]]]): Ground truth
                  corner embedding. Optional.
                - topleft_guiding_shift (Tensor): Ground truth top-left corner
                  guiding shift. Optional.
                - bottomright_guiding_shift (Tensor): Ground truth
                  bottom-right corner guiding shift. Optional.
                - topleft_centripetal_shift (Tensor): Ground truth top-left
                  corner centripetal shift. Optional.
                - bottomright_centripetal_shift (Tensor): Ground truth
                  bottom-right corner centripetal shift. Optional.
"""
batch_size, _, height, width = feat_shape
img_h, img_w = img_shape[:2]
width_ratio = float(width / img_w)
height_ratio = float(height / img_h)
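        # gt boxes are given in input-image coordinates; these ratios map
        # them onto the (smaller) output feature-map grid used for targets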
gt_tl_heatmap = gt_bboxes[-1].new_zeros(
[batch_size, self.num_classes, height, width])
gt_br_heatmap = gt_bboxes[-1].new_zeros(
[batch_size, self.num_classes, height, width])
gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
if with_corner_emb:
match = []
# Guiding shift is a kind of offset, from center to corner
if with_guiding_shift:
gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
# Centripetal shift is also a kind of offset, from center to corner
# and normalized by log.
if with_centripetal_shift:
gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
[batch_size, 2, height, width])
for batch_id in range(batch_size):
            # Ground truth of corner embedding per image is a list of coord sets
corner_match = []
for box_id in range(len(gt_labels[batch_id])):
left, top, right, bottom = gt_bboxes[batch_id][box_id]
center_x = (left + right) / 2.0
center_y = (top + bottom) / 2.0
label = gt_labels[batch_id][box_id]
# Use coords in the feature level to generate ground truth
scale_left = left * width_ratio
scale_right = right * width_ratio
scale_top = top * height_ratio
scale_bottom = bottom * height_ratio
scale_center_x = center_x * width_ratio
scale_center_y = center_y * height_ratio
# Int coords on feature map/ground truth tensor
left_idx = int(min(scale_left, width - 1))
right_idx = int(min(scale_right, width - 1))
top_idx = int(min(scale_top, height - 1))
bottom_idx = int(min(scale_bottom, height - 1))
# Generate gaussian heatmap
scale_box_width = ceil(scale_right - scale_left)
scale_box_height = ceil(scale_bottom - scale_top)
radius = gaussian_radius((scale_box_height, scale_box_width),
min_overlap=0.3)
radius = max(0, int(radius))
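                # corners placed anywhere inside this radius still yield a
                # box with at least `min_overlap` IoU against the gt box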
gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
radius)
gt_br_heatmap[batch_id, label] = gen_gaussian_target(
gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
radius)
# Generate corner offset
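                # i.e. the sub-pixel residual lost when quantizing the
                # corner coordinates to integer feature-map locations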
left_offset = scale_left - left_idx
top_offset = scale_top - top_idx
right_offset = scale_right - right_idx
bottom_offset = scale_bottom - bottom_idx
gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
gt_br_offset[batch_id, 1, bottom_idx,
right_idx] = bottom_offset
# Generate corner embedding
if with_corner_emb:
corner_match.append([[top_idx, left_idx],
[bottom_idx, right_idx]])
# Generate guiding shift
if with_guiding_shift:
gt_tl_guiding_shift[batch_id, 0, top_idx,
left_idx] = scale_center_x - left_idx
gt_tl_guiding_shift[batch_id, 1, top_idx,
left_idx] = scale_center_y - top_idx
gt_br_guiding_shift[batch_id, 0, bottom_idx,
right_idx] = right_idx - scale_center_x
gt_br_guiding_shift[
batch_id, 1, bottom_idx,
right_idx] = bottom_idx - scale_center_y
# Generate centripetal shift
if with_centripetal_shift:
gt_tl_centripetal_shift[batch_id, 0, top_idx,
left_idx] = log(scale_center_x -
scale_left)
gt_tl_centripetal_shift[batch_id, 1, top_idx,
left_idx] = log(scale_center_y -
scale_top)
gt_br_centripetal_shift[batch_id, 0, bottom_idx,
right_idx] = log(scale_right -
scale_center_x)
gt_br_centripetal_shift[batch_id, 1, bottom_idx,
right_idx] = log(scale_bottom -
scale_center_y)
if with_corner_emb:
match.append(corner_match)
target_result = dict(
topleft_heatmap=gt_tl_heatmap,
topleft_offset=gt_tl_offset,
bottomright_heatmap=gt_br_heatmap,
bottomright_offset=gt_br_offset)
if with_corner_emb:
target_result.update(corner_embedding=match)
if with_guiding_shift:
target_result.update(
topleft_guiding_shift=gt_tl_guiding_shift,
bottomright_guiding_shift=gt_br_guiding_shift)
if with_centripetal_shift:
target_result.update(
topleft_centripetal_shift=gt_tl_centripetal_shift,
bottomright_centripetal_shift=gt_br_centripetal_shift)
return target_result
def loss(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [left, top, right, bottom] format.
gt_labels (list[Tensor]): Class indices corresponding to each box.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components. Containing the
following losses:
- det_loss (list[Tensor]): Corner keypoint losses of all
feature levels.
- pull_loss (list[Tensor]): Part one of AssociativeEmbedding
losses of all feature levels.
- push_loss (list[Tensor]): Part two of AssociativeEmbedding
losses of all feature levels.
- off_loss (list[Tensor]): Corner offset losses of all feature
levels.
"""
targets = self.get_targets(
gt_bboxes,
gt_labels,
tl_heats[-1].shape,
img_metas[0]['pad_shape'],
with_corner_emb=self.with_corner_emb)
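        # all feature levels share the same output resolution, so the same
        # targets are applied to every level (intermediate supervision)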
mlvl_targets = [targets for _ in range(self.num_feat_levels)]
det_losses, pull_losses, push_losses, off_losses = multi_apply(
self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, mlvl_targets)
loss_dict = dict(det_loss=det_losses, off_loss=off_losses)
if self.with_corner_emb:
loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)
return loss_dict
def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
targets):
"""Compute losses for single level.
Args:
tl_hmp (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_hmp (Tensor): Bottom-right corner heatmap for current level with
shape (N, num_classes, H, W).
tl_emb (Tensor): Top-left corner embedding for current level with
shape (N, corner_emb_channels, H, W).
br_emb (Tensor): Bottom-right corner embedding for current level
with shape (N, corner_emb_channels, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
targets (dict): Corner target generated by `get_targets`.
Returns:
            tuple[torch.Tensor]: Losses of the head's different branches
containing the following losses:
- det_loss (Tensor): Corner keypoint loss.
- pull_loss (Tensor): Part one of AssociativeEmbedding loss.
- push_loss (Tensor): Part two of AssociativeEmbedding loss.
- off_loss (Tensor): Corner offset loss.
"""
gt_tl_hmp = targets['topleft_heatmap']
gt_br_hmp = targets['bottomright_heatmap']
gt_tl_off = targets['topleft_offset']
gt_br_off = targets['bottomright_offset']
gt_embedding = targets['corner_embedding']
# Detection loss
tl_det_loss = self.loss_heatmap(
tl_hmp.sigmoid(),
gt_tl_hmp,
avg_factor=max(1,
gt_tl_hmp.eq(1).sum()))
br_det_loss = self.loss_heatmap(
br_hmp.sigmoid(),
gt_br_hmp,
avg_factor=max(1,
gt_br_hmp.eq(1).sum()))
det_loss = (tl_det_loss + br_det_loss) / 2.0
# AssociativeEmbedding loss
if self.with_corner_emb and self.loss_embedding is not None:
pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
gt_embedding)
else:
pull_loss, push_loss = None, None
# Offset loss
# We only compute the offset loss at the real corner position.
# The value of real corner would be 1 in heatmap ground truth.
# The mask is computed in class agnostic mode and its shape is
        # batch * 1 * height * width.
tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
gt_tl_hmp)
br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
gt_br_hmp)
tl_off_loss = self.loss_offset(
tl_off,
gt_tl_off,
tl_off_mask,
avg_factor=max(1, tl_off_mask.sum()))
br_off_loss = self.loss_offset(
br_off,
gt_br_off,
br_off_mask,
avg_factor=max(1, br_off_mask.sum()))
off_loss = (tl_off_loss + br_off_loss) / 2.0
return det_loss, pull_loss, push_loss, off_loss
def get_bboxes(self,
tl_heats,
br_heats,
tl_embs,
br_embs,
tl_offs,
br_offs,
img_metas,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
tl_heats (list[Tensor]): Top-left corner heatmaps for each level
with shape (N, num_classes, H, W).
br_heats (list[Tensor]): Bottom-right corner heatmaps for each
level with shape (N, num_classes, H, W).
tl_embs (list[Tensor]): Top-left corner embeddings for each level
with shape (N, corner_emb_channels, H, W).
br_embs (list[Tensor]): Bottom-right corner embeddings for each
level with shape (N, corner_emb_channels, H, W).
tl_offs (list[Tensor]): Top-left corner offsets for each level
with shape (N, corner_offset_channels, H, W).
br_offs (list[Tensor]): Bottom-right corner offsets for each level
with shape (N, corner_offset_channels, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
"""
assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
result_list = []
for img_id in range(len(img_metas)):
result_list.append(
self._get_bboxes_single(
tl_heats[-1][img_id:img_id + 1, :],
br_heats[-1][img_id:img_id + 1, :],
tl_offs[-1][img_id:img_id + 1, :],
br_offs[-1][img_id:img_id + 1, :],
img_metas[img_id],
tl_emb=tl_embs[-1][img_id:img_id + 1, :],
br_emb=br_embs[-1][img_id:img_id + 1, :],
rescale=rescale,
with_nms=with_nms))
if torch.onnx.is_in_onnx_export():
assert len(
img_metas
            ) == 1, 'Only support one input image when exporting to ONNX'
detections, labels = result_list[0]
# batch_size 1 here, [1, num_det, 5], [1, num_det]
return detections.unsqueeze(0), labels.unsqueeze(0)
return result_list
def _get_bboxes_single(self,
tl_heat,
br_heat,
tl_off,
br_off,
img_meta,
tl_emb=None,
br_emb=None,
tl_centripetal_shift=None,
br_centripetal_shift=None,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
tl_heat (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_heat (Tensor): Bottom-right corner heatmap for current level
with shape (N, num_classes, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
img_meta (dict): Meta information of current image, e.g.,
image size, scaling factor, etc.
tl_emb (Tensor): Top-left corner embedding for current level with
shape (N, corner_emb_channels, H, W).
br_emb (Tensor): Bottom-right corner embedding for current level
with shape (N, corner_emb_channels, H, W).
            tl_centripetal_shift (Tensor | None): Top-left corner's
                centripetal shift for current level with shape (N, 2, H, W).
            br_centripetal_shift (Tensor | None): Bottom-right corner's
                centripetal shift for current level with shape (N, 2, H, W).
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
"""
if isinstance(img_meta, (list, tuple)):
img_meta = img_meta[0]
batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
tl_heat=tl_heat.sigmoid(),
br_heat=br_heat.sigmoid(),
tl_off=tl_off,
br_off=br_off,
tl_emb=tl_emb,
br_emb=br_emb,
tl_centripetal_shift=tl_centripetal_shift,
br_centripetal_shift=br_centripetal_shift,
img_meta=img_meta,
k=self.test_cfg.corner_topk,
kernel=self.test_cfg.local_maximum_kernel,
distance_threshold=self.test_cfg.distance_threshold)
if rescale:
batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])
bboxes = batch_bboxes.view([-1, 4])
scores = batch_scores.view([-1, 1])
clses = batch_clses.view([-1, 1])
# use `sort` instead of `argsort` here, since currently exporting
# `argsort` to ONNX opset version 11 is not supported
scores, idx = scores.sort(dim=0, descending=True)
bboxes = bboxes[idx].view([-1, 4])
scores = scores.view(-1)
clses = clses[idx].view(-1)
detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
keepinds = (detections[:, -1] > -0.1)
detections = detections[keepinds]
labels = clses[keepinds]
if with_nms:
detections, labels = self._bboxes_nms(detections, labels,
self.test_cfg)
return detections, labels
def _bboxes_nms(self, bboxes, labels, cfg):
if labels.numel() == 0:
return bboxes, labels
if 'nms_cfg' in cfg:
            warning('nms_cfg in test_cfg will be deprecated. '
                    'Please rename it as nms')
if 'nms' not in cfg:
cfg.nms = cfg.nms_cfg
out_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,
cfg.nms)
out_labels = labels[keep]
if len(out_bboxes) > 0:
            # use `sort` instead of `argsort` here
_, idx = torch.sort(out_bboxes[:, -1], descending=True)
max_per_img = out_bboxes.new_tensor(cfg.max_per_img).to(torch.long)
nms_after = max_per_img
if torch.onnx.is_in_onnx_export():
# Always keep topk op for dynamic input in onnx
from mmdet.core.export import get_k_for_topk
nms_after = get_k_for_topk(max_per_img, out_bboxes.shape[0])
idx = idx[:nms_after]
out_bboxes = out_bboxes[idx]
out_labels = out_labels[idx]
return out_bboxes, out_labels
def decode_heatmap(self,
tl_heat,
br_heat,
tl_off,
br_off,
tl_emb=None,
br_emb=None,
tl_centripetal_shift=None,
br_centripetal_shift=None,
img_meta=None,
k=100,
kernel=3,
distance_threshold=0.5,
num_dets=1000):
"""Transform outputs for a single batch item into raw bbox predictions.
Args:
tl_heat (Tensor): Top-left corner heatmap for current level with
shape (N, num_classes, H, W).
br_heat (Tensor): Bottom-right corner heatmap for current level
with shape (N, num_classes, H, W).
tl_off (Tensor): Top-left corner offset for current level with
shape (N, corner_offset_channels, H, W).
br_off (Tensor): Bottom-right corner offset for current level with
shape (N, corner_offset_channels, H, W).
tl_emb (Tensor | None): Top-left corner embedding for current
level with shape (N, corner_emb_channels, H, W).
br_emb (Tensor | None): Bottom-right corner embedding for current
level with shape (N, corner_emb_channels, H, W).
tl_centripetal_shift (Tensor | None): Top-left centripetal shift
for current level with shape (N, 2, H, W).
br_centripetal_shift (Tensor | None): Bottom-right centripetal
shift for current level with shape (N, 2, H, W).
img_meta (dict): Meta information of current image, e.g.,
image size, scaling factor, etc.
k (int): Get top k corner keypoints from heatmap.
kernel (int): Max pooling kernel for extract local maximum pixels.
distance_threshold (float): Distance threshold. Top-left and
bottom-right corner keypoints with feature distance less than
the threshold will be regarded as keypoints from same object.
num_dets (int): Num of raw boxes before doing nms.
Returns:
tuple[torch.Tensor]: Decoded output of CornerHead, containing the
following Tensors:
- bboxes (Tensor): Coords of each box.
- scores (Tensor): Scores of each box.
- clses (Tensor): Categories of each box.
"""
with_embedding = tl_emb is not None and br_emb is not None
with_centripetal_shift = (
tl_centripetal_shift is not None
and br_centripetal_shift is not None)
assert with_embedding + with_centripetal_shift == 1
batch, _, height, width = tl_heat.size()
if torch.onnx.is_in_onnx_export():
inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2]
else:
inp_h, inp_w, _ = img_meta['pad_shape']
# perform nms on heatmaps
tl_heat = get_local_maximum(tl_heat, kernel=kernel)
br_heat = get_local_maximum(br_heat, kernel=kernel)
tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap(
tl_heat, k=k)
br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap(
br_heat, k=k)
        # We use repeat instead of expand here because expand is a
        # shallow-copy function. Thus it could cause unexpected testing
        # results sometimes. Using expand decreases mAP by about 10%
        # during testing compared to repeat.
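        # pair every top-left candidate with every bottom-right candidate,
        # giving a batch x k x k grid of candidate boxes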
tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)
tl_off = transpose_and_gather_feat(tl_off, tl_inds)
tl_off = tl_off.view(batch, k, 1, 2)
br_off = transpose_and_gather_feat(br_off, br_inds)
br_off = br_off.view(batch, 1, k, 2)
tl_xs = tl_xs + tl_off[..., 0]
tl_ys = tl_ys + tl_off[..., 1]
br_xs = br_xs + br_off[..., 0]
br_ys = br_ys + br_off[..., 1]
if with_centripetal_shift:
tl_centripetal_shift = transpose_and_gather_feat(
tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
br_centripetal_shift = transpose_and_gather_feat(
br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()
tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
br_ctxs = br_xs - br_centripetal_shift[..., 0]
br_ctys = br_ys - br_centripetal_shift[..., 1]
# all possible boxes based on top k corners (ignoring class)
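        # map candidate corner coordinates from feature-map scale back to
        # the (padded) input image scale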
tl_xs *= (inp_w / width)
tl_ys *= (inp_h / height)
br_xs *= (inp_w / width)
br_ys *= (inp_h / height)
if with_centripetal_shift:
tl_ctxs *= (inp_w / width)
tl_ctys *= (inp_h / height)
br_ctxs *= (inp_w / width)
br_ctys *= (inp_h / height)
x_off, y_off = 0, 0 # no crop
if not torch.onnx.is_in_onnx_export():
# since `RandomCenterCropPad` is done on CPU with numpy and is not
# dynamically traceable when exporting to ONNX, 'border' does not
# appear as a key in 'img_meta'. As a temporary solution, we move
# this 'border' handling to the postprocessing step that runs after
# the export to ONNX has finished, which is handled in
# `mmdet/core/export/model_wrappers.py`. Although this introduces a
# difference between the pytorch and exported onnx models, it can be
# ignored since comparable performance is achieved between them
# (e.g. 40.4 vs 40.6 on COCO val2017, for CornerNet without
# test-time flip)
if 'border' in img_meta:
x_off = img_meta['border'][2]
y_off = img_meta['border'][0]
tl_xs -= x_off
tl_ys -= y_off
br_xs -= x_off
br_ys -= y_off
zeros = tl_xs.new_zeros(*tl_xs.size())
tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros)
tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros)
br_xs = torch.where(br_xs > 0.0, br_xs, zeros)
br_ys = torch.where(br_ys > 0.0, br_ys, zeros)
bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()
if with_centripetal_shift:
tl_ctxs -= x_off
tl_ctys -= y_off
br_ctxs -= x_off
br_ctys -= y_off
tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)
ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
dim=3)
area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()
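# rcentral below defines a central region inside each candidate box (its
# extent controlled by mu); corner pairs whose centripetal-shifted points
# fall outside this region are rejected further down via the *_inds masks.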
rcentral = torch.zeros_like(ct_bboxes)
# magic nums from paper section 4.1
mu = torch.ones_like(area_bboxes) / 2.4
mu[area_bboxes > 3500] = 1 / 2.1 # large bboxes have a smaller mu
bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
bboxes[..., 0]) / 2
rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
bboxes[..., 1]) / 2
rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
bboxes[..., 0]) / 2
rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
bboxes[..., 1]) / 2
area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
(rcentral[..., 3] - rcentral[..., 1])).abs()
dists = area_ct_bboxes / area_rcentral
tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
ct_bboxes[..., 0] >= rcentral[..., 2])
tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
ct_bboxes[..., 1] >= rcentral[..., 3])
br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
ct_bboxes[..., 2] >= rcentral[..., 2])
br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
ct_bboxes[..., 3] >= rcentral[..., 3])
if with_embedding:
tl_emb = transpose_and_gather_feat(tl_emb, tl_inds)
tl_emb = tl_emb.view(batch, k, 1)
br_emb = transpose_and_gather_feat(br_emb, br_inds)
br_emb = br_emb.view(batch, 1, k)
dists = torch.abs(tl_emb - br_emb)
tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)
scores = (tl_scores + br_scores) / 2 # scores for all possible boxes
# tl and br should have same class
tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
cls_inds = (tl_clses != br_clses)
# reject boxes based on distances
dist_inds = dists > distance_threshold
# reject boxes based on widths and heights
width_inds = (br_xs <= tl_xs)
height_inds = (br_ys <= tl_ys)
# Do not use `scores[cls_inds]`; we use `torch.where` here instead.
# Only 1-D indices of type 'tensor(bool)' are supported when
# exporting to ONNX, so any other bool index with more dimensions
# (e.g. a 2-D bool tensor) passed as an input parameter to a node is
# invalid.
negative_scores = -1 * torch.ones_like(scores)
scores = torch.where(cls_inds, negative_scores, scores)
scores = torch.where(width_inds, negative_scores, scores)
scores = torch.where(height_inds, negative_scores, scores)
scores = torch.where(dist_inds, negative_scores, scores)
if with_centripetal_shift:
scores[tl_ctx_inds] = -1
scores[tl_cty_inds] = -1
scores[br_ctx_inds] = -1
scores[br_cty_inds] = -1
scores = scores.view(batch, -1)
scores, inds = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
bboxes = bboxes.view(batch, -1, 4)
bboxes = gather_feat(bboxes, inds)
clses = tl_clses.contiguous().view(batch, -1, 1)
clses = gather_feat(clses, inds).float()
return bboxes, scores, clses
| apache-2.0 | -2,730,451,611,199,976,400 | 43.572243 | 79 | 0.536362 | false |
kg-bot/SupyBot | plugins/Linux/__init__.py | 1 | 2354 | ###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Linux-related commands.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| gpl-3.0 | -8,622,309,444,501,066,000 | 38.233333 | 79 | 0.763806 | false |
NeverWalkAloner/collectiveblogs | blogs/tests_models.py | 1 | 1024 | from django.contrib.auth.models import User
from django.test import TestCase
from .models import Blog, Subscription
# Create your tests here.
class BlogTest(TestCase):
def setUp(self):
Blog.objects.create(name='test',
unique_name='test_blog')
def test_str_representation(self):
blog = Blog.objects.get(unique_name='test_blog')
self.assertEqual('test', str(blog))
class SubscriptionTest(TestCase):
def setUp(self):
self.blog = Blog.objects.create(name='test',
unique_name='test_blog')
self.user = User.objects.create_user(username='test',
password='test',
email='')
Subscription.objects.create(blog=self.blog,
user=self.user)
def test_subscription_representation(self):
s = Subscription.objects.get(blog=self.blog)
self.assertEqual('test', str(s)) | gpl-3.0 | -1,216,902,955,600,590,300 | 34.344828 | 62 | 0.5625 | false |
nkmk/python-snippets | notebook/pandas_set_index.py | 1 | 3936 | import pandas as pd
df = pd.read_csv('data/src/sample_pandas_normal.csv')
print(df)
# name age state point
# 0 Alice 24 NY 64
# 1 Bob 42 CA 92
# 2 Charlie 18 CA 70
# 3 Dave 68 TX 70
# 4 Ellen 24 CA 88
# 5 Frank 30 NY 57
df_i = df.set_index('name')
print(df_i)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
df_id = df.set_index('name', drop=False)
print(df_id)
# name age state point
# name
# Alice Alice 24 NY 64
# Bob Bob 42 CA 92
# Charlie Charlie 18 CA 70
# Dave Dave 68 TX 70
# Ellen Ellen 24 CA 88
# Frank Frank 30 NY 57
df_mi = df.set_index(['state', 'name'])
print(df_mi)
# age point
# state name
# NY Alice 24 64
# CA Bob 42 92
# Charlie 18 70
# TX Dave 68 70
# CA Ellen 24 88
# NY Frank 30 57
df_mi.sort_index(inplace=True)
print(df_mi)
# age point
# state name
# CA Bob 42 92
# Charlie 18 70
# Ellen 24 88
# NY Alice 24 64
# Frank 30 57
# TX Dave 68 70
print(df_i)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
df_ii = df_i.set_index('state')
print(df_ii)
# age point
# state
# NY 24 64
# CA 42 92
# CA 18 70
# TX 68 70
# CA 24 88
# NY 30 57
df_mi = df_i.set_index('state', append=True)
print(df_mi)
# age point
# name state
# Alice NY 24 64
# Bob CA 42 92
# Charlie CA 18 70
# Dave TX 68 70
# Ellen CA 24 88
# Frank NY 30 57
print(df_mi.swaplevel(0, 1))
# age point
# state name
# NY Alice 24 64
# CA Bob 42 92
# Charlie 18 70
# TX Dave 68 70
# CA Ellen 24 88
# NY Frank 30 57
print(df_i)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
df_ri = df_i.reset_index()
print(df_ri)
# name age state point
# 0 Alice 24 NY 64
# 1 Bob 42 CA 92
# 2 Charlie 18 CA 70
# 3 Dave 68 TX 70
# 4 Ellen 24 CA 88
# 5 Frank 30 NY 57
df_change = df_i.reset_index().set_index('state')
print(df_change)
# name age point
# state
# NY Alice 24 64
# CA Bob 42 92
# CA Charlie 18 70
# TX Dave 68 70
# CA Ellen 24 88
# NY Frank 30 57
df.set_index('name', inplace=True)
print(df)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
df = pd.read_csv('data/src/sample_pandas_normal.csv', index_col=0)
print(df)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
print(df.loc['Bob'])
# age 42
# state CA
# point 92
# Name: Bob, dtype: object
print(df.at['Bob', 'age'])
# 42
| mit | 7,370,681,852,639,031,000 | 23.754717 | 66 | 0.424035 | false |
AlgorithmLover/OJCodes | qlcoder/data_mining/topic_model/reference/refered_code.py | 1 | 3286 | #!/usr/bin/python
# -*- coding:utf8 -*-
import time
import jieba.analyse
def post_cut(url):
fr = open(url + "/post_data.txt")
fo = open(url + "/post_key.txt", "a+")
for line in fr.readlines():
term = line.strip().split("\t")
if len(term) == 3 and term[2] != "":
key_list = jieba.analyse.extract_tags(term[2], 30) # get keywords
ustr = term[0] + "\t"
for i in key_list:
ustr += i.encode("utf-8") + " "
fo.write(ustr + "\n")
fr.close()
fo.close()
def post_tfidf(url):
from sklearn.feature_extraction.text import HashingVectorizer
fr = open(url + "/post_key.txt")
id_list = []
data_list = []
for line in fr.readlines():
term = line.strip().split("\t")
if len(term) == 2:
id_list.append(term[0])
data_list.append(term[1])
hv = HashingVectorizer(n_features=10000, non_negative=True) # this class implements the hashing trick
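# HashingVectorizer maps tokens into a fixed 10000-dimensional space with a
# hash function, so no vocabulary has to be kept in memory; hash collisions
# are possible but rare at this dimensionality.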
post_tfidf = hv.fit_transform(data_list) # return feature vector 'fea_train' [n_samples,n_features]
print 'Size of fea_train:' + repr(post_tfidf.shape)
print post_tfidf.nnz
post_cluster(url, id_list, post_tfidf)
def post_cluster(url, id, tfidf_vec):
from sklearn.cluster import KMeans
kmean = KMeans(n_clusters=300)
print "kmeans"
kmean.fit(tfidf_vec)
pred = kmean.transform(tfidf_vec)
count1 = 0
count2 = 0
pred_str = []
for item in pred:
count1 += 1
vec = ""
for tmp in item:
vec += str(tmp)[0:7] + "\t"
pred_str.append(vec)
print len(pred_str)
print len(id)
pred = kmean.predict(tfidf_vec)
fo = open(url + "/cluster.txt", "a+")
for i in range(len(pred)):
count2 += 1
fo.write(id[i] + "\t" + str(pred[i]) + "\n")
fo.close()
print "%d+%d" % (count1, count2)
def post_lda(url, cluster):
from gensim import corpora, models, matutils
count = 0
fr = open(url + "/post_key.txt")
fo2 = open(url + "/post_vec_lda.txt", "a+")
id_list = []
data_list = []
for line in fr.readlines():
term = line.strip().split("\t")
if len(term) == 2:
count += 1
id_list.append(term[0])
word = term[1].strip().split()
data_list.append(word)
dic = corpora.Dictionary(data_list)
corpus = [dic.doc2bow(text) for text in data_list] #
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
lda = models.LdaModel(corpus_tfidf, id2word=dic, num_topics=200)
corpus_lda = lda[corpus_tfidf]
num = 0
for doc in corpus_lda:
wstr = ""
for i in range(len(doc)):
item = doc[i]
wstr += str(item[0]) + "," + str(item[1])[0:7] + "/"
fo2.write(id_list[num] + "\t" + wstr[0:-1] + "\n")
num += 1
fr.close()
fo2.close()
print num
if cluster:
lda_csc_matrix = matutils.corpus2csc(corpus_lda).transpose() # gensim sparse matrix to scipy sparse matrix
post_cluster(url, id_list, lda_csc_matrix)
if __name__ == "__main__":
url = "path"
start_time = time.time()
post_cut(url)
post_tfidf(url)
lda_cluster = False
post_lda(url, lda_cluster)
print time.time() - start_time
| mit | 6,242,895,625,385,736,000 | 26.512605 | 115 | 0.550397 | false |
raphael-boucher/channels | channels/exceptions.py | 1 | 1061 | class ConsumeLater(Exception):
"""
Exception that says that the current message should be re-queued back
onto its channel as it's not ready to be consumd yet (e.g. global order
is being enforced)
"""
pass
class ResponseLater(Exception):
"""
Exception raised inside a Django view when the view has passed
responsibility for the response to another consumer, and so is not
returning a response.
"""
pass
class RequestTimeout(Exception):
"""
Raised when it takes too long to read a request body.
"""
pass
class RequestAborted(Exception):
"""
Raised when the incoming request tells us it's aborted partway through
reading the body.
"""
pass
class DenyConnection(Exception):
"""
Raised during a websocket.connect (or other supported connection) handler
to deny the connection.
"""
pass
class SendNotAvailableOnDemultiplexer(Exception):
"""
Raised when trying to send with a WebsocketDemultiplexer. Use the multiplexer instead.
"""
pass
| bsd-3-clause | 8,837,627,070,535,233,000 | 22.065217 | 90 | 0.685203 | false |
steven-murray/halomod | tests/test_halo_model.py | 1 | 5334 | """
Integration-style tests of the full HaloModel class.
"""
from halomod import TracerHaloModel, DMHaloModel
import pytest
import numpy as np
from hmf.halos.mass_definitions import MassDefinition
from hmf.density_field.filters import Filter
from halomod.profiles import Profile
from halomod.bias import Bias
from halomod.concentration import CMRelation
from halomod.hod import HOD
@pytest.mark.parametrize("model", (TracerHaloModel, DMHaloModel))
def test_default_actually_inits(model):
model()
@pytest.fixture(scope="module")
def dmhm():
return DMHaloModel(transfer_model="EH")
@pytest.fixture(scope="module")
def thm():
return TracerHaloModel(rmin=0.01, rmax=50, rnum=20, transfer_model="EH")
def test_dm_model_instances(dmhm):
assert isinstance(dmhm.mdef, MassDefinition)
assert isinstance(dmhm.filter, Filter)
assert isinstance(dmhm.halo_profile, Profile)
assert isinstance(dmhm.bias, Bias)
assert isinstance(dmhm.halo_concentration, CMRelation)
def test_tr_model_instances(thm):
assert isinstance(thm.mdef, MassDefinition)
assert isinstance(thm.filter, Filter)
assert isinstance(thm.halo_profile, Profile)
assert isinstance(thm.bias, Bias)
assert isinstance(thm.halo_concentration, CMRelation)
assert isinstance(thm.hod, HOD)
@pytest.mark.parametrize(
"quantity",
(
"corr_linear_mm",
"corr_halofit_mm",
"corr_1h_auto_matter",
"corr_2h_auto_matter",
"corr_auto_matter",
"corr_1h_ss_auto_tracer",
"corr_1h_cs_auto_tracer",
"corr_1h_auto_tracer",
"corr_auto_tracer",
"corr_1h_cross_tracer_matter",
"corr_2h_cross_tracer_matter",
"corr_cross_tracer_matter",
"corr_2h_auto_tracer",
# 'halo_profile_rho', 'halo_profile_lam', 'tracer_profile_rho', 'tracer_profile_lam')
),
)
def test_monotonic_dec(thm: TracerHaloModel, quantity):
# Ensure it's going down (or potentially 1e-5 level numerical noise going up)
assert np.all(np.diff(getattr(thm, quantity)) <= 1e-5)
def test_halo_power():
"""Tests the halo centre power spectrum"""
hm = TracerHaloModel(bias_model="UnityBias")
assert np.allclose(hm.power_hh(hm.k_hm), hm.power_mm_2h, rtol=1e-2)
def test_setting_default_tracers_conc():
"""Tests setting default tracer parameters based on halo parameters"""
hm = TracerHaloModel(
halo_profile_model="NFW",
tracer_profile_model="CoredNFW",
halo_concentration_model="Ludlow16",
tracer_concentration_model="Duffy08",
halo_concentration_params={"f": 0.02, "C": 650,},
transfer_model="EH",
)
assert hm.tracer_concentration.params == hm.tracer_concentration._defaults
def test_setting_default_tracers_conc_set_params():
"""Tests setting default tracer parameters based on halo parameters"""
hm = TracerHaloModel(
halo_profile_model="NFW",
tracer_profile_model="NFW",
halo_concentration_model="Ludlow16",
tracer_concentration_model="Ludlow16",
tracer_concentration_params={"f": 0.03, "C": 657,},
transfer_model="EH",
)
assert hm.tracer_concentration.params["f"] == 0.03
assert hm.tracer_concentration.params["C"] == 657
def test_setting_default_tracers_prof():
"""Tests setting default tracer parameters based on halo parameters"""
hm = TracerHaloModel(
halo_profile_model="GeneralizedNFW",
tracer_profile_model="NFW",
halo_concentration_model="Ludlow16",
tracer_concentration_model="Duffy08",
halo_profile_params={"alpha": 1.1},
transfer_model="EH",
)
assert hm.tracer_profile.params == hm.tracer_profile._defaults
def test_setting_default_tracers_same_model():
hm = TracerHaloModel(
halo_profile_model="NFW",
tracer_profile_model="NFW",
halo_concentration_model="Ludlow16",
tracer_concentration_model="Ludlow16",
transfer_model="EH",
)
assert hm.tracer_profile.params == hm.halo_profile.params
assert hm.halo_concentration.params == hm.tracer_concentration.params
@pytest.mark.parametrize(
"dep,new",
[
("corr_gg_1h", "corr_1h_auto_tracer"),
("corr_gg_2h", "corr_2h_auto_tracer"),
("corr_gg", "corr_auto_tracer"),
("power_gg_1h", "power_1h_auto_tracer"),
("power_gg_2h", "power_2h_auto_tracer"),
("power_gg", "power_auto_tracer"),
("corr_mm_1h", "corr_1h_auto_matter"),
("corr_mm_2h", "corr_2h_auto_matter"),
("corr_mm", "corr_auto_matter"),
("power_mm_1h", "power_1h_auto_matter"),
("power_mm_2h", "power_2h_auto_matter"),
("power_mm", "power_auto_matter"),
],
)
def test_deprecated(thm: TracerHaloModel, dep, new):
with pytest.warns(UserWarning):
assert np.all(getattr(thm, dep) == getattr(thm, new))
@pytest.mark.parametrize(
"attr",
[
("halo_concentration_model"),
("bias_model"),
("hc_spectrum"),
("halo_profile_model"),
("sd_bias_model"),
("hod_model"),
("tracer_profile_model"),
("tracer_concentration_model"),
],
)
def test_raiseerror(thm: TracerHaloModel, attr):
fakemodel = 1
with pytest.raises(ValueError):
setattr(thm, attr, fakemodel)
| mit | 7,365,096,517,109,439,000 | 30.011628 | 93 | 0.650731 | false |
RedHatInsights/insights-core | insights/parsers/parted.py | 1 | 8405 | """
PartedL - command ``parted -l``
===============================
This module provides processing for the ``parted`` command. The output is parsed
by the ``PartedL`` class. Attributes are provided for each field for the disk,
and a list of ``Partition`` class objects, one for each partition in the output.
Typical content of the ``parted -l`` command output
looks like::
Model: ATA TOSHIBA MG04ACA4 (scsi)
Disk /dev/sda: 4001GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: pmbr_boot
Number Start End Size File system Name Flags
1 1049kB 2097kB 1049kB bios_grub
2 2097kB 526MB 524MB xfs
3 526MB 4001GB 4000GB lvm
The columns may vary depending upon the type of device.
Note:
The examples in this module may be executed with the following command:
``python -m insights.parsers.parted``
Examples:
>>> parted_data = '''
... Model: ATA TOSHIBA MG04ACA4 (scsi)
... Disk /dev/sda: 4001GB
... Sector size (logical/physical): 512B/512B
... Partition Table: gpt
... Disk Flags: pmbr_boot
...
... Number Start End Size File system Name Flags
... 1 1049kB 2097kB 1049kB bios_grub
... 2 2097kB 526MB 524MB xfs
... 3 526MB 4001GB 4000GB lvm
... '''.strip()
>>> from insights.tests import context_wrap
>>> shared = {PartedL: PartedL(context_wrap(parted_data))}
>>> parted_info = shared[PartedL]
>>> parted_info.data
{'partition_table': 'gpt', 'sector_size': '512B/512B', 'disk_flags': 'pmbr_boot', 'partitions': [{'end': '2097kB', 'name': 'bios_grub', 'number': '1', 'start': '1049kB', 'flags': 'bios_grub', 'file_system': 'bios_grub', 'size': '1049kB'}, {'start': '2097kB', 'size': '524MB', 'end': '526MB', 'number': '2', 'file_system': 'xfs'}, {'end': '4001GB', 'name': 'lvm', 'number': '3', 'start': '526MB', 'flags': 'lvm', 'file_system': 'lvm', 'size': '4000GB'}], 'model': 'ATA TOSHIBA MG04ACA4 (scsi)', 'disk': '/dev/sda', 'size': '4001GB'}
>>> parted_info.data['model']
'ATA TOSHIBA MG04ACA4 (scsi)'
>>> parted_info.disk
'/dev/sda'
>>> parted_info.logical_sector_size
'512B'
>>> parted_info.physical_sector_size
'512B'
>>> parted_info.boot_partition
>>> parted_info.data['disk_flags']
'pmbr_boot'
>>> len(parted_info.partitions)
3
>>> parted_info.partitions[0].data
{'end': '2097kB', 'name': 'bios_grub', 'number': '1', 'start': '1049kB', 'flags': 'bios_grub', 'file_system': 'bios_grub', 'size': '1049kB'}
>>> parted_info.partitions[0].number
'1'
>>> parted_info.partitions[0].start
'1049kB'
>>> parted_info.partitions[0].end
'2097kB'
>>> parted_info.partitions[0].size
'1049kB'
>>> parted_info.partitions[0].file_system
'bios_grub'
>>> parted_info.partitions[0].type
>>> parted_info.partitions[0].flags
'bios_grub'
"""
from .. import parser, CommandParser
from ..parsers import ParseException, parse_fixed_table
from insights.specs import Specs
class Partition(object):
"""Class to contain information for one partition.
Represents the values from one row of the partition information from the
``parted`` command. Column names have been converted to lowercase and are
provided as attributes. Column names may vary so the ``get`` method may
be used to check for the presence of a column.
Attributes:
data (dict): Dictionary of partition information keyed by column names
in lowercase.
"""
def __init__(self, data):
self.data = data
@property
def number(self):
"""str: Partition number."""
return self.data.get('number')
@property
def start(self):
"""str: Starting location for the partition."""
return self.data.get('start')
@property
def end(self):
"""str: Ending location for the partition."""
return self.data.get('end')
@property
def size(self):
"""str: Size of the partition."""
return self.data.get('size')
@property
def file_system(self):
"""str: File system type."""
return self.data.get('file_system')
@property
def type(self):
"""str: File system type."""
return self.data.get('type')
@property
def flags(self):
"""str: Partition flags."""
return self.data.get('flags')
def get(self, item):
"""Get information for column ``item`` or ``None`` if not present."""
return self.data.get(item)
@parser(Specs.parted__l)
class PartedL(CommandParser):
"""Class to represent attributes of the ``parted`` command output.
The columns may vary depending upon the type of device.
Attributes:
data (dict): Dictionary of information returned by ``parted`` command.
partitions (list): The partitions found in the output, as Partition
objects.
boot_partition (Partition): the first partition marked as bootable,
or ``None`` if one was not found.
Raises:
ParseException: Raised if ``parted`` output indicates "error" or
"warning" in first line, or if "disk" field is not present, or if
there is an error parsing the data.
ValueError: Raised if there is an error parsing the partition table.
"""
@property
def disk(self):
"""str: Disk information."""
return self.data['disk']
@property
def logical_sector_size(self):
"""str: Logical part of sector size."""
if self._sector_size:
return self._sector_size[0]
@property
def physical_sector_size(self):
"""str: Physical part of sector size."""
if self._sector_size:
return self._sector_size[1]
def get(self, item):
"""Returns a value for the specified ``item`` key."""
return self.data.get(item)
def parse_content(self, content):
# If device was not present output is error message
if content[0].startswith("Error") or content[0].startswith("Warning"):
raise ParseException("PartedL content indicates an error %s" % content[0])
dev_info = {}
table_lines = []
for line in content:
if not line.strip():
continue
if ':' in line:
label_value = line.split(':')
label = label_value[0].strip().lower()
if len(label_value) == 2:
value = label_value[1].strip()
value = value if value else None
# Single word labels
if ' ' not in label:
dev_info[label] = value
else:
if label.startswith("disk") and '/' in label:
disk_parts = label.split()
dev_info['disk'] = disk_parts[1].strip()
dev_info['size'] = value
elif label.startswith("sector"):
dev_info['sector_size'] = value
else:
label = label.replace(' ', '_')
dev_info[label] = value
else:
table_lines.append(line)
if 'disk' not in dev_info:
raise ParseException("PartedL unable to locate Disk in content")
# Now construct the partition table from the fixed table
partitions = []
if table_lines:
table_lines[0] = table_lines[0].replace('File system', 'File_system').lower()
partitions = parse_fixed_table(table_lines)
self.partitions = [Partition(n) for n in partitions]
self.boot_partition = None
self._sector_size = None
# If we got any partitions, find the first boot partition
for part in partitions:
if 'flags' in part and 'boot' in part['flags']:
self.boot_partition = Partition(part)
break
self.data = dev_info
if 'sector_size' in self.data:
self._sector_size = self.data['sector_size'].split('/', 1)
if len(self._sector_size) != 2:
self._sector_size = None
| apache-2.0 | -4,088,203,973,475,428,400 | 35.385281 | 535 | 0.569423 | false |
kfoss/neon | neon/models/rnn.py | 1 | 21431 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Simple recurrent neural network with one hidden layer.
"""
import logging
from neon.backends.backend import Block
from neon.diagnostics.visualize_rnn import VisualizeRNN
from neon.models.mlp import MLP
from neon.util.compat import range
from neon.util.param import req_param
logger = logging.getLogger(__name__)
class RNN(MLP):
"""
**Recurrent Neural Network**
Neon supports standard Recurrent Neural Networks (RNNs) as well as RNNs
with Long Short Term Memory cells (LSTMs). These models are trained on
sequence data, and therefore require a separate dataset format. Neon is
distributed with the Moby Dick dataset, which is a character-based encoding
of the book Moby Dick. Each character is represented in a one-hot encoding
as one of the 128 lowest ASCII chars.
*Dataset format and unrolling*
For the purpose of illustration, assume the entire source text is the 30
characters of ``'Your_shoe_fits_only_your_foot.'``. Note spaces have been
replaced by underscores for readability. To create minibatches of size 2,
we split the data into two subsequences ``'Your_shoe_fits_'`` and
``'only_your_foot.'`` which are treated as separate, independent sequences.
The RNN is trained using truncated back-propagation through time (tBPTT),
which means that the network is unrolled for a fixed number of steps,
effectively turning it into a deep feed-forward network. To illustrate the
process, consider an unrolling depth of 5 on the text above: The first step
is to break each sequence into short chunks of the unrolling depth:
| ``'Your_' 'shoe_' 'fits_'``
| ``'only_' 'your_' 'foot.'``
The second step is to create minibatches from the columns of this
structure, e.g. the two sequences ``'Your_'`` and ``'only_'`` will form the
first minibatch. This procedure leaves us with 3 minibatches in total.
The reason for using columns rather than rows is that this way we start
processing the independent sequences in parallel. Then, as we move to the
next minibatch, we also move to the next consecutive time step, and
immediately use the hidden state of the network that was computed at the
previous time step.
In the actual neon data format, each letter becomes a one-hot encoded
vector, and thus each chunk is split up into a list over the unrolling
steps, i.e.
| ``'Your_'``
| ``'only_'``
becomes a list of tensors corresponding to the one-hot encodings of
``['Y', 'o'], ['o', 'n'], ['u', 'l'], ['r', 'y'], [' ', ' ']``.
These lists form the elements of another list over the 3 minibatches that
make up the full dataset.
Note that in the more general case of datasets with multiple sequences of
unequal lengths, it would be necessary to pick the minibatch size to be
equal to the number of sequences, and the number of minibatches to be the
length of the sequences. Sequences would need to be padded to the maximum
length with an "empty character" code, e.g. the all-zeros vector rather
than a one-hot encoding.
In the Moby Dick example, the network is trained to predict one character
ahead, so the targets used for training are simply a copy of the inputs
shifted by one character into the future.
"""
def __init__(self, **kwargs):
self.accumulate = True
# Reusing deltas not supported for RNNs yet
self.reuse_deltas = False
super(RNN, self).__init__(**kwargs)
req_param(self, ['unrolls'])
self.rec_layer = self.layers[1]
def link(self, initlayer=None):
"""
link function for the RNN differs from the MLP in that it does not
print the layers
"""
for ll, pl in zip(self.layers, [initlayer] + self.layers[:-1]):
ll.set_previous_layer(pl)
# self.print_layers()
def fit(self, dataset):
viz = VisualizeRNN()
error = self.backend.empty((1, 1))
mb_id = self.backend.empty((1, 1))
self.print_layers()
self.data_layer.init_dataset(dataset)
self.data_layer.use_set('train')
# "output":"input":"rec"
# "lstm_x":"lstm_ih":"lstm_fh":"lstm_oh":"lstm_ch"
self.grad_checker(numgrad="output")
logger.info('commencing model fitting')
errorlist = []
suberrorlist = []
suberror = self.backend.zeros((1, 1))
while self.epochs_complete < self.num_epochs:
self.backend.begin(Block.epoch, self.epochs_complete)
error.fill(0.0)
mb_id = 1
self.data_layer.reset_counter()
while self.data_layer.has_more_data():
self.backend.begin(Block.minibatch, mb_id)
self.reset(mb_id)
self.backend.begin(Block.fprop, mb_id)
self.fprop(debug=(True if (mb_id is -1) else False))
self.backend.end(Block.fprop, mb_id)
self.backend.begin(Block.bprop, mb_id)
self.bprop(debug=(True if (mb_id is -1) else False))
self.backend.end(Block.bprop, mb_id)
self.backend.begin(Block.update, mb_id)
self.update(self.epochs_complete)
self.backend.end(Block.update, mb_id)
self.cost_layer.cost.set_outputbuf(
self.class_layer.output_list[-1])
suberror = self.cost_layer.get_cost()
suberrorlist.append(float(suberror.asnumpyarray()))
self.backend.add(error, suberror, error)
if self.step_print > 0 and mb_id % self.step_print == 0:
logger.info('%d.%d logloss=%0.5f', self.epochs_complete,
mb_id / self.step_print - 1,
float(error.asnumpyarray()) /
self.data_layer.num_batches)
self.backend.end(Block.minibatch, mb_id)
mb_id += 1
self.backend.end(Block.epoch, self.epochs_complete)
self.epochs_complete += 1
errorlist.append(float(error.asnumpyarray()) /
self.data_layer.num_batches)
# self.print_layers(debug=True)
logger.info('epoch: %d, total training error: %0.5f',
self.epochs_complete, float(error.asnumpyarray()) /
self.data_layer.num_batches)
if self.make_plots is True:
self.plot_layers(viz, suberrorlist, errorlist)
self.data_layer.cleanup()
def reset(self, batch):
"""
instead of having a separate buffer for hidden_init, we are now
using the last element output_list[-1] for that.
The shuffle is no longer necessary because fprop directly looks
into the output_list buffer.
"""
if (batch % self.reset_period) == 0 or batch == 1:
self.rec_layer.output_list[-1].fill(0) # reset fprop state
self.rec_layer.deltas.fill(0) # reset bprop (for non-truncated)
if 'c_t' in self.rec_layer.__dict__:
self.rec_layer.c_t[-1].fill(0)
self.rec_layer.celtas.fill(0)
def plot_layers(self, viz, suberrorlist, errorlist):
# generic error plot
viz.plot_error(suberrorlist, errorlist)
# LSTM specific plots
if 'c_t' in self.rec_layer.__dict__:
viz.plot_lstm_wts(self.rec_layer, scale=1.1, fig=4)
viz.plot_lstm_acts(self.rec_layer, scale=21, fig=5)
# RNN specific plots
else:
viz.plot_weights(self.rec_layer.weights.asnumpyarray(),
self.rec_layer.weights_rec.asnumpyarray(),
self.class_layer.weights.asnumpyarray())
viz.plot_activations(self.rec_layer.pre_act_list,
self.rec_layer.output_list,
self.class_layer.pre_act_list,
self.class_layer.output_list,
self.cost_layer.targets)
def fprop(self, debug=False, eps_tau=-1, eps=0,
num_target=None, num_i=0, num_j=0):
"""
Adding numerical gradient functionality here to avoid duplicate fprops.
TODO: Make a version where the for tau loop is inside the layer. The
best way is to have a baseclass for both RNN and LSTM for this.
"""
self.data_layer.fprop(None) # get next mini batch
inputs = self.data_layer.output
y = self.rec_layer.output_list # note: just a shorthand, no copy.
c = [None for k in range(len(y))]
if 'c_t' in self.rec_layer.__dict__:
c = self.rec_layer.c_t
# loop for rec_layer
for tau in range(0, self.unrolls):
if tau == eps_tau:
numpy_target = num_target[num_i, num_j].asnumpyarray()
num_target[num_i, num_j] = (numpy_target + eps)
if debug:
logger.debug("in RNNB.fprop, tau %d, input %d" % (tau,
inputs[tau].asnumpyarray().argmax(0)[0]))
self.rec_layer.fprop(y[tau-1], c[tau-1], inputs[tau], tau)
if tau == eps_tau:
num_target[num_i, num_j] = numpy_target
# loop for class_layer
for tau in range(0, self.unrolls):
if tau == eps_tau:
numpy_target = num_target[num_i, num_j].asnumpyarray()
num_target[num_i, num_j] = (numpy_target + eps)
if debug:
logger.debug("in RNNB.fprop, tau %d, input %d" % (tau,
inputs[tau].asnumpyarray().argmax(0)[0]))
self.class_layer.fprop(y[tau], tau)
if tau == eps_tau:
num_target[num_i, num_j] = numpy_target
# cost layer fprop is a pass.
def bprop(self, debug, numgrad=None):
"""
Parent method for bptt and truncated-bptt. Truncation is necessary
for the standard RNN as a way to prevent exploding gradients; for the
LSTM it also bounds the cost of backpropagating through long sequences.
"""
if self.truncate:
self.trunc_bprop_tt(debug, numgrad)
else:
self.bprop_tt(debug, numgrad)
def trunc_bprop_tt(self, debug, numgrad=None):
"""
TODO: move the loop over t into the layer class.
"""
if numgrad is None:
min_unroll = 1
else:
logger.debug("MLP.bprop single unrolling for numgrad")
min_unroll = self.unrolls
for tau in range(min_unroll-0, self.unrolls+1):
self.cost_layer.cost.set_outputbuf(
self.class_layer.output_list[tau-1])
self.cost_layer.bprop(None, tau-1)
if debug:
tmp = self.cost_layer.targets[tau-1].asnumpyarray()
tmp = tmp.argmax(0)[0]
logger.debug("in RNNB.bprop, tau %d target %d" % (tau-1, tmp))
error = self.cost_layer.deltas
self.class_layer.bprop(error, tau, numgrad=numgrad)
error = self.class_layer.deltas
for t in list(range(0, tau))[::-1]:
if 'c_t' in self.rec_layer.__dict__:
cerror = self.rec_layer.celtas # on t=0, prev batch state
else:
cerror = None # for normal RNN
self.rec_layer.bprop(error, cerror, t, numgrad=numgrad)
error[:] = self.rec_layer.deltas # [TODO] why need deepcopy?
def bprop_tt(self, debug, numgrad=None):
"""
Keep state over consecutive unrollings. Explodes for RNN, and is not
currently used for anything, but future recurrent layers might use it.
"""
temp1 = self.backend.zeros(self.class_layer.deltas.shape)
temp2 = self.backend.zeros(self.class_layer.deltas.shape)
temp1c = self.backend.zeros(self.class_layer.deltas.shape)
temp2c = self.backend.zeros(self.class_layer.deltas.shape)
for tau in list(range(self.unrolls))[::-1]:
self.cost_layer.cost.set_outputbuf(
self.class_layer.output_list[tau])
self.cost_layer.bprop(None, tau)
cost_error = self.cost_layer.deltas
self.class_layer.bprop(cost_error, tau, numgrad=numgrad)
external_error = self.class_layer.deltas
internal_error = self.rec_layer.deltas
if 'c_t' in self.rec_layer.__dict__:
internal_cerror = self.rec_layer.celtas
external_cerror = self.backend.zeros(temp1.shape)
else:
internal_cerror = None
external_cerror = None
self.rec_layer.bprop(external_error, external_cerror, tau,
numgrad=numgrad)
temp1[:] = self.rec_layer.deltas
if 'c_t' in self.rec_layer.__dict__:
temp1c[:] = self.rec_layer.celtas
self.rec_layer.bprop(internal_error, internal_cerror, tau,
numgrad=numgrad)
temp2[:] = self.rec_layer.deltas
if 'c_t' in self.rec_layer.__dict__:
temp2c[:] = self.rec_layer.celtas
self.backend.add(temp1, temp2, out=self.rec_layer.deltas)
if 'c_t' in self.rec_layer.__dict__:
self.backend.add(temp1c, temp2c, out=self.rec_layer.celtas)
def grad_checker(self, numgrad="lstm_ch"):
"""
Check gradients for LSTM layer:
- W is replicated, only inject the eps once, repeat, average.
bProp is only through the full stack, but wrt. the W in each
level. bProp does this through a for t in tau.
Need a special fprop that injects into one unrolling only.
"""
for layer in self.layers:
logger.info("%s", str(layer))
if numgrad is "output":
num_target = self.class_layer.weights
anl_target = self.class_layer.weight_updates
num_i, num_j = 15, 56
elif numgrad is "input":
num_target = self.rec_layer.weights
anl_target = self.rec_layer.weight_updates
num_i, num_j = 12, 110 # 110 is "n"
elif numgrad is "rec":
num_target = self.rec_layer.weights_rec
anl_target = self.rec_layer.updates_rec
num_i, num_j = 12, 63
elif numgrad is "lstm_x":
num_target = self.rec_layer.Wfx
anl_target = self.rec_layer.Wfx_updates
num_i, num_j = 12, 110
elif numgrad is "lstm_ih":
num_target = self.rec_layer.Wih
anl_target = self.rec_layer.Wih_updates
num_i, num_j = 12, 55
elif numgrad is "lstm_fh":
num_target = self.rec_layer.Wfh
anl_target = self.rec_layer.Wfh_updates
num_i, num_j = 12, 55
elif numgrad is "lstm_oh":
num_target = self.rec_layer.Woh
anl_target = self.rec_layer.Woh_updates
num_i, num_j = 12, 55
elif numgrad is "lstm_ch":
num_target = self.rec_layer.Wch
anl_target = self.rec_layer.Wch_updates
num_i, num_j = 12, 55
eps = 1e-6 # better to use float64 in cpu.py for this
numerical = 0 # initialize buffer
# loop to inject epsilon in different unrolling stages
for eps_tau in range(0, self.unrolls):
self.reset(1) # clear hidden input
self.fprop(debug=False, eps_tau=eps_tau, eps=0,
num_target=num_target, num_i=num_i, num_j=num_j)
self.cost_layer.set_targets()
self.data_layer.reset_counter()
self.cost_layer.cost.set_outputbuf(
self.class_layer.output_list[-1])
suberror_eps = self.cost_layer.get_cost().asnumpyarray()
self.reset(1)
self.fprop(debug=False, eps_tau=eps_tau, eps=eps,
num_target=num_target, num_i=num_i, num_j=num_j)
self.data_layer.reset_counter()
self.cost_layer.cost.set_outputbuf(
self.class_layer.output_list[-1])
suberror_ref = self.cost_layer.get_cost().asnumpyarray()
num_part = (suberror_eps - suberror_ref) / eps
logger.debug("numpart for eps_tau=%d of %d is %e",
eps_tau, self.unrolls, num_part)
numerical += num_part
# bprop for analytical gradient
self.bprop(debug=False, numgrad=numgrad)
analytical = anl_target[num_i, num_j].asnumpyarray()
logger.debug("---------------------------------------------")
logger.debug("RNN grad_checker: suberror_eps %f", suberror_eps)
logger.debug("RNN grad_checker: suberror_ref %f", suberror_ref)
logger.debug("RNN grad_checker: numerical %e", numerical)
logger.debug("RNN grad_checker: analytical %e", analytical)
logger.debug("RNN grad_checker: ratio %e", 1./(numerical/analytical))
logger.debug("---------------------------------------------")
def predict_generator(self, dataset, setname):
"""
Generate flattened predictions and true labels for the given dataset,
one mini-batch at a time.
Arguments:
dataset: A neon dataset instance
setname: Which set to compute predictions for (test, train, val)
Returns:
tuple: each call yields a 2-tuple of outputs and references.
The first item is the model probabilities for each class,
and the second item is either the one-hot or raw labels with
ground truth.
See Also:
predict_fullset
"""
self.data_layer.init_dataset(dataset)
assert self.data_layer.has_set(setname)
self.data_layer.use_set(setname, predict=True)
self.data_layer.reset_counter()
predlabels = self.backend.empty((1, self.batch_size))
labels = self.backend.empty((1, self.batch_size))
# TODO: find some alternate way of re-assembling data that doesn't
# require allocating space for the entire dataset.
outputs_pred = self.backend.zeros((self.data_layer.num_batches *
self.unrolls, self.batch_size))
outputs_targ = self.backend.zeros((self.data_layer.num_batches *
self.unrolls, self.batch_size))
mb_id = 0
self.data_layer.reset_counter()
while self.data_layer.has_more_data():
mb_id += 1
self.reset(mb_id)
self.fprop(debug=False)
# time unrolling loop to disseminate fprop results
for tau in range(self.unrolls):
probs = self.class_layer.output_list[tau]
targets = self.data_layer.targets[tau]
self.backend.argmax(targets, axis=0, out=labels)
self.backend.argmax(probs, axis=0, out=predlabels)
# collect batches to re-assemble continuous data
idx = self.unrolls * (mb_id - 1) + tau
outputs_pred[idx, :] = predlabels
outputs_targ[idx, :] = labels
self.data_layer.cleanup()
# flatten the 2d predictions into our canonical 1D format
pred_flat = outputs_pred.transpose().reshape((1, -1))
targ_flat = outputs_targ.transpose().reshape((1, -1))
for i in range(self.data_layer.num_batches):
start = i * self.unrolls * self.batch_size
end = start + (self.unrolls * self.batch_size)
yield (pred_flat[start:end], targ_flat[start:end])
def write_string(self, pred, targ, setname):
""" For text prediction, reassemble the batches and print out a
short contiguous segment of target text and predicted text - useful
to check for off-by-one errors and the like"""
import numpy as np
pred_int = pred[0, 2:40].asnumpyarray().ravel().astype(np.int8)
targ_int = targ[0, 2:40].asnumpyarray().ravel().astype(np.int8)
# remove special characters, replace them with '#'
pred_int[pred_int < 32] = 35
targ_int[targ_int < 32] = 35
# create output strings
logging.info("the target for '%s' is: '%s'", setname,
''.join(targ_int.view('c')))
logging.info("prediction for '%s' is: '%s'", setname,
''.join(pred_int.view('c')))
| apache-2.0 | 9,129,896,781,727,219,000 | 43.741127 | 79 | 0.575101 | false |
LamaHamadeh/Microsoft-DAT210x | Module 5/assignment4.py | 1 | 10263 | '''
author Lama Hamadeh
'''
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
#
# TODO: Parameters to play around with
PLOT_TYPE_TEXT = False # If you'd like to see indices
PLOT_VECTORS = True # If you'd like to see your original features in P.C.-Space
matplotlib.style.use('ggplot') # Look Pretty
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
def drawVectors(transformed_features, components_, columns, plt):
num_columns = len(columns)
# This function will project your *original* feature (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
## Visualize projections
# Sort each column by its length. These are your *original*
# columns, not the principal components.
import math
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print "Projected Features by importance:\n", important_features
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
return ax
def doPCA(data, dimensions=2):
from sklearn.decomposition import RandomizedPCA
model = RandomizedPCA(n_components=dimensions)
model.fit(data)
return model
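# Note: RandomizedPCA was deprecated in later scikit-learn releases;
# PCA(svd_solver='randomized') is the equivalent modern spelling.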
def doKMeans(data, clusters=0):
#
# TODO: Do the KMeans clustering here, passing in the # of clusters parameter
# and fit it against your data. Then, return a tuple containing the cluster
# centers and the labels
#
# .. your code here ..
model = KMeans(n_clusters=clusters)
labels = model.fit_predict(data)
return model.cluster_centers_, model.labels_
#
# TODO: Load up the dataset. It has may or may not have nans in it. Make
# sure you catch them and destroy them, by setting them to '0'. This is valid
# for this dataset, since if the value is missing, you can assume no $ was spent
# on it.
#
# .. your code here ..
df=pd.read_csv('/Users/ADB3HAMADL/Desktop/Anaconda_Packages/DAT210x-master/Module5/Datasets/Wholesale customers data.csv')
df.dropna(axis = 0, how = 'any', inplace = True)
#
# TODO: As instructed, get rid of the 'Channel' and 'Region' columns, since
# you'll be investigating as if this were a single location wholesaler, rather
# than a national / international one. Leaving these fields in here would cause
# KMeans to examine and give weight to them.
#
# .. your code here ..
df.drop(['Channel','Region'],inplace=True,axis=1)
print(df.dtypes)
print(df.head())
#
# TODO: Before unitizing / standardizing / normalizing your data in preparation for
# K-Means, it's a good idea to get a quick peek at it. You can do this using the
# .describe() method, or even by using the built-in pandas df.plot.hist()
#
# .. your code here ..
print(df.describe())
print(df.plot.hist())
#
# INFO: Having checked out your data, you may have noticed there's a pretty big gap
# between the top customers in each feature category and the rest. Some feature
# scaling algos won't get rid of outliers for you, so it's a good idea to handle that
# manually---particularly if your goal is NOT to determine the top customers. After
# all, you can do that with a simple Pandas .sort_values() and not a machine
# learning clustering algorithm. From a business perspective, you're probably more
# interested in clustering your +/- 2 standard deviation customers, rather than the
# creme de la creme, or bottom of the barrel'ers
#
# Remove top 5 and bottom 5 samples for each column:
drop = {}
for col in df.columns:
# Bottom 5
sort = df.sort_values(by=col, ascending=True)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
# Top 5
sort = df.sort_values(by=col, ascending=False)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
#
# INFO Drop rows by index. We do this all at once in case there is a
# collision. This way, we don't end up dropping more rows than we have
# to, if there is a single row that satisfies the drop for multiple columns.
# Since there are 6 columns, if we end up dropping < 5*6*2 = 60 rows, that means
# there indeed were collisions.
print "Dropping {0} Outliers...".format(len(drop))
df.drop(inplace=True, labels=drop.keys(), axis=0)
print df.describe()
#
# INFO: What are you interested in?
#
# Depending on what you're interested in, you might take a different approach
# to normalizing/standardizing your data.
#
# You should note that all columns left in the dataset are of the same unit.
# You might ask yourself, do I even need to normalize / standardize the data?
# The answer depends on what you're trying to accomplish. For instance, although
# all the units are the same (generic money unit), the price per item in your
# store isn't. There may be some cheap items and some expensive one. If your goal
# is to find out what items people buy tend to buy together but you didn't
# unitize properly before running kMeans, the contribution of the lesser priced
# item would be dwarfed by the more expensive item.
#
# For a great overview on a few of the normalization methods supported in SKLearn,
# please check out: https://stackoverflow.com/questions/30918781/right-function-for-normalizing-input-of-sklearn-svm
#
# Suffice to say, at the end of the day, you're going to have to know what question
# you want answered and what data you have available in order to select the best
# method for your purpose. Luckily, SKLearn's interfaces are easy to switch out
# so in the mean time, you can experiment with all of them and see how they alter
# your results.
#
#
# 5-sec summary before you dive deeper online:
#
# NORMALIZATION: Let's say your users spend a LOT. Normalization divides each item by
# the average overall amount of spending. Stated differently, your
# new feature is = the contribution of overall spending going into
# that particular item: $spent on feature / $overall spent by sample
#
# MINMAX: What % in the overall range of $spent by all users on THIS particular
# feature is the current sample's feature at? When you're dealing with
# all the same units, this will produce a near face-value amount. Be
# careful though: if you have even a single outlier, it can cause all
# your data to get squashed up in lower percentages.
# Imagine your buyers usually spend $100 on wholesale milk, but today
# only spent $20. This is the relationship you're trying to capture
# with MinMax. NOTE: MinMax doesn't standardize (std. dev.); it only
# normalizes / unitizes your feature, in the mathematical sense.
# MinMax can be used as an alternative to zero mean, unit variance scaling.
# [(sampleFeatureValue-min) / (max-min)] * (range_max-range_min) + range_min
# Where min and max are the overall feature values for all samples, and
# range_min / range_max are the bounds of the target feature_range
# (0 and 1 by default).
#
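# Quick illustration (hypothetical numbers, not from this dataset): for a
# single sample row [100., 20.], Normalizer rescales the row by its own norm
# so each sample has unit length, while MinMaxScaler rescales each *column*
# using the min/max observed across all samples -- which is why a single
# extreme spender can squash everyone else toward zero.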
# TODO: Un-comment just ***ONE*** of these lines at a time and see how it alters your results
# Pay attention to the direction of the arrows, as well as their LENGTHS
#T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#T = preprocessing.Normalizer().fit_transform(df)
T = preprocessing.Normalizer().fit_transform(df)
#
# INFO: Sometimes people perform PCA before doing KMeans, so that KMeans only
# operates on the most meaningful features. In our case, there are so few features
# that doing PCA ahead of time isn't really necessary, and you can do KMeans in
# feature space. But keep in mind you have the option to transform your data to
# bring down its dimensionality. If you take that route, then your Clusters will
# already be in PCA-transformed feature space, and you won't have to project them
# again for visualization.
# Do KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
#
# TODO: Print out your centroids. They're currently in feature-space, which
# is good. Print them out before you transform them into PCA space for viewing
#
# .. your code here ..
print centroids
# Do PCA *after* to visualize the results. Project the centroids as well as
# the samples into the new 2D feature space for visualization purposes.
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualize all the samples. Give them the color of their cluster label
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
# Plot the index of the sample, so you can further investigate it in your dset
for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
# Plot a regular scatter plot
sample_colors = [ c[labels[i]] for i in range(len(T)) ]
ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plot the Centroids as X's, and label them
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
# Display feature vectors for investigation:
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Add the cluster label back into the dataframe and display it:
df['label'] = pd.Series(labels, index=df.index)
print df
plt.show()
| mit | -4,313,806,507,492,444,000 | 40.216867 | 122 | 0.714508 | false |
hardc0d3/sppy | sppy_test/open_env_get_db_cursor_ctl_close.py | 1 | 1038 | from sppy.spapi_cffi import SophiaApi
from sppy.spapi_cffi_cdef import sophia_api_cdefs
from sppy.spapi_cffi_codecs import *
sp = SophiaApi( '../../sophia/sophia1.2.2/libsophia.so.1.2.2',sophia_api_cdefs['1.2.2'] )
codec_u32 = U32(sp.ffi)
dbname = 'test'
env = sp.env()
print "get env object",env.cd
typ = sp.type(env)
print "type env env?",typ.decode(0)
ctl = sp.ctl(env)
typ = sp.type(ctl)
print "type of ctl?",typ.decode(0)
rc = sp.set( ctl, "sophia.path", "../test_data/" )
print "set ctl path", rc._(0)
rc = sp.open( env )
print "open env",rc._(0)
rc = sp.set( ctl, "db", dbname )
print "set ctl db name:%s"%dbname,rc
db = sp.get( ctl, "db.%s"%dbname )
print "get ctl db.%s"%dbname,db.cd
cursor = sp.cursor(ctl)
print cursor.cd
o = sp.get(cursor)
szk = sp.ffi.new("uint32_t*")
szv = sp.ffi.new("uint32_t*")
while o.cd != sp.ffi.NULL:
key = sp.get(o,"key",szk)
val = sp.get(o,"value",szv)
print key._(szk[0]),val._(szv[0])
o = sp.get(cursor)
#print o.cd
rc = sp.destroy(env)
print "destroy env",rc
| bsd-2-clause | -2,561,781,173,405,216,300 | 18.222222 | 89 | 0.631021 | false |
wdmchaft/taskcoach | taskcoachlib/mailer/outlook.py | 1 | 2591 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os, stat, tempfile
from taskcoachlib import persistence
if os.name == 'nt':
from win32com.client import GetActiveObject
def getCurrentSelection():
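# Save each mail currently selected in Outlook to a temporary .eml file,
# inject an X-Outlook-ID header after the Subject: line, and return the
# list of file names.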
obj = GetActiveObject('Outlook.Application')
exp = obj.ActiveExplorer()
sel = exp.Selection
ret = []
for n in xrange(1, sel.Count + 1):
src = tempfile.NamedTemporaryFile(suffix='.eml') # Will be deleted automagically
src.close()
sel.Item(n).SaveAs(src.name, 0)
src = file(src.name, 'rb')
# Okay. In the case of HTML mails, Outlook doesn't put
# a blank line between the last header line and the
# body. This assumes that the last header is
# Subject:. Hope it's true.
# States:
# 0 still in headers
# 1 subject: header seen, blank line not written
# 2 all headers seen, blank line written, in body
name = persistence.get_temp_file(suffix='.eml')
dst = file(name, 'wb')
try:
s = 0
for line in src:
if s == 0:
dst.write(line)
if line.lower().startswith('subject:'):
dst.write('X-Outlook-ID: %s\r\n' % str(sel.Item(n).EntryID))
s = 1
elif s == 1:
dst.write('\r\n')
if line.strip() != '':
dst.write(line)
s = 2
else:
dst.write(line)
finally:
dst.close()
if os.name == 'nt':
os.chmod(name, stat.S_IREAD)
ret.append(name)
return ret
| gpl-3.0 | -6,198,454,282,548,796,000 | 34.986111 | 92 | 0.542648 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/aio/operations/_express_route_connections_operations.py | 1 | 21461 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteConnectionsOperations:
"""ExpressRouteConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs
) -> "_models.ExpressRouteConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(put_express_route_connection_parameters, 'ExpressRouteConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
put_express_route_connection_parameters: "_models.ExpressRouteConnection",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteConnection"]:
"""Creates a connection between an ExpressRoute gateway and an ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:param put_express_route_connection_parameters: Parameters required in an
ExpressRouteConnection PUT operation.
:type put_express_route_connection_parameters: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
put_express_route_connection_parameters=put_express_route_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> "_models.ExpressRouteConnection":
"""Gets the specified ExpressRouteConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the ExpressRoute connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
express_route_gateway_name: str,
connection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a connection to a ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
async def list(
self,
resource_group_name: str,
express_route_gateway_name: str,
**kwargs
) -> "_models.ExpressRouteConnectionList":
"""Lists ExpressRouteConnections.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnectionList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnectionList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnectionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnectionList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections'} # type: ignore
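# Hedged usage sketch (added to this copy; not part of the generated SDK file).
# It assumes an already-authenticated management client whose
# ``express_route_connections`` attribute is an instance of the operations class
# above; the resource names below are hypothetical placeholders.
async def _example_create_and_list_connections(client, connection_model):
    poller = await client.express_route_connections.begin_create_or_update(
        resource_group_name="example-rg",
        express_route_gateway_name="example-gw",
        connection_name="example-conn",
        put_express_route_connection_parameters=connection_model,
    )
    created = await poller.result()  # wait for the long-running operation
    listing = await client.express_route_connections.list(
        resource_group_name="example-rg",
        express_route_gateway_name="example-gw",
    )
    return created, listing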
| mit | -3,923,740,292,870,501,000 | 51.089806 | 250 | 0.665859 | false |
daq-tools/kotori | test/test_vendor_hiveeyes.py | 1 | 2713 | # -*- coding: utf-8 -*-
# (c) 2020-2021 Andreas Motl <[email protected]>
import logging
import pytest
import pytest_twisted
from bunch import Bunch
from test.conftest import create_machinery
from test.settings.mqttkit import PROCESS_DELAY_MQTT
from test.util import mqtt_json_sensor, sleep, InfluxWrapper, GrafanaWrapper
logger = logging.getLogger(__name__)
settings = Bunch(
influx_database='hiveeyes_itest',
influx_measurement='site_box_sensors',
mqtt_topic='hiveeyes/itest/site/box/data.json',
grafana_username='admin',
grafana_password='admin',
grafana_dashboards=['hiveeyes-itest-site-box', 'hiveeyes-itest'],
)
influx = InfluxWrapper(database=settings.influx_database, measurement=settings.influx_measurement)
grafana = GrafanaWrapper(settings=settings)
machinery_hiveeyes = create_machinery('./etc/test/hiveeyes.ini')
create_influxdb_hiveeyes = influx.make_create_db()
reset_influxdb_hiveeyes = influx.make_reset_measurement()
reset_grafana_hiveeyes = grafana.make_reset()
@pytest_twisted.inlineCallbacks
@pytest.mark.hiveeyes
def test_mqtt_to_grafana(machinery_hiveeyes, create_influxdb_hiveeyes, reset_influxdb_hiveeyes, reset_grafana_hiveeyes):
"""
    Publish a single reading in JSON format to MQTT and prove that
- it is stored in the InfluxDB database.
- a corresponding datasource and dashboards have been created in Grafana.
"""
# Submit a single measurement, without timestamp.
data = {
'temperature': 42.84,
'weight': 33.33,
}
yield mqtt_json_sensor(settings.mqtt_topic, data)
# Wait for some time to process the message.
yield sleep(PROCESS_DELAY_MQTT)
# Wait for Grafana to create its artefacts.
yield sleep(2)
    # Prove that data arrived in InfluxDB.
record = influx.get_first_record()
del record['time']
assert record == {u'temperature': 42.84, u'weight': 33.33}
yield record
    # Prove that Grafana is well provisioned.
logger.info('Grafana: Checking datasource')
datasource_names = []
for datasource in grafana.client.datasources.get():
datasource_names.append(datasource['name'])
assert settings.influx_database in datasource_names
logger.info('Grafana: Checking dashboards')
for dashboard_name in settings.grafana_dashboards:
dashboard = grafana.client.dashboards.db[dashboard_name].get()['dashboard']
if 'rows' in dashboard:
umbrella = dashboard['rows'][0]
else:
umbrella = dashboard
target = umbrella['panels'][0]['targets'][0]
#assert target['measurement'] == settings.influx_measurement
assert 'temperature' in target['query'] or 'weight' in target['query']
| agpl-3.0 | 8,705,837,564,512,757,000 | 33.782051 | 120 | 0.709915 | false |
funtoo/portage-funtoo | pym/portage/package/ebuild/_config/LocationsManager.py | 1 | 11387 | # Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = (
'LocationsManager',
)
import collections
import io
import warnings
import portage
from portage import os, eapi_is_supported, _encodings, _unicode_encode
from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
PROFILE_PATH, USER_CONFIG_PATH
from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
from portage.exception import DirectoryNotFound, ParseError
from portage.localization import _
from portage.util import ensure_dirs, grabfile, \
normalize_path, shlex_split, writemsg
from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.repository.config import parse_layout_conf, \
_portage1_profiles_allow_directories
_PORTAGE1_DIRECTORIES = frozenset([
'package.mask', 'package.provided',
'package.use', 'package.use.mask', 'package.use.force',
'use.mask', 'use.force'])
_profile_node = collections.namedtuple('_profile_node',
'location portage1_directories user_config')
_allow_parent_colon = frozenset(
["portage-2"])
class LocationsManager(object):
def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
target_root=None):
self.user_profile_dir = None
self._local_repo_conf_path = None
self.eprefix = eprefix
self.config_root = config_root
self.target_root = target_root
self._user_config = local_config
if self.eprefix is None:
self.eprefix = portage.const.EPREFIX
if self.config_root is None:
self.config_root = self.eprefix + os.sep
self.config_root = normalize_path(os.path.abspath(
self.config_root)).rstrip(os.path.sep) + os.path.sep
self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
self.config_profile_path = config_profile_path
def load_profiles(self, repositories, known_repository_paths):
known_repository_paths = set(os.path.realpath(x)
for x in known_repository_paths)
known_repos = []
for x in known_repository_paths:
try:
layout_data = {"profile-formats":
repositories.get_repo_for_location(x).profile_formats}
except KeyError:
layout_data = parse_layout_conf(x)[0]
# force a trailing '/' for ease of doing startswith checks
known_repos.append((x + '/', layout_data))
known_repos = tuple(known_repos)
if self.config_profile_path is None:
deprecated_profile_path = os.path.join(
self.config_root, 'etc', 'make.profile')
self.config_profile_path = \
os.path.join(self.config_root, PROFILE_PATH)
if isdir_raise_eaccess(self.config_profile_path):
self.profile_path = self.config_profile_path
if isdir_raise_eaccess(deprecated_profile_path) and not \
os.path.samefile(self.profile_path,
deprecated_profile_path):
# Don't warn if they refer to the same path, since
# that can be used for backward compatibility with
# old software.
writemsg("!!! %s\n" %
_("Found 2 make.profile dirs: "
"using '%s', ignoring '%s'") %
(self.profile_path, deprecated_profile_path),
noiselevel=-1)
else:
self.config_profile_path = deprecated_profile_path
if isdir_raise_eaccess(self.config_profile_path):
self.profile_path = self.config_profile_path
else:
self.profile_path = None
else:
# NOTE: repoman may pass in an empty string
# here, in order to create an empty profile
# for checking dependencies of packages with
# empty KEYWORDS.
self.profile_path = self.config_profile_path
# The symlink might not exist or might not be a symlink.
self.profiles = []
self.profiles_complex = []
if self.profile_path:
try:
self._addProfile(os.path.realpath(self.profile_path),
repositories, known_repos)
except ParseError as e:
if not portage._sync_disabled_warnings:
writemsg(_("!!! Unable to parse profile: '%s'\n") % self.profile_path, noiselevel=-1)
writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
self.profiles = []
self.profiles_complex = []
if self._user_config and self.profiles:
custom_prof = os.path.join(
self.config_root, CUSTOM_PROFILE_PATH)
if os.path.exists(custom_prof):
self.user_profile_dir = custom_prof
self.profiles.append(custom_prof)
self.profiles_complex.append(
_profile_node(custom_prof, True, True))
del custom_prof
self.profiles = tuple(self.profiles)
self.profiles_complex = tuple(self.profiles_complex)
def _check_var_directory(self, varname, var):
if not isdir_raise_eaccess(var):
writemsg(_("!!! Error: %s='%s' is not a directory. "
"Please correct this.\n") % (varname, var),
noiselevel=-1)
raise DirectoryNotFound(var)
def _addProfile(self, currentPath, repositories, known_repos):
current_abs_path = os.path.abspath(currentPath)
allow_directories = True
allow_parent_colon = True
repo_loc = None
compat_mode = False
eapi_file = os.path.join(currentPath, "eapi")
eapi = "0"
f = None
try:
f = io.open(_unicode_encode(eapi_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace')
eapi = f.readline().strip()
except IOError:
pass
else:
if not eapi_is_supported(eapi):
raise ParseError(_(
"Profile contains unsupported "
"EAPI '%s': '%s'") % \
(eapi, os.path.realpath(eapi_file),))
finally:
if f is not None:
f.close()
intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
if intersecting_repos:
# protect against nested repositories. Insane configuration, but the longest
# path will be the correct one.
repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
layout_data['profile-formats'] == ('portage-1-compat',)
allow_parent_colon = any(x in _allow_parent_colon
for x in layout_data['profile-formats'])
if compat_mode:
offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
offenders = sorted(x for x in offenders
if os.path.isdir(os.path.join(currentPath, x)))
if offenders:
warnings.warn(_(
"\nThe selected profile is implicitly using the 'portage-1' format:\n"
"\tprofile = %(profile_path)s\n"
"But this repository is not using that format:\n"
"\trepo = %(repo_name)s\n"
"This will break in the future. Please convert these dirs to files:\n"
"\t%(files)s\n"
"Or, add this line to the repository's layout.conf:\n"
"\tprofile-formats = portage-1")
% dict(profile_path=currentPath, repo_name=repo_loc,
files='\n\t'.join(offenders)))
parentsFile = os.path.join(currentPath, "parent")
if exists_raise_eaccess(parentsFile):
parents = grabfile(parentsFile)
if not parents:
raise ParseError(
_("Empty parent file: '%s'") % parentsFile)
for parentPath in parents:
abs_parent = parentPath[:1] == os.sep
if not abs_parent and allow_parent_colon:
parentPath = self._expand_parent_colon(parentsFile,
parentPath, repo_loc, repositories)
# NOTE: This os.path.join() call is intended to ignore
# currentPath if parentPath is already absolute.
parentPath = normalize_path(os.path.join(
currentPath, parentPath))
if abs_parent or repo_loc is None or \
not parentPath.startswith(repo_loc):
# It seems that this parent may point outside
# of the current repo, so realpath it.
parentPath = os.path.realpath(parentPath)
if exists_raise_eaccess(parentPath):
self._addProfile(parentPath, repositories, known_repos)
else:
raise ParseError(
_("Parent '%s' not found: '%s'") % \
(parentPath, parentsFile))
self.profiles.append(currentPath)
self.profiles_complex.append(
_profile_node(currentPath, allow_directories, False))
def _expand_parent_colon(self, parentsFile, parentPath,
repo_loc, repositories):
colon = parentPath.find(":")
if colon == -1:
return parentPath
if colon == 0:
if repo_loc is None:
raise ParseError(
_("Parent '%s' not found: '%s'") % \
(parentPath, parentsFile))
else:
parentPath = normalize_path(os.path.join(
repo_loc, 'profiles', parentPath[colon+1:]))
else:
p_repo_name = parentPath[:colon]
try:
p_repo_loc = repositories.get_location_for_name(p_repo_name)
except KeyError:
raise ParseError(
_("Parent '%s' not found: '%s'") % \
(parentPath, parentsFile))
else:
parentPath = normalize_path(os.path.join(
p_repo_loc, 'profiles', parentPath[colon+1:]))
return parentPath
def set_root_override(self, root_overwrite=None):
# Allow ROOT setting to come from make.conf if it's not overridden
# by the constructor argument (from the calling environment).
if self.target_root is None and root_overwrite is not None:
self.target_root = root_overwrite
if not self.target_root.strip():
self.target_root = None
if self.target_root is None:
self.target_root = "/"
self.target_root = normalize_path(os.path.abspath(
self.target_root)).rstrip(os.path.sep) + os.path.sep
ensure_dirs(self.target_root)
self._check_var_directory("ROOT", self.target_root)
self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
# make.globals should not be relative to config_root
# because it only contains constants. However, if EPREFIX
# is set then there are two possible scenarios:
# 1) If $ROOT == "/" then make.globals should be
# relative to EPREFIX.
# 2) If $ROOT != "/" then the correct location of
# make.globals needs to be specified in the constructor
# parameters, since it's a property of the host system
# (and the current config represents the target system).
self.global_config_path = GLOBAL_CONFIG_PATH
if self.eprefix:
if self.target_root == "/":
# case (1) above
self.global_config_path = os.path.join(self.eprefix,
GLOBAL_CONFIG_PATH.lstrip(os.sep))
else:
# case (2) above
# For now, just assume make.globals is relative
# to EPREFIX.
# TODO: Pass in more info to the constructor,
# so we know the host system configuration.
self.global_config_path = os.path.join(self.eprefix,
GLOBAL_CONFIG_PATH.lstrip(os.sep))
def set_port_dirs(self, portdir, portdir_overlay):
self.portdir = portdir
self.portdir_overlay = portdir_overlay
if self.portdir_overlay is None:
self.portdir_overlay = ""
self.overlay_profiles = []
for ov in shlex_split(self.portdir_overlay):
ov = normalize_path(ov)
profiles_dir = os.path.join(ov, "profiles")
if isdir_raise_eaccess(profiles_dir):
self.overlay_profiles.append(profiles_dir)
self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
self.profile_and_user_locations = self.profile_locations[:]
if self._user_config:
self.profile_and_user_locations.append(self.abs_user_config)
self.profile_locations = tuple(self.profile_locations)
self.profile_and_user_locations = tuple(self.profile_and_user_locations)
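# Illustrative sketch (added to this copy; not part of portage). A simplified,
# standalone restatement of the "parent: repo" rule implemented in
# _expand_parent_colon above: "repo:path" resolves against that repository's
# profiles directory and a bare leading ":" resolves against the current
# repository. ``repo_locations`` is a hypothetical mapping of repository names
# to their locations.
def _expand_parent_colon_example(parent_line, current_repo_loc, repo_locations):
	colon = parent_line.find(":")
	if colon == -1:
		return parent_line
	if colon == 0:
		base = current_repo_loc
	else:
		base = repo_locations[parent_line[:colon]]
	return normalize_path(os.path.join(base, 'profiles', parent_line[colon+1:]))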
| gpl-2.0 | -6,579,109,535,630,492,000 | 34.47352 | 98 | 0.695179 | false |
SRabbelier/Melange | thirdparty/google_appengine/google/appengine/dist/py_zipimport.py | 1 | 9300 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pure Python zipfile importer.
This approximates the standard zipimport module, which isn't supported
by Google App Engine. See PEP 302 for more information about the API
for import hooks.
Usage:
import py_zipimport
As a side effect of importing, the module overrides sys.path_hooks,
and also creates an alias 'zipimport' for itself. When your app is
running in Google App Engine production, you don't even need to import
it, since this is already done for you. In the Google App Engine SDK
this module is not used; instead, the standard zipimport module is
used.
"""
__all__ = ['ZipImportError', 'zipimporter']
import os
import sys
import types
import UserDict
import zipfile
_SEARCH_ORDER = [
('.py', False),
('/__init__.py', True),
]
_zipfile_cache = {}
class ZipImportError(ImportError):
"""Exception raised by zipimporter objects."""
class zipimporter:
"""A PEP-302-style importer that can import from a zipfile.
Just insert or append this class (not an instance) to sys.path_hooks
and you're in business. Instances satisfy both the 'importer' and
'loader' APIs specified in PEP 302.
"""
def __init__(self, path_entry):
"""Constructor.
Args:
path_entry: The entry in sys.path. This should be the name of an
existing zipfile possibly with a path separator and a prefix
path within the archive appended, e.g. /x/django.zip or
/x/django.zip/foo/bar.
Raises:
ZipImportError if the path_entry does not represent a valid
zipfile with optional prefix.
"""
archive = path_entry
prefix = ''
while not os.path.lexists(archive):
head, tail = os.path.split(archive)
if head == archive:
msg = 'Nothing found for %r' % path_entry
raise ZipImportError(msg)
archive = head
prefix = os.path.join(tail, prefix)
if not os.path.isfile(archive):
msg = 'Non-file %r found for %r' % (archive, path_entry)
raise ZipImportError(msg)
self.archive = archive
self.prefix = os.path.join(prefix, '')
self.zipfile = _zipfile_cache.get(archive)
if self.zipfile is None:
try:
self.zipfile = zipfile.ZipFile(self.archive)
except (EnvironmentError, zipfile.BadZipfile), err:
msg = 'Can\'t open zipfile %s: %s: %s' % (self.archive,
err.__class__.__name__, err)
import logging
logging.warn(msg)
raise ZipImportError(msg)
else:
_zipfile_cache[archive] = self.zipfile
import logging
logging.info('zipimporter(%r, %r)', archive, prefix)
def __repr__(self):
"""Return a string representation matching zipimport.c."""
name = self.archive
if self.prefix:
name = os.path.join(name, self.prefix)
return '<zipimporter object "%s">' % name
def _get_info(self, fullmodname):
"""Internal helper for find_module() and load_module().
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
Returns:
A tuple (submodname, is_package, relpath) where:
submodname: The final component of the module name, e.g. 'mail'.
is_package: A bool indicating whether this is a package.
relpath: The path to the module's source code within to the zipfile.
Raises:
ImportError if the module is not found in the archive.
"""
parts = fullmodname.split('.')
submodname = parts[-1]
for suffix, is_package in _SEARCH_ORDER:
relpath = os.path.join(self.prefix,
submodname + suffix.replace('/', os.sep))
try:
self.zipfile.getinfo(relpath.replace(os.sep, '/'))
except KeyError:
pass
else:
return submodname, is_package, relpath
msg = ('Can\'t find module %s in zipfile %s with prefix %r' %
(fullmodname, self.archive, self.prefix))
raise ZipImportError(msg)
def _get_source(self, fullmodname):
"""Internal helper for load_module().
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
Returns:
A tuple (submodname, is_package, fullpath, source) where:
submodname: The final component of the module name, e.g. 'mail'.
is_package: A bool indicating whether this is a package.
fullpath: The path to the module's source code including the
zipfile's filename.
source: The module's source code.
Raises:
ImportError if the module is not found in the archive.
"""
submodname, is_package, relpath = self._get_info(fullmodname)
fullpath = '%s%s%s' % (self.archive, os.sep, relpath)
source = self.zipfile.read(relpath.replace(os.sep, '/'))
source = source.replace('\r\n', '\n')
source = source.replace('\r', '\n')
return submodname, is_package, fullpath, source
def find_module(self, fullmodname, path=None):
"""PEP-302-compliant find_module() method.
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
path: Optional and ignored; present for API compatibility only.
Returns:
None if the module isn't found in the archive; self if it is found.
"""
try:
submodname, is_package, relpath = self._get_info(fullmodname)
except ImportError:
return None
else:
return self
def load_module(self, fullmodname):
"""PEP-302-compliant load_module() method.
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
Returns:
The module object constructed from the source code.
Raises:
SyntaxError if the module's source code is syntactically incorrect.
ImportError if there was a problem accessing the source code.
Whatever else can be raised by executing the module's source code.
"""
submodname, is_package, fullpath, source = self._get_source(fullmodname)
code = compile(source, fullpath, 'exec')
mod = sys.modules.get(fullmodname)
try:
if mod is None:
mod = sys.modules[fullmodname] = types.ModuleType(fullmodname)
mod.__loader__ = self
mod.__file__ = fullpath
mod.__name__ = fullmodname
if is_package:
mod.__path__ = [os.path.dirname(mod.__file__)]
exec code in mod.__dict__
except:
if fullmodname in sys.modules:
del sys.modules[fullmodname]
raise
return mod
def get_data(self, fullpath):
"""Return (binary) content of a data file in the zipfile."""
prefix = os.path.join(self.archive, '')
if fullpath.startswith(prefix):
relpath = fullpath[len(prefix):]
elif os.path.isabs(fullpath):
raise IOError('Absolute path %r doesn\'t start with zipfile name %r' %
(fullpath, prefix))
else:
relpath = fullpath
try:
return self.zipfile.read(relpath.replace(os.sep, '/'))
except KeyError:
raise IOError('Path %r not found in zipfile %r' %
(relpath, self.archive))
def is_package(self, fullmodname):
"""Return whether a module is a package."""
submodname, is_package, relpath = self._get_info(fullmodname)
return is_package
def get_code(self, fullmodname):
"""Return bytecode for a module."""
submodname, is_package, fullpath, source = self._get_source(fullmodname)
return compile(source, fullpath, 'exec')
def get_source(self, fullmodname):
"""Return source code for a module."""
submodname, is_package, fullpath, source = self._get_source(fullmodname)
return source
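# Hedged usage sketch (added to this copy; not part of the App Engine module).
# It builds a tiny archive on the fly and imports a module from it through the
# zipimporter defined above; ``tmpdir`` is a hypothetical writable directory.
def _selftest_zipimporter(tmpdir):
  archive = os.path.join(tmpdir, 'example.zip')
  zf = zipfile.ZipFile(archive, 'w')
  zf.writestr('hello.py', 'GREETING = "hi"\n')
  zf.close()
  importer = zipimporter(archive)
  mod = importer.load_module('hello')
  assert mod.GREETING == 'hi'
  return mod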
class ZipFileCache(UserDict.DictMixin):
"""Helper class to export archive data in _zip_directory_cache.
Just take the info from _zipfile_cache and convert it as required.
"""
def __init__(self, archive):
_zipfile_cache[archive]
self._archive = archive
def keys(self):
return _zipfile_cache[self._archive].namelist()
def __getitem__(self, filename):
info = _zipfile_cache[self._archive].getinfo(filename.replace(os.sep, '/'))
dt = info.date_time
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
return (os.path.join(self._archive, info.filename), info.compress_type,
info.compress_size, info.file_size, info.header_offset, dostime,
dosdate, info.CRC)
class ZipDirectoryCache(UserDict.DictMixin):
"""Helper class to export _zip_directory_cache."""
def keys(self):
return _zipfile_cache.keys()
def __getitem__(self, archive):
return ZipFileCache(archive)
_zip_directory_cache = ZipDirectoryCache()
sys.modules['zipimport'] = sys.modules[__name__]
sys.path_hooks[:] = [zipimporter]
| apache-2.0 | 6,846,432,237,036,810,000 | 30.958763 | 79 | 0.653548 | false |
rajeevs1992/mailmancli | src/mailmanclient/cli/lib/mailman_utils.py | 1 | 4303 | #!/usr/bin/python
# Copyright (C) 2010-2014 by the Free Software Foundation, Inc.
#
# This file is part of mailman.client.
#
# mailman.client is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, version 3 of the License.
#
# mailman.client is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with mailman.client. If not, see <http://www.gnu.org/licenses/>.
#
# This file is part of the Mailman CLI Project, Google Summer Of Code, 2014
#
# Author : Rajeev S <[email protected]>
# Mentors : Stephen J. Turnbull <[email protected]>
# Abhilash Raj <[email protected]>
# Barry Warsaw <[email protected]>
from mailmanclient import Client, MailmanConnectionError
from mailman.config import config
from mailmanclient.cli.lib.utils import Utils
class MailmanUtils(Utils):
""" Utilities relating to Mailman
Client or the REST API
"""
def __init__(self):
config.load()
def connect(self, *args, **kwargs):
""" Connect to Mailman REST API using the arguments specified.
Missing arguments are decided from the mailman.cfg file
return a client object.
"""
host, port, username, password = self.get_credentials_from_config()
if 'host' in kwargs and kwargs['host']:
host = kwargs['host']
if 'port' in kwargs and kwargs['port']:
port = kwargs['port']
if 'username' in kwargs and kwargs['username']:
username = kwargs['username']
if 'password' in kwargs and kwargs['password']:
password = kwargs['password']
client = Client('%s:%s/3.0' % (host, port),
username,
password)
try:
client.system
except MailmanConnectionError as e:
self.error(e)
exit(1)
return client
def get_credentials_from_config(self):
""" Returns the credentials required for logging on to
the Mailman REST API, that are read from the Mailman
configuration.
"""
host = 'http://' + config.schema['webservice']['hostname']
port = config.schema['webservice']['port']
username = config.schema['webservice']['admin_user']
password = config.schema['webservice']['admin_pass']
return host, port, username, password
def get_new_domain_name(self):
""" Generates the name of a non existent domain """
client = self.connect()
while True:
domain_name = self.get_random_string(10) + '.com'
try:
client.get_domain(domain_name)
continue
except Exception:
return domain_name
def add_shell_vars(self, arg, shell):
""" Replaces the variables used in the command with thier respective
values if the values are present in the shell environment, else
use the variable as such.
"""
if not shell.env_on or not arg:
return arg
if arg[0] == '$' and arg[1:] in shell.env:
arg = shell.env[arg[1:]]
return arg
def add_reserved_vars(self, args, shell):
""" Adds the reserved variables to a filter query. The reserved variables
are domain, list and user, which are added to respective scopes and
        attribute names.
"""
scope = args['scope']
if 'filters' not in args:
args['filters'] = []
if not shell.env_on:
return args
filters = args['filters']
if scope == 'list':
if 'domain' in shell.env:
filters.append(('mail_host', '=', shell.env['domain']))
elif scope == 'user':
if 'list' in shell.env:
filters.append((shell.env['list'], 'in', 'subscriptions'))
args['filters'] = filters
return args
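# Illustrative sketch (added to this copy; not part of mailman.client). It shows
# the expansion contract of add_shell_vars with a minimal stand-in shell object;
# the ``_FakeShell`` class and its environment are purely hypothetical.
def _demo_add_shell_vars():
    class _FakeShell(object):
        env_on = True
        env = {'domain': 'lists.example.com'}
    utils = MailmanUtils.__new__(MailmanUtils)  # bypass __init__ / config.load()
    shell = _FakeShell()
    assert utils.add_shell_vars('$domain', shell) == 'lists.example.com'
    assert utils.add_shell_vars('literal', shell) == 'literal'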
| lgpl-3.0 | -4,896,144,332,977,876,000 | 35.777778 | 81 | 0.599814 | false |
jseabold/statsmodels | statsmodels/sandbox/stats/contrast_tools.py | 5 | 28790 | '''functions to work with contrasts for multiple tests
contrast matrices for comparing all pairs, all levels to reference level, ...
extension to 2-way groups in progress
TwoWay: class for bringing two-way analysis together and try out
various helper functions
Idea for second part
- get all transformation matrices to move in between different full rank
parameterizations
- standardize to one parameterization to get all interesting effects.
- multivariate normal distribution
- exploit or expand what we have in LikelihoodResults, cov_params, f_test,
t_test, example: resols_dropf_full.cov_params(C2)
- connect to new multiple comparison for contrast matrices, based on
multivariate normal or t distribution (Hothorn, Bretz, Westfall)
'''
from numpy.testing import assert_equal
import numpy as np
#next 3 functions copied from multicomp.py
def contrast_allpairs(nm):
'''contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
'''
contr = []
for i in range(nm):
for j in range(i+1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr)
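# Illustrative sketch (added to this copy; not part of the original module):
# for three levels the all-pairs matrix has one row per pair of levels.
def _demo_contrast_allpairs():
    contr = contrast_allpairs(3)
    expected = np.array([[1., -1., 0.],
                         [1., 0., -1.],
                         [0., 1., -1.]])
    assert np.allclose(contr, expected)
    return contr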
def contrast_all_one(nm):
'''contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
'''
contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1)))
return contr
def contrast_diff_mean(nm):
'''contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons
'''
return np.eye(nm) - np.ones((nm,nm))/nm
def signstr(x, noplus=False):
if x in [-1,0,1]:
if not noplus:
return '+' if np.sign(x)>=0 else '-'
else:
return '' if np.sign(x)>=0 else '-'
else:
return str(x)
def contrast_labels(contrasts, names, reverse=False):
if reverse:
sl = slice(None, None, -1)
else:
sl = slice(None)
labels = [''.join(['%s%s' % (signstr(c, noplus=True),v)
                  for c,v in list(zip(row, names))[sl] if c != 0])
for row in contrasts]
return labels
def contrast_product(names1, names2, intgroup1=None, intgroup2=None, pairs=False):
'''build contrast matrices for products of two categorical variables
this is an experimental script and should be converted to a class
Parameters
----------
names1, names2 : lists of strings
contains the list of level labels for each categorical variable
intgroup1, intgroup2 : ndarrays TODO: this part not tested, finished yet
categorical variable
Notes
-----
This creates a full rank matrix. It does not do all pairwise comparisons,
parameterization is using contrast_all_one to get differences with first
level.
? does contrast_all_pairs work as a plugin to get all pairs ?
'''
n1 = len(names1)
n2 = len(names2)
names_prod = ['%s_%s' % (i,j) for i in names1 for j in names2]
ee1 = np.zeros((1,n1))
ee1[0,0] = 1
if not pairs:
dd = np.r_[ee1, -contrast_all_one(n1)]
else:
dd = np.r_[ee1, -contrast_allpairs(n1)]
contrast_prod = np.kron(dd[1:], np.eye(n2))
names_contrast_prod0 = contrast_labels(contrast_prod, names_prod, reverse=True)
names_contrast_prod = [''.join(['%s%s' % (signstr(c, noplus=True),v)
                           for c,v in list(zip(row, names_prod))[::-1] if c != 0])
for row in contrast_prod]
ee2 = np.zeros((1,n2))
ee2[0,0] = 1
#dd2 = np.r_[ee2, -contrast_all_one(n2)]
if not pairs:
dd2 = np.r_[ee2, -contrast_all_one(n2)]
else:
dd2 = np.r_[ee2, -contrast_allpairs(n2)]
contrast_prod2 = np.kron(np.eye(n1), dd2[1:])
names_contrast_prod2 = [''.join(['%s%s' % (signstr(c, noplus=True),v)
                           for c,v in list(zip(row, names_prod))[::-1] if c != 0])
for row in contrast_prod2]
    if (intgroup1 is not None) and (intgroup2 is not None):
d1, _ = dummy_1d(intgroup1)
d2, _ = dummy_1d(intgroup2)
dummy = dummy_product(d1, d2)
else:
dummy = None
return (names_prod, contrast_prod, names_contrast_prod,
contrast_prod2, names_contrast_prod2, dummy)
def dummy_1d(x, varname=None):
'''dummy variable for id integer groups
Parameters
----------
x : ndarray, 1d
categorical variable, requires integers if varname is None
varname : str
name of the variable used in labels for category levels
Returns
-------
dummy : ndarray, 2d
array of dummy variables, one column for each level of the
category (full set)
labels : list[str]
labels for the columns, i.e. levels of each category
Notes
-----
use tools.categorical instead for more more options
See Also
--------
statsmodels.tools.categorical
Examples
--------
>>> x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
>>> dummy_1d(x, varname='gender')
(array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
'''
if varname is None: #assumes integer
labels = ['level_%d' % i for i in range(x.max() + 1)]
return (x[:,None]==np.arange(x.max()+1)).astype(int), labels
else:
grouplabels = np.unique(x)
labels = [varname + '_%s' % str(i) for i in grouplabels]
return (x[:,None]==grouplabels).astype(int), labels
def dummy_product(d1, d2, method='full'):
'''dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, assumes full set for methods 'drop-last'
and 'drop-first'
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, encoding of intersection of
categories.
The drop methods provide a difference dummy encoding:
(constant, main effects, interaction effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
dummy matrix.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
dd = (d1[:,:,None]*d2[:,None,:]).reshape(d1.shape[0],-1)
elif method == 'drop-last': #same as SAS transreg
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,:-1],d12rl))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,1:],d12r))
else:
raise ValueError('method not recognized')
return dd
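# Illustrative sketch (added to this copy; not part of the original module):
# 'full' encodes every (level1, level2) cell, while the drop methods give a
# full-rank constant + main effects + interaction parameterization.
def _demo_dummy_product():
    d1, _ = dummy_1d(np.repeat(np.arange(2), 2))  # factor 1: 0, 0, 1, 1
    d2, _ = dummy_1d(np.tile(np.arange(2), 2))    # factor 2: 0, 1, 0, 1
    full = dummy_product(d1, d2, method='full')
    dropf = dummy_product(d1, d2, method='drop-first')
    assert full.shape == (4, 4)   # one column per cell
    assert dropf.shape == (4, 4)  # constant + 1 + 1 main effects + 1 interaction
    return full, dropf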
def dummy_limits(d):
'''start and endpoints of groups in a sorted dummy variable array
helper function for nested categories
Examples
--------
>>> d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> dummy_limits(d1)
(array([0, 4, 8]), array([ 4, 8, 12]))
get group slices from an array
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
'''
nobs, nvars = d.shape
start1, col1 = np.nonzero(np.diff(d,axis=0)==1)
end1, col1_ = np.nonzero(np.diff(d,axis=0)==-1)
cc = np.arange(nvars)
#print(cc, np.r_[[0], col1], np.r_[col1_, [nvars-1]]
if ((not (np.r_[[0], col1] == cc).all())
or (not (np.r_[col1_, [nvars-1]] == cc).all())):
raise ValueError('dummy variable is not sorted')
start = np.r_[[0], start1+1]
end = np.r_[end1+1, [nobs]]
return start, end
def dummy_nested(d1, d2, method='full'):
    '''unfinished and incomplete, mainly a copy-paste of dummy_product
dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, d2 is assumed to be nested in d1
Assumes full set for methods 'drop-last' and 'drop-first'.
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, which in this case is d2.
The drop methods provide an effects encoding:
(constant, main effects, subgroup effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
encoding.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
return d2
start1, end1 = dummy_limits(d1)
start2, end2 = dummy_limits(d2)
first = np.in1d(start2, start1)
last = np.in1d(end2, end1)
equal = (first == last)
col_dropf = ~first*~equal
col_dropl = ~last*~equal
if method == 'drop-last':
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,col_dropl]))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,col_dropf]))
else:
raise ValueError('method not recognized')
return dd, col_dropf, col_dropl
class DummyTransform(object):
'''Conversion between full rank dummy encodings
y = X b + u
b = C a
a = C^{-1} b
y = X C a + u
define Z = X C, then
y = Z a + u
contrasts:
R_b b = r
R_a a = R_b C a = r
where R_a = R_b C
Here C is the transform matrix, with dot_left and dot_right as the main
methods, and the same for the inverse transform matrix, C^{-1}
Note:
- The class was mainly written to keep left and right straight.
- No checking is done.
- not sure yet if method names make sense
'''
def __init__(self, d1, d2):
'''C such that d1 C = d2, with d1 = X, d2 = Z
should be (x, z) in arguments ?
'''
self.transf_matrix = np.linalg.lstsq(d1, d2, rcond=-1)[0]
self.invtransf_matrix = np.linalg.lstsq(d2, d1, rcond=-1)[0]
def dot_left(self, a):
''' b = C a
'''
return np.dot(self.transf_matrix, a)
def dot_right(self, x):
''' z = x C
'''
return np.dot(x, self.transf_matrix)
def inv_dot_left(self, b):
''' a = C^{-1} b
'''
return np.dot(self.invtransf_matrix, b)
def inv_dot_right(self, z):
''' x = z C^{-1}
'''
return np.dot(z, self.invtransf_matrix)
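# Illustrative sketch (added to this copy; not part of the original module):
# round-trip parameters between the full dummy coding and the drop-first coding
# with DummyTransform; fitted values agree under both parameterizations.
def _demo_dummy_transform():
    d1, _ = dummy_1d(np.repeat(np.arange(2), 6))
    d2, _ = dummy_1d(np.tile(np.arange(2), 6))
    dd_full = dummy_product(d1, d2, method='full')
    dd_dropf = dummy_product(d1, d2, method='drop-first')
    tr = DummyTransform(dd_full, dd_dropf)
    a = np.arange(1., dd_dropf.shape[1] + 1)  # parameters in drop-first coding
    b = tr.dot_left(a)                        # b = C a, full-dummy coding
    assert np.allclose(np.dot(dd_full, b), np.dot(dd_dropf, a))
    return b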
def groupmean_d(x, d):
'''groupmeans using dummy variables
Parameters
----------
x : array_like, ndim
data array, tested for 1,2 and 3 dimensions
d : ndarray, 1d
dummy variable, needs to have the same length
as x in axis 0.
Returns
-------
groupmeans : ndarray, ndim-1
means for each group along axis 0, the levels
of the groups are the last axis
Notes
-----
This will be memory intensive if there are many levels
in the categorical variable, i.e. many columns in the
dummy variable. In this case it is recommended to use
a more efficient version.
'''
x = np.asarray(x)
## if x.ndim == 1:
## nvars = 1
## else:
nvars = x.ndim + 1
sli = [slice(None)] + [None]*(nvars-2) + [slice(None)]
    return (x[...,None] * d[tuple(sli)]).sum(0)*1./d.sum(0)
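# Illustrative sketch (added to this copy; not part of the original module):
# group means of a 1d array for three groups encoded by a full dummy matrix.
def _demo_groupmean_d():
    x = np.arange(6.)
    d, _ = dummy_1d(np.repeat(np.arange(3), 2))  # groups: 0, 0, 1, 1, 2, 2
    means = groupmean_d(x, d)
    assert np.allclose(means, [0.5, 2.5, 4.5])
    return means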
class TwoWay(object):
'''a wrapper class for two way anova type of analysis with OLS
currently mainly to bring things together
Notes
-----
    unclear: adding multiple tests might assume block design or orthogonality
This estimates the full dummy version with OLS.
The drop first dummy representation can be recovered through the
transform method.
TODO: add more methods, tests, pairwise, multiple, marginal effects
    try out what can be added for user-friendly access.
missing: ANOVA table
'''
def __init__(self, endog, factor1, factor2, varnames=None):
self.nobs = factor1.shape[0]
if varnames is None:
vname1 = 'a'
vname2 = 'b'
else:
            vname1, vname2 = varnames
self.d1, self.d1_labels = d1, d1_labels = dummy_1d(factor1, vname1)
self.d2, self.d2_labels = d2, d2_labels = dummy_1d(factor2, vname2)
self.nlevel1 = nlevel1 = d1.shape[1]
self.nlevel2 = nlevel2 = d2.shape[1]
#get product dummies
res = contrast_product(d1_labels, d2_labels)
prodlab, C1, C1lab, C2, C2lab, _ = res
self.prod_label, self.C1, self.C1_label, self.C2, self.C2_label, _ = res
dp_full = dummy_product(d1, d2, method='full')
dp_dropf = dummy_product(d1, d2, method='drop-first')
self.transform = DummyTransform(dp_full, dp_dropf)
#estimate the model
self.nvars = dp_full.shape[1]
self.exog = dp_full
self.resols = sm.OLS(endog, dp_full).fit()
self.params = self.resols.params
#get transformed parameters, (constant, main, interaction effect)
self.params_dropf = self.transform.inv_dot_left(self.params)
self.start_interaction = 1 + (nlevel1 - 1) + (nlevel2 - 1)
self.n_interaction = self.nvars - self.start_interaction
#convert to cached property
def r_nointer(self):
'''contrast/restriction matrix for no interaction
'''
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
return R_nointer_transf
def ttest_interaction(self):
        '''t-tests that the interaction terms are zero
'''
#use self.r_nointer instead
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
t_res = self.resols.t_test(R_nointer_transf)
return t_res
def ftest_interaction(self):
        '''F-test that all interaction terms are zero
'''
R_nointer_transf = self.r_nointer()
return self.resols.f_test(R_nointer_transf)
def ttest_conditional_effect(self, factorind):
if factorind == 1:
return self.resols.t_test(self.C1), self.C1_label
else:
return self.resols.t_test(self.C2), self.C2_label
def summary_coeff(self):
from statsmodels.iolib import SimpleTable
params_arr = self.params.reshape(self.nlevel1, self.nlevel2)
stubs = self.d1_labels
headers = self.d2_labels
title = 'Estimated Coefficients by factors'
table_fmt = dict(
data_fmts = ["%#10.4g"]*self.nlevel2)
return SimpleTable(params_arr, headers, stubs, title=title,
txt_fmt=table_fmt)
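# Illustrative usage of TwoWay (hedged sketch; it mirrors the larger example in
# the __main__ block below, with made-up factor layouts and effect sizes):
#
# >>> f1 = np.repeat(np.arange(3), 4)          # factor with 3 levels
# >>> f2 = np.arange(12) // 2 % 2              # factor with 2 levels
# >>> y = f1 + 0.5 * f2 + 0.01 * np.random.randn(12)
# >>> tw = TwoWay(y, f1, f2)
# >>> tw.ftest_interaction().pvalue            # joint test: interaction terms zero
# >>> tw.summary_coeff()                       # estimated coefficients by factor levels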
# --------------- tests
# TODO: several tests still missing, several are in the example with print
class TestContrastTools(object):
def __init__(self):
self.v1name = ['a0', 'a1', 'a2']
self.v2name = ['b0', 'b1']
self.d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
def test_dummy_1d(self):
x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
d, labels = (np.array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
res_d, res_labels = dummy_1d(x, varname='gender')
assert_equal(res_d, d)
assert_equal(res_labels, labels)
def test_contrast_product(self):
res_cp = contrast_product(self.v1name, self.v2name)
res_t = [0]*6
res_t[0] = ['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1', 'a2_b0', 'a2_b1']
res_t[1] = np.array([[-1., 0., 1., 0., 0., 0.],
[ 0., -1., 0., 1., 0., 0.],
[-1., 0., 0., 0., 1., 0.],
[ 0., -1., 0., 0., 0., 1.]])
res_t[2] = ['a1_b0-a0_b0', 'a1_b1-a0_b1', 'a2_b0-a0_b0', 'a2_b1-a0_b1']
res_t[3] = np.array([[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 1.]])
res_t[4] = ['a0_b1-a0_b0', 'a1_b1-a1_b0', 'a2_b1-a2_b0']
for ii in range(5):
np.testing.assert_equal(res_cp[ii], res_t[ii], err_msg=str(ii))
def test_dummy_limits(self):
b,e = dummy_limits(self.d1)
assert_equal(b, np.array([0, 4, 8]))
assert_equal(e, np.array([ 4, 8, 12]))
if __name__ == '__main__':
tt = TestContrastTools()
tt.test_contrast_product()
tt.test_dummy_1d()
tt.test_dummy_limits()
import statsmodels.api as sm
examples = ['small', 'large', None][1]
v1name = ['a0', 'a1', 'a2']
v2name = ['b0', 'b1']
res_cp = contrast_product(v1name, v2name)
print(res_cp)
y = np.arange(12)
x1 = np.arange(12)//4
x2 = np.arange(12)//2 % 2
if 'small' in examples:
d1, d1_labels = dummy_1d(x1)
d2, d2_labels = dummy_1d(x2)
if 'large' in examples:
x1 = np.repeat(x1, 5, axis=0)
x2 = np.repeat(x2, 5, axis=0)
nobs = x1.shape[0]
d1, d1_labels = dummy_1d(x1)
d2, d2_labels = dummy_1d(x2)
dd_full = dummy_product(d1, d2, method='full')
dd_dropl = dummy_product(d1, d2, method='drop-last')
dd_dropf = dummy_product(d1, d2, method='drop-first')
#Note: full parameterization of dummies is orthogonal
#np.eye(6)*10 in "large" example
print((np.dot(dd_full.T, dd_full) == np.diag(dd_full.sum(0))).all())
#check that transforms work
#generate 3 data sets with the 3 different parameterizations
effect_size = [1., 0.01][1]
noise_scale = [0.001, 0.1][0]
noise = noise_scale * np.random.randn(nobs)
beta = effect_size * np.arange(1,7)
ydata_full = (dd_full * beta).sum(1) + noise
ydata_dropl = (dd_dropl * beta).sum(1) + noise
ydata_dropf = (dd_dropf * beta).sum(1) + noise
resols_full_full = sm.OLS(ydata_full, dd_full).fit()
resols_full_dropf = sm.OLS(ydata_full, dd_dropf).fit()
params_f_f = resols_full_full.params
params_f_df = resols_full_dropf.params
resols_dropf_full = sm.OLS(ydata_dropf, dd_full).fit()
resols_dropf_dropf = sm.OLS(ydata_dropf, dd_dropf).fit()
params_df_f = resols_dropf_full.params
params_df_df = resols_dropf_dropf.params
tr_of = np.linalg.lstsq(dd_dropf, dd_full, rcond=-1)[0]
tr_fo = np.linalg.lstsq(dd_full, dd_dropf, rcond=-1)[0]
print(np.dot(tr_fo, params_df_df) - params_df_f)
print(np.dot(tr_of, params_f_f) - params_f_df)
transf_f_df = DummyTransform(dd_full, dd_dropf)
print(np.max(np.abs((dd_full - transf_f_df.inv_dot_right(dd_dropf)))))
print(np.max(np.abs((dd_dropf - transf_f_df.dot_right(dd_full)))))
print(np.max(np.abs((params_df_df
- transf_f_df.inv_dot_left(params_df_f)))))
np.max(np.abs((params_f_df
- transf_f_df.inv_dot_left(params_f_f))))
prodlab, C1, C1lab, C2, C2lab,_ = contrast_product(v1name, v2name)
print('\ntvalues for no effect of factor 1')
print('each test is conditional on a level of factor 2')
print(C1lab)
print(resols_dropf_full.t_test(C1).tvalue)
print('\ntvalues for no effect of factor 2')
print('each test is conditional on a level of factor 1')
print(C2lab)
print(resols_dropf_full.t_test(C2).tvalue)
#covariance matrix of restrictions C2, note: orthogonal
resols_dropf_full.cov_params(C2)
#testing for no interaction effect
R_noint = np.hstack((np.zeros((2,4)), np.eye(2)))
inter_direct = resols_full_dropf.tvalues[-2:]
inter_transf = resols_full_full.t_test(transf_f_df.inv_dot_right(R_noint)).tvalue
print(np.max(np.abs((inter_direct - inter_transf))))
#now with class version
tw = TwoWay(ydata_dropf, x1, x2)
print(tw.ttest_interaction().tvalue)
print(tw.ttest_interaction().pvalue)
print(tw.ftest_interaction().fvalue)
print(tw.ftest_interaction().pvalue)
print(tw.ttest_conditional_effect(1)[0].tvalue)
print(tw.ttest_conditional_effect(2)[0].tvalue)
print(tw.summary_coeff())
''' documentation for early examples while developing - some have changed already
>>> y = np.arange(12)
>>> y
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
>>> x1 = np.arange(12)//4
>>> x1
array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
>>> x2 = np.arange(12)//2%2
>>> x2
array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1])
>>> d1 = dummy_1d(x1)
>>> d1
array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> d2 = dummy_1d(x2)
>>> d2
array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]])
>>> d12 = dummy_product(d1, d2)
>>> d12
array([[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]])
>>> d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
>>> np.column_stack((np.ones(d1.shape[0]), d1[:,:-1], d2[:,:-1],d12rl))
array([[ 1., 1., 0., 1., 1., 0.],
[ 1., 1., 0., 1., 1., 0.],
[ 1., 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0., 0.],
[ 1., 0., 1., 1., 0., 1.],
[ 1., 0., 1., 1., 0., 1.],
[ 1., 0., 1., 0., 0., 0.],
[ 1., 0., 1., 0., 0., 0.],
[ 1., 0., 0., 1., 0., 0.],
[ 1., 0., 0., 1., 0., 0.],
[ 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0.]])
'''
#nprod = ['%s_%s' % (i,j) for i in ['a0', 'a1', 'a2'] for j in ['b0', 'b1']]
#>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod) if c != 0])
# for row in np.kron(dd[1:], np.eye(2))]
'''
>>> nprod = ['%s_%s' % (i,j) for i in ['a0', 'a1', 'a2'] for j in ['b0', 'b1']]
>>> nprod
['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1', 'a2_b0', 'a2_b1']
>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod) if c != 0]) for row in np.kron(dd[1:], np.eye(2))]
['-a0b0+a1b0', '-a0b1+a1b1', '-a0b0+a2b0', '-a0b1+a2b1']
>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod)[::-1] if c != 0]) for row in np.kron(dd[1:], np.eye(2))]
['+a1_b0-a0_b0', '+a1_b1-a0_b1', '+a2_b0-a0_b0', '+a2_b1-a0_b1']
>>> np.r_[[[1,0,0,0,0]],contrast_all_one(5)]
array([[ 1., 0., 0., 0., 0.],
[ 1., -1., 0., 0., 0.],
[ 1., 0., -1., 0., 0.],
[ 1., 0., 0., -1., 0.],
[ 1., 0., 0., 0., -1.]])
>>> idxprod = [(i,j) for i in range(3) for j in range(2)]
>>> idxprod
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
>>> np.array(idxprod).reshape(2,3,2,order='F')[:,:,0]
array([[0, 1, 2],
[0, 1, 2]])
>>> np.array(idxprod).reshape(2,3,2,order='F')[:,:,1]
array([[0, 0, 0],
[1, 1, 1]])
>>> dd3_ = np.r_[[[0,0,0]],contrast_all_one(3)]
pairwise contrasts and reparameterization
dd = np.r_[[[1,0,0,0,0]],-contrast_all_one(5)]
>>> dd
array([[ 1., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0.],
[-1., 0., 1., 0., 0.],
[-1., 0., 0., 1., 0.],
[-1., 0., 0., 0., 1.]])
>>> np.dot(dd.T, np.arange(5))
array([-10., 1., 2., 3., 4.])
>>> np.round(np.linalg.inv(dd.T)).astype(int)
array([[1, 1, 1, 1, 1],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
>>> np.round(np.linalg.inv(dd)).astype(int)
array([[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 0, 1]])
>>> dd
array([[ 1., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0.],
[-1., 0., 1., 0., 0.],
[-1., 0., 0., 1., 0.],
[-1., 0., 0., 0., 1.]])
>>> ddinv=np.round(np.linalg.inv(dd.T)).astype(int)
>>> np.dot(ddinv, np.arange(5))
array([10, 1, 2, 3, 4])
>>> np.dot(dd, np.arange(5))
array([ 0., 1., 2., 3., 4.])
>>> np.dot(dd, 5+np.arange(5))
array([ 5., 1., 2., 3., 4.])
>>> ddinv2 = np.round(np.linalg.inv(dd)).astype(int)
>>> np.dot(ddinv2, np.arange(5))
array([0, 1, 2, 3, 4])
>>> np.dot(ddinv2, 5+np.arange(5))
array([ 5, 11, 12, 13, 14])
>>> np.dot(ddinv2, [5, 0, 0 , 1, 2])
array([5, 5, 5, 6, 7])
>>> np.dot(ddinv2, dd)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
>>> dd3 = -np.r_[[[1,0,0]],contrast_all_one(3)]
>>> dd2 = -np.r_[[[1,0]],contrast_all_one(2)]
>>> np.kron(np.eye(3), dd2)
array([[-1., 0., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 0.],
[ 0., 0., 0., 0., -1., 1.]])
>>> dd2
array([[-1., 0.],
[-1., 1.]])
>>> np.kron(np.eye(3), dd2[1:])
array([[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 1.]])
>>> np.kron(dd[1:], np.eye(2))
array([[-1., 0., 1., 0., 0., 0.],
[ 0., -1., 0., 1., 0., 0.],
[-1., 0., 0., 0., 1., 0.],
[ 0., -1., 0., 0., 0., 1.]])
d_ = np.r_[[[1,0,0,0,0]],contrast_all_one(5)]
>>> d_
array([[ 1., 0., 0., 0., 0.],
[ 1., -1., 0., 0., 0.],
[ 1., 0., -1., 0., 0.],
[ 1., 0., 0., -1., 0.],
[ 1., 0., 0., 0., -1.]])
>>> np.round(np.linalg.pinv(d_)).astype(int)
array([[ 1, 0, 0, 0, 0],
[ 1, -1, 0, 0, 0],
[ 1, 0, -1, 0, 0],
[ 1, 0, 0, -1, 0],
[ 1, 0, 0, 0, -1]])
>>> np.linalg.inv(d_).astype(int)
array([[ 1, 0, 0, 0, 0],
[ 1, -1, 0, 0, 0],
[ 1, 0, -1, 0, 0],
[ 1, 0, 0, -1, 0],
[ 1, 0, 0, 0, -1]])
group means
>>> sli = [slice(None)] + [None]*(3-2) + [slice(None)]
>>> (np.column_stack((y, x1, x2))[...,None] * d1[sli]).sum(0)*1./d1.sum(0)
array([[ 1.5, 5.5, 9.5],
[ 0. , 1. , 2. ],
[ 0.5, 0.5, 0.5]])
>>> [(z[:,None] * d1).sum(0)*1./d1.sum(0) for z in np.column_stack((y, x1, x2)).T]
[array([ 1.5, 5.5, 9.5]), array([ 0., 1., 2.]), array([ 0.5, 0.5, 0.5])]
>>>
'''
| bsd-3-clause | -6,191,862,223,996,965,000 | 29.020855 | 121 | 0.501841 | false |
chrislit/abydos | tests/distance/test_distance_goodman_kruskal_lambda.py | 1 | 4737 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_goodman_kruskal_lambda.
This module contains unit tests for abydos.distance.GoodmanKruskalLambda
"""
import unittest
from abydos.distance import GoodmanKruskalLambda
class GoodmanKruskalLambdaTestCases(unittest.TestCase):
"""Test GoodmanKruskalLambda functions.
abydos.distance.GoodmanKruskalLambda
"""
cmp = GoodmanKruskalLambda()
cmp_no_d = GoodmanKruskalLambda(alphabet=0)
def test_goodman_kruskal_lambda_sim(self):
"""Test abydos.distance.GoodmanKruskalLambda.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 1.0)
self.assertEqual(self.cmp.sim('a', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp.sim('abc', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.0)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.0)
self.assertAlmostEqual(
self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.3333333333
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim('', ''), 1.0)
self.assertEqual(self.cmp_no_d.sim('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp_no_d.sim('Nigel', 'Niall'), 0.0)
self.assertAlmostEqual(self.cmp_no_d.sim('Niall', 'Nigel'), 0.0)
self.assertAlmostEqual(self.cmp_no_d.sim('Colin', 'Coiln'), 0.0)
self.assertAlmostEqual(self.cmp_no_d.sim('Coiln', 'Colin'), 0.0)
self.assertAlmostEqual(
self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.0
)
def test_goodman_kruskal_lambda_dist(self):
"""Test abydos.distance.GoodmanKruskalLambda.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.0)
self.assertEqual(self.cmp.dist('a', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('abc', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 1.0)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 1.0)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.6666666667
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.dist('', ''), 0.0)
self.assertEqual(self.cmp_no_d.dist('a', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'a'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp_no_d.dist('Nigel', 'Niall'), 1.0)
self.assertAlmostEqual(self.cmp_no_d.dist('Niall', 'Nigel'), 1.0)
self.assertAlmostEqual(self.cmp_no_d.dist('Colin', 'Coiln'), 1.0)
self.assertAlmostEqual(self.cmp_no_d.dist('Coiln', 'Colin'), 1.0)
self.assertAlmostEqual(
self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 1.0
)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -650,457,956,675,182,000 | 42.063636 | 73 | 0.628668 | false |
nyddogghr/SixQuiPrend | tests/models/game_test.py | 1 | 41394 | from flask import Flask
from passlib.hash import bcrypt
from sixquiprend.config import *
from sixquiprend.models.card import Card
from sixquiprend.models.chosen_card import ChosenCard
from sixquiprend.models.column import Column
from sixquiprend.models.game import Game
from sixquiprend.models.hand import Hand
from sixquiprend.models.heap import Heap
from sixquiprend.models.six_qui_prend_exception import SixQuiPrendException
from sixquiprend.models.user import User
from sixquiprend.sixquiprend import app, db
from sixquiprend.utils import *
import random
import unittest
class GameTestCase(unittest.TestCase):
USERNAME = 'User'
PASSWORD = 'Password'
def setUp(self):
app.config['SERVER_NAME'] = 'localhost'
app.config['WTF_CSRF_ENABLED'] = False
app.config['DATABASE_NAME'] = 'sixquiprend_test'
db_path = app.config['DATABASE_USER'] + ':' + app.config['DATABASE_PASSWORD']
db_path += '@' + app.config['DATABASE_HOST'] + '/' + app.config['DATABASE_NAME']
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://' + db_path
app.config['TESTING'] = True
self.app = app.test_client()
ctx = app.app_context()
ctx.push()
create_db()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def create_user(self, urole=User.ROLE_PLAYER):
username = 'User #'+str(User.query.count())
password = 'Password'
user = User(username=username,
password=bcrypt.hash(password),
active=True,
urole=urole)
db.session.add(user)
db.session.commit()
return user
def create_game(self, status=Game.STATUS_STARTED, users=[], owner_id=None):
game = Game(status=status)
for user in users:
game.users.append(user)
game.owner_id = owner_id
db.session.add(game)
db.session.commit()
return game
def create_hand(self, game_id, user_id, cards=[]):
hand = Hand(game_id=game_id, user_id=user_id)
for card in cards:
hand.cards.append(card)
db.session.add(hand)
db.session.commit()
return hand
def create_column(self, game_id, cards=[]):
column = Column(game_id=game_id)
for card in cards:
column.cards.append(card)
db.session.add(column)
db.session.commit()
return column
def create_heap(self, game_id, user_id, cards=[]):
heap = Heap(game_id=game_id, user_id=user_id)
for card in cards:
heap.cards.append(card)
db.session.add(heap)
db.session.commit()
return heap
def create_chosen_card(self, game_id, user_id, card_id=None):
if card_id == None:
card_id = self.create_card().id
chosen_card = ChosenCard(game_id=game_id,
user_id=user_id,
card_id=card_id)
db.session.add(chosen_card)
db.session.commit()
return chosen_card
    def create_card(self, number=None, cow_value=None):
        # random defaults generated per call (randint in the signature would run only once)
        number = number if number is not None else random.randint(1, 1000)
        cow_value = cow_value if cow_value is not None else random.randint(1, 1000)
card = Card(number=number, cow_value=cow_value)
db.session.add(card)
db.session.commit()
return card
################################################################################
## Getters
################################################################################
def test_find(self):
game = self.create_game()
assert Game.find(game.id) == game
def test_find_errors(self):
# Game not found
with self.assertRaises(SixQuiPrendException) as e:
Game.find(-1)
assert e.exception.code == 404
def test_find_user(self):
user = self.create_user()
game = self.create_game(users=[user])
assert game.find_user(user.id) == user
def test_find_user_errors(self):
# User not in game
user = self.create_user()
game = self.create_game(users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.find_user(-1)
assert e.exception.code == 404
def test_find_column(self):
game = self.create_game()
column = self.create_column(game.id)
assert game.find_column(column.id) == column
def test_find_column_errors(self):
# Column not in game
game = self.create_game()
column = self.create_column(game.id)
with self.assertRaises(SixQuiPrendException) as e:
game.find_column(-1)
assert e.exception.code == 404
def test_find_chosen_card(self):
user = self.create_user()
game = self.create_game(users=[user])
chosen_card = self.create_chosen_card(game.id, user.id)
assert game.find_chosen_card(chosen_card.id) == chosen_card
def test_find_chosen_card_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.find_chosen_card(-1)
assert e.exception.code == 404
# User has no chosen card for this game
user = self.create_user()
game = self.create_game()
chosen_card = self.create_chosen_card(game.id, user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.find_chosen_card(-1)
assert e.exception.code == 404
def test_get_user_hand(self):
user = self.create_user()
game = self.create_game(users=[user])
user_hand = self.create_hand(game.id, user.id)
assert game.get_user_hand(user.id) == user_hand
def test_get_user_hand_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.get_user_hand(-1)
assert e.exception.code == 404
def test_get_user_heap(self):
user = self.create_user()
game = self.create_game(users=[user])
user_heap = self.create_heap(game.id, user.id)
assert game.get_user_heap(user.id) == user_heap
def test_get_user_heap_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.get_user_heap(-1)
assert e.exception.code == 404
def test_get_user_status(self):
user = self.create_user()
game = self.create_game(users=[user])
assert game.get_user_status(user.id)['has_chosen_card'] == False
assert game.get_user_status(user.id)['needs_to_choose_column'] == False
def test_get_user_status_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.get_user_status(-1)
assert e.exception.code == 404
def test_get_user_chosen_card(self):
user = self.create_user()
game = self.create_game(users=[user])
card = self.create_card()
chosen_card = self.create_chosen_card(game.id, user.id, card.id)
assert game.get_user_chosen_card(user.id) == chosen_card
def test_get_user_chosen_card_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.get_user_chosen_card(-1)
assert e.exception.code == 404
def test_check_is_started(self):
game = self.create_game(status=Game.STATUS_STARTED)
game.check_is_started()
def test_check_is_started_errors(self):
# Game not started
game = self.create_game(status=Game.STATUS_CREATED)
with self.assertRaises(SixQuiPrendException) as e:
game.check_is_started()
assert e.exception.code == 400
def test_check_is_owner(self):
user = self.create_user()
game = self.create_game(users=[user], owner_id=user.id)
game.check_is_owner(user.id)
def test_check_is_owner_errors(self):
# User not owner
user = self.create_user()
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.check_is_owner(user.id)
assert e.exception.code == 403
def test_get_results(self):
user_one = self.create_user()
user_two = self.create_user()
game = self.create_game(users=[user_one, user_two])
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
user_one_heap = self.create_heap(game.id, user_one.id)
user_one_heap.cards.append(card_one)
user_two_heap = self.create_heap(game.id, user_two.id)
user_two_heap.cards.append(card_two)
user_two_heap.cards.append(card_three)
results = game.get_results()
assert results[user_one.username] == 1
assert results[user_two.username] == 5
def test_get_results_created_game(self):
user_one = self.create_user()
user_two = self.create_user()
game = self.create_game(status=Game.STATUS_CREATED, users=[user_one,
user_two])
results = game.get_results()
assert results == {}
def test_get_lowest_value_column(self):
game = self.create_game()
card_one = self.create_card(1, 10)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
card_five = self.create_card(5, 5)
column_one = self.create_column(game.id, cards=[card_one])
column_two = self.create_column(game.id, cards=[card_two, card_five])
column_two_bis = self.create_column(game.id, cards=[card_three,
card_four])
assert column_one.get_value() > column_two.get_value()
assert column_two.get_value() == column_two_bis.get_value()
chosen_column_ids = []
for i in range(100):
chosen_column = game.get_lowest_value_column()
assert chosen_column.id != column_one.id
chosen_column_ids.append(chosen_column.id)
assert chosen_column_ids.index(column_two.id) >= 0
assert chosen_column_ids.index(column_two_bis.id) >= 0
def test_get_suitable_column(self):
user = self.create_user()
game = self.create_game(users=[user])
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
column_one = self.create_column(game.id, cards=[card_two])
column_two = self.create_column(game.id, cards=[card_three])
chosen_card = self.create_chosen_card(game.id, user.id,
card_four.id)
suitable_column = game.get_suitable_column(chosen_card)
assert suitable_column == column_two
def test_get_suitable_column_errors(self):
# Chosen card does not belong to this game
user = self.create_user()
game1 = self.create_game()
game2 = self.create_game()
chosen_card = self.create_chosen_card(game2.id, user.id)
with self.assertRaises(SixQuiPrendException) as e:
game1.get_suitable_column(chosen_card)
assert e.exception.code == 422
# No suitable column
user = self.create_user()
game = self.create_game(users=[user])
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
column_one = self.create_column(game.id, cards=[card_two])
column_two = self.create_column(game.id, cards=[card_three])
chosen_card = self.create_chosen_card(game.id, user.id,
card_one.id)
with self.assertRaises(SixQuiPrendException) as e:
game.get_suitable_column(chosen_card)
assert e.exception.code == 422
def test_get_available_bots(self):
bot1 = self.create_user(urole=User.ROLE_BOT)
bot2 = self.create_user(urole=User.ROLE_BOT)
game = self.create_game(users=[bot1])
assert game.get_available_bots() == [bot2]
def test_get_chosen_cards_for_current_user(self):
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2])
chosen_card1 = self.create_chosen_card(game.id, user1.id)
chosen_cards = game.get_chosen_cards_for_current_user(user1.id)
assert chosen_cards == [chosen_card1]
chosen_card2 = self.create_chosen_card(game.id, user2.id)
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
chosen_cards = game.get_chosen_cards_for_current_user(user1.id)
assert chosen_cards == [chosen_card1, chosen_card2]
db.session.delete(chosen_card1)
db.session.refresh(game)
chosen_cards = game.get_chosen_cards_for_current_user(user1.id)
assert chosen_cards == [chosen_card2]
def test_get_chosen_cards_for_current_user_errors(self):
# User is not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.get_chosen_cards_for_current_user(-1)
assert e.exception.code == 404
# User has no chosen card and card is not being placed
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2])
with self.assertRaises(SixQuiPrendException) as e:
game.get_chosen_cards_for_current_user(user1.id)
assert e.exception.code == 400
def test_user_needs_to_choose_column(self):
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2])
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
card_five = self.create_card(5, 5)
column_one = self.create_column(game.id, cards=[card_three])
column_two = self.create_column(game.id, cards=[card_four])
assert game.user_needs_to_choose_column(user1.id) == False
assert game.user_needs_to_choose_column(user2.id) == False
chosen_card1 = self.create_chosen_card(game.id, user1.id, card_two.id)
assert game.user_needs_to_choose_column(user1.id) == False
assert game.user_needs_to_choose_column(user2.id) == False
chosen_card2 = self.create_chosen_card(game.id, user2.id, card_one.id)
assert game.user_needs_to_choose_column(user1.id) == False
assert game.user_needs_to_choose_column(user2.id) == False
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
assert game.user_needs_to_choose_column(user1.id) == False
assert game.user_needs_to_choose_column(user2.id) == True
db.session.delete(chosen_card2)
chosen_card2 = self.create_chosen_card(game.id, user2.id, card_five.id)
assert game.user_needs_to_choose_column(user1.id) == True
assert game.user_needs_to_choose_column(user2.id) == False
def test_user_needs_to_choose_column_errors(self):
# Game not started
game = self.create_game(status=Game.STATUS_CREATED)
with self.assertRaises(SixQuiPrendException) as e:
game.get_chosen_cards_for_current_user(-1)
assert e.exception.code == 422
# User not in game
game = self.create_game(status=Game.STATUS_STARTED)
with self.assertRaises(SixQuiPrendException) as e:
game.get_chosen_cards_for_current_user(-1)
assert e.exception.code == 404
def test_can_place_card(self):
user1 = self.create_user()
user2 = self.create_user()
bot = self.create_user(urole=User.ROLE_BOT)
game = self.create_game(users=[user1, user2, bot], owner_id=user1.id)
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
card_five = self.create_card(5, 5)
card_six = self.create_card(6, 6)
column_one = self.create_column(game.id, cards=[card_two])
column_two = self.create_column(game.id, cards=[card_three])
assert game.can_place_card(user1.id) == False
chosen_card1 = self.create_chosen_card(game.id, user1.id, card_four.id)
assert game.can_place_card(user1.id) == False
chosen_cardb = self.create_chosen_card(game.id, bot.id, card_five.id)
chosen_card2 = self.create_chosen_card(game.id, user2.id, card_six.id)
assert game.can_place_card(user1.id) == True
db.session.delete(chosen_card1)
assert game.can_place_card(user1.id) == False
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
assert game.can_place_card(user1.id) == True
chosen_card1 = self.create_chosen_card(game.id, user1.id, card_one.id)
assert game.can_place_card(user1.id) == False
db.session.delete(chosen_card1)
db.session.delete(chosen_cardb)
chosen_cardb = self.create_chosen_card(game.id, bot.id, card_one.id)
assert game.can_place_card(user1.id) == True
def test_can_place_card_errors(self):
# User not in game
game = self.create_game(status=Game.STATUS_STARTED)
with self.assertRaises(SixQuiPrendException) as e:
game.can_place_card(-1)
assert e.exception.code == 404
# User not game owner
user = self.create_user()
game = self.create_game(status=Game.STATUS_STARTED, users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.can_place_card(user.id)
assert e.exception.code == 403
def test_can_choose_cards_for_bots(self):
user = self.create_user()
bot1 = self.create_user(urole=User.ROLE_BOT)
bot2 = self.create_user(urole=User.ROLE_BOT)
game = self.create_game(users=[user, bot1, bot2], owner_id=user.id)
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
column_one = self.create_column(game.id, cards=[card_one])
assert game.can_choose_cards_for_bots(user.id) == True
chosen_card1 = self.create_chosen_card(game.id, bot1.id, card_one.id)
assert game.can_choose_cards_for_bots(user.id) == True
chosen_card2 = self.create_chosen_card(game.id, bot2.id, card_two.id)
assert game.can_choose_cards_for_bots(user.id) == False
db.session.delete(chosen_card1)
assert game.can_choose_cards_for_bots(user.id) == True
chosen_card2 = self.create_chosen_card(game.id, user.id, card_two.id)
assert game.can_choose_cards_for_bots(user.id) == True
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
assert game.can_choose_cards_for_bots(user.id) == False
def test_can_choose_cards_for_bots_errors(self):
# User not in game
game = self.create_game(status=Game.STATUS_STARTED)
with self.assertRaises(SixQuiPrendException) as e:
game.can_choose_cards_for_bots(-1)
assert e.exception.code == 404
# User not game owner
user = self.create_user()
game = self.create_game(status=Game.STATUS_STARTED, users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.can_choose_cards_for_bots(user.id)
assert e.exception.code == 403
################################################################################
## Actions
################################################################################
def test_create(self):
user = self.create_user()
game = Game.create(user)
assert game.users.all() == [user]
assert game.owner_id == user.id
assert game.status == Game.STATUS_CREATED
def test_delete(self):
user = self.create_user()
game = self.create_game(users=[user])
card1 = self.create_card()
card2 = self.create_card()
card3 = self.create_card()
card4 = self.create_card()
column = self.create_column(game_id=game.id, cards=[card1])
user_hand = self.create_hand(game_id=game.id, user_id=user.id, cards=[card2])
user_heap = self.create_heap(game_id=game.id, user_id=user.id, cards=[card3])
chosen_card = self.create_chosen_card(game_id=game.id, user_id=user.id,
card_id=card4.id)
Game.delete(game.id)
assert Card.find(card1.id) == card1
assert User.find(user.id) == user
assert Column.query.get(column.id) == None
assert Hand.query.get(user_hand.id) == None
assert Heap.query.get(user_heap.id) == None
with self.assertRaises(SixQuiPrendException) as e:
Game.find(game.id)
assert e.exception.code == 404
assert ChosenCard.query.get(chosen_card.id) == None
def test_delete_errors(self):
# Game not found
with self.assertRaises(SixQuiPrendException) as e:
Game.delete(-1)
assert e.exception.code == 404
def test_setup_game(self):
populate_db()
user = self.create_user()
users = [user]
bots = User.query.filter(User.urole == User.ROLE_BOT).all()
for bot in bots:
users.append(bot)
game = self.create_game(Game.STATUS_CREATED, users=users, owner_id=user.id)
game.setup(user.id)
assert game.status == Game.STATUS_STARTED
assert game.columns.count() == app.config['BOARD_SIZE']
for column in game.columns:
assert len(column.cards) == 1
assert len(game.get_user_hand(user.id).cards) == app.config['HAND_SIZE']
assert len(game.get_user_heap(user.id).cards) == 0
def test_setup_game_errors(self):
# User not in game
game = self.create_game(Game.STATUS_CREATED)
with self.assertRaises(SixQuiPrendException) as e:
game.setup(-1)
assert e.exception.code == 404
# User is not owner
user = self.create_user()
game = self.create_game(Game.STATUS_CREATED, users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.setup(user.id)
assert e.exception.code == 400
# Game is not CREATED
user = self.create_user()
game = self.create_game(Game.STATUS_STARTED, users=[user], owner_id=user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.setup(user.id)
assert e.exception.code == 400
# Not enough users
game = self.create_game(Game.STATUS_CREATED, users=[user], owner_id=user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.setup(user.id)
assert e.exception.code == 400
def test_add_user(self):
user = self.create_user()
game = self.create_game(Game.STATUS_CREATED)
assert game.users.count() == 0
game.add_user(user)
assert game.users.all() == [user]
def test_add_user_errors(self):
# Game not CREATED
user = self.create_user()
game = self.create_game(status=Game.STATUS_STARTED)
with self.assertRaises(SixQuiPrendException) as e:
game.add_user(user)
assert e.exception.code == 400
# Max number of users reached
game = self.create_game(Game.STATUS_CREATED)
for i in range(app.config['MAX_PLAYER_NUMBER']):
game.add_user(self.create_user())
with self.assertRaises(SixQuiPrendException) as e:
game.add_user(user)
assert e.exception.code == 400
# Already in game
game = self.create_game(Game.STATUS_CREATED, users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.add_user(user)
assert e.exception.code == 400
def test_add_bot(self):
bot = self.create_user(urole=User.ROLE_BOT)
user = self.create_user()
game = self.create_game(status=Game.STATUS_CREATED, users=[user],
owner_id=user.id)
assert game.users.count() == 1
game.add_bot(bot.id, user.id)
assert game.users.count() == 2
assert game.users.all() == [bot, user]
def test_add_bot_errors(self):
# User not in game
game = self.create_game(Game.STATUS_CREATED)
with self.assertRaises(SixQuiPrendException) as e:
game.add_bot(-1, -1)
assert e.exception.code == 404
# User is not owner
user = self.create_user()
game = self.create_game(Game.STATUS_CREATED, users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.add_bot(-1, user.id)
assert e.exception.code == 400
# Bot not found
user = self.create_user()
game = self.create_game(status=Game.STATUS_CREATED, users=[user], owner_id=user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.add_bot(-1, user.id)
assert e.exception.code == 404
# User not a bot
not_bot = self.create_user(urole=User.ROLE_PLAYER)
user = self.create_user()
game = self.create_game(status=Game.STATUS_CREATED, users=[user], owner_id=user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.add_bot(not_bot.id, user.id)
assert e.exception.code == 400
# Bot already in game
bot = self.create_user(urole=User.ROLE_BOT)
user = self.create_user()
game = self.create_game(status=Game.STATUS_CREATED, users=[user, bot],
owner_id=user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.add_bot(bot.id, user.id)
assert e.exception.code == 400
def test_remove_user(self):
user = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user, user2])
card1 = self.create_card()
card2 = self.create_card()
card3 = self.create_card()
card4 = self.create_card()
column = self.create_column(game_id=game.id, cards=[card1])
user_hand = self.create_hand(game_id=game.id, user_id=user.id, cards=[card2])
user_heap = self.create_heap(game_id=game.id, user_id=user.id, cards=[card3])
chosen_card = self.create_chosen_card(game_id=game.id, user_id=user.id,
card_id=card4.id)
assert game.users.count() == 2
game.remove_user(user)
assert game.users.all() == [user2]
assert Card.find(card1.id) == card1
assert User.find(user.id) == user
assert Column.query.get(column.id) == column
assert Hand.query.get(user_hand.id) == None
assert Heap.query.get(user_heap.id) == None
assert ChosenCard.query.get(chosen_card.id) == None
def test_remove_user_errors(self):
# User not in game
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1])
with self.assertRaises(SixQuiPrendException) as e:
game.remove_user(user2)
assert e.exception.code == 400
def test_remove_owner(self):
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2], owner_id=user1.id)
game.remove_owner(user1.id)
assert game.owner_id == user2.id
def test_remove_owner_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.remove_user(-1)
assert e.exception.code == 404
# User not owner
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2], owner_id=user1.id)
with self.assertRaises(SixQuiPrendException) as e:
game.remove_owner(user2.id)
assert e.exception.code == 400
# No other non bot player
user = self.create_user()
bot = self.create_user(urole=User.ROLE_BOT)
game = self.create_game(users=[user, bot], owner_id=user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.remove_owner(user.id)
assert e.exception.code == 400
def test_place_card(self):
column_card_size = app.config['COLUMN_CARD_SIZE']
app.config['COLUMN_CARD_SIZE'] = 2
user = self.create_user()
bot = self.create_user(User.ROLE_BOT)
game = self.create_game(users=[user, bot], owner_id=user.id)
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
card_five = self.create_card(5, 5)
column_one = self.create_column(game.id, cards=[card_two])
column_two = self.create_column(game.id, cards=[card_three, card_four])
bot_heap = self.create_heap(game.id, bot.id)
bot_hand = self.create_hand(game.id, bot.id)
user_heap = self.create_heap(game.id, user.id)
user_hand = self.create_hand(game.id, user.id)
bot_chosen_card = self.create_chosen_card(game.id, bot.id,
card_one.id)
user_chosen_card = self.create_chosen_card(game.id, user.id,
card_five.id)
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
# Bot auto placing
assert len(bot_heap.cards) == 0
[suitable_column, new_bot_heap] = game.place_card(user.id)
assert suitable_column == column_one
assert new_bot_heap.user_id == bot.id
assert len(new_bot_heap.cards) == 1
assert new_bot_heap.cards[0] == card_two
assert game.get_user_chosen_card(bot.id) == None
assert game.can_place_card(user.id) == True
assert game.is_resolving_turn == True
# User completes a column
assert len(user_heap.cards) == 0
[suitable_column, new_user_heap] = game.place_card(user.id)
assert suitable_column == column_two
assert new_user_heap.user_id == user.id
assert len(new_user_heap.cards) == 2
assert new_user_heap.cards == [card_three, card_four]
assert game.get_user_chosen_card(user.id) == None
assert game.can_place_card(user.id) == False
assert game.is_resolving_turn == False
assert game.status == Game.STATUS_FINISHED
app.config['COLUMN_CARD_SIZE'] = column_card_size
def test_place_card_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.place_card(-1)
assert e.exception.code == 404
# User not owner
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2], owner_id=user1.id)
with self.assertRaises(SixQuiPrendException) as e:
game.place_card(user2.id)
assert e.exception.code == 400
# No chosen card to place
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2], owner_id=user1.id)
with self.assertRaises(SixQuiPrendException) as e:
game.place_card(user1.id)
assert e.exception.code == 422
# Not all users have chosen cards
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
column_one = self.create_column(game.id, cards=[card_two])
column_two = self.create_column(game.id, cards=[card_three])
user2_heap = self.create_heap(game.id, user2.id)
user2_hand = self.create_hand(game.id, user2.id)
user1_heap = self.create_heap(game.id, user1.id)
user1_hand = self.create_hand(game.id, user1.id)
user2_chosen_card = self.create_chosen_card(game.id, user2.id,
card_one.id)
with self.assertRaises(SixQuiPrendException) as e:
game.place_card(user1.id)
assert e.exception.code == 422
# No suitable column
card_one = self.create_card(1, 1)
card_two = self.create_card(2, 2)
card_three = self.create_card(3, 3)
card_four = self.create_card(4, 4)
column_one = self.create_column(game.id, cards=[card_two])
column_two = self.create_column(game.id, cards=[card_three])
user2_heap = self.create_heap(game.id, user2.id)
user2_hand = self.create_hand(game.id, user2.id)
user1_heap = self.create_heap(game.id, user1.id)
user1_hand = self.create_hand(game.id, user1.id)
user1_chosen_card = self.create_chosen_card(game.id, user1.id,
card_four.id)
with self.assertRaises(SixQuiPrendException) as e:
game.place_card(user1.id)
assert e.exception.code == 422
def test_choose_cards_for_bots(self):
card = self.create_card(1, 1)
card2 = self.create_card(2, 2)
card3 = self.create_card(3, 3)
card4 = self.create_card(4, 4)
user = self.create_user()
bot1 = self.create_user(urole=User.ROLE_BOT)
bot2 = self.create_user(urole=User.ROLE_BOT)
game = self.create_game(users=[user, bot1, bot2], owner_id=user.id)
user_hand = self.create_hand(game.id, user.id, [card])
bot1_hand = self.create_hand(game.id, bot1.id, [card2])
bot2_hand = self.create_hand(game.id, bot2.id, [card3])
bot2_chosen_card = self.create_chosen_card(game.id, bot2.id, card3.id)
game.choose_cards_for_bots(user.id)
assert game.get_user_chosen_card(user.id) == None
bot1_chosen_card = game.get_user_chosen_card(bot1.id)
assert bot1_chosen_card != None
assert bot1_chosen_card.card_id == card2.id
assert len(game.get_user_hand(bot1.id).cards) == 0
assert game.get_user_hand(bot2.id).cards == [card3]
def test_choose_cards_for_bots_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.choose_cards_for_bots(-1)
assert e.exception.code == 404
# User not owner
user1 = self.create_user()
user2 = self.create_user()
game = self.create_game(users=[user1, user2], owner_id=user1.id)
with self.assertRaises(SixQuiPrendException) as e:
game.choose_cards_for_bots(user2.id)
assert e.exception.code == 400
# Game is not STARTED
user = self.create_user()
game = self.create_game(status=Game.STATUS_CREATED,
owner_id=user.id, users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.choose_cards_for_bots(user.id)
assert e.exception.code == 400
# Card is being placed
game = self.create_game(status=Game.STATUS_STARTED,
owner_id=user.id, users=[user])
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
with self.assertRaises(SixQuiPrendException) as e:
game.choose_cards_for_bots(user.id)
assert e.exception.code == 400
# Bots have already chosen cards
game = self.create_game(status=Game.STATUS_STARTED,
owner_id=user.id, users=[user])
db.session.add(game)
db.session.commit()
with self.assertRaises(SixQuiPrendException) as e:
game.choose_cards_for_bots(user.id)
assert e.exception.code == 400
def test_choose_card_for_user(self):
user = self.create_user()
game = self.create_game(users=[user])
card = self.create_card(1, 1)
hand = self.create_hand(game.id, user.id, cards=[card])
assert len(game.get_user_hand(user.id).cards) == 1
chosen_card = game.get_user_chosen_card(user.id)
assert chosen_card == None
game.choose_card_for_user(user.id, card.id)
assert len(game.get_user_hand(user.id).cards) == 0
db.session.refresh(game)
assert game.is_resolving_turn == True
chosen_card = game.get_user_chosen_card(user.id)
assert chosen_card.card_id == card.id
def test_choose_card_for_user_errors(self):
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.choose_card_for_user(-1, -1)
assert e.exception.code == 404
# Card is being placed
user = self.create_user()
card = self.create_card(1, 1)
game = self.create_game(users=[user])
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
with self.assertRaises(SixQuiPrendException) as e:
game.choose_card_for_user(user.id, card.id)
assert e.exception.code == 400
# User has already chosen a card
game = self.create_game(users=[user])
chosen_card = self.create_chosen_card(game.id, user.id, card.id)
with self.assertRaises(SixQuiPrendException) as e:
game.choose_card_for_user(user.id, card.id)
assert e.exception.code == 400
# User tries to choose a card he doesn't own
db.session.delete(chosen_card)
hand = self.create_hand(game.id, user.id)
with self.assertRaises(SixQuiPrendException) as e:
game.choose_card_for_user(user.id, card.id)
assert e.exception.code == 400
def test_choose_column_for_user(self):
user = self.create_user()
game = self.create_game(users=[user])
card_one = self.create_card()
card_two = self.create_card()
column = self.create_column(game.id, cards=[card_two])
user_heap = self.create_heap(game.id, user.id)
chosen_card = self.create_chosen_card(game.id, user.id, card_one.id)
assert column.cards == [card_two]
assert len(user_heap.cards) == 0
[chosen_column, new_user_heap] = game.choose_column_for_user(user.id,
column.id)
assert chosen_column.id == column.id
assert chosen_column.cards == [card_one]
assert new_user_heap.cards == [card_two]
def test_choose_column_for_user_errors(self):
# Game not started
game = self.create_game(status=Game.STATUS_CREATED)
with self.assertRaises(SixQuiPrendException) as e:
game.choose_column_for_user(-1, -1)
assert e.exception.code == 400
# User not in game
game = self.create_game()
with self.assertRaises(SixQuiPrendException) as e:
game.choose_column_for_user(-1, -1)
assert e.exception.code == 404
# Column not in game
user = self.create_user()
game = self.create_game(users=[user])
with self.assertRaises(SixQuiPrendException) as e:
game.choose_column_for_user(user.id, -1)
assert e.exception.code == 404
# User has no chosen card for the game
        user = self.create_user()
game = self.create_game(users=[user])
column = self.create_column(game.id)
with self.assertRaises(SixQuiPrendException) as e:
game.choose_column_for_user(user.id, column.id)
assert e.exception.code == 404
def test_update_status(self):
# Users still have chosen cards to place
user = self.create_user()
bot = self.create_user(urole=User.ROLE_BOT)
game = self.create_game(status=Game.STATUS_STARTED, users=[user, bot],
owner_id=user.id)
game.is_resolving_turn = True
db.session.add(game)
db.session.commit()
card = self.create_card()
user_hand = self.create_hand(game.id, user.id, [card])
bot_hand = self.create_hand(game.id, bot.id)
chosen_card = self.create_chosen_card(game.id, user.id)
game.update_status()
assert game.status == Game.STATUS_STARTED
assert game.is_resolving_turn == True
# No chosen cards, but users still have cards in hands
db.session.delete(chosen_card)
db.session.commit()
game.update_status()
assert game.status == Game.STATUS_STARTED
assert game.is_resolving_turn == False
# No remaining card in hands nor to place
user_hand.cards = []
db.session.add(user_hand)
db.session.commit()
game.update_status()
assert game.status == Game.STATUS_FINISHED
assert game.is_resolving_turn == False
def test_update_status_errors(self):
# Game not started
game = self.create_game(status=Game.STATUS_CREATED)
with self.assertRaises(SixQuiPrendException) as e:
game.update_status()
assert e.exception.code == 400
if __name__ == '__main__':
unittest.main()
| mit | -8,186,464,188,850,144,000 | 41.542652 | 91 | 0.603638 | false |
nicko96/Chrome-Infra | appengine/cr-buildbucket/test/config_test.py | 1 | 13547 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from google.appengine.ext import ndb
import mock
from components import config as config_component
from components.config import validation_context
from testing_utils import testing
from proto import project_config_pb2
import config
MASTER_TRYSERVER_CHROMIUM_LINUX_CONFIG_TEXT=(
'''name: "master.tryserver.chromium.linux"
acls {
role: READER
group: "all"
}
acls {
role: SCHEDULER
group: "tryjob-access"
}
''')
MASTER_TRYSERVER_CHROMIUM_WIN_CONFIG_TEXT=(
'''name: "master.tryserver.chromium.win"
acls {
role: READER
group: "all"
}
acls {
role: SCHEDULER
group: "tryjob-access"
}
''')
MASTER_TRYSERVER_CHROMIUM_MAC_CONFIG_TEXT=(
'''name: "master.tryserver.chromium.mac"
acls {
role: READER
group: "all"
}
acls {
role: SCHEDULER
group: "tryjob-access"
}
''')
MASTER_TRYSERVER_V8_CONFIG_TEXT=(
'''name: "master.tryserver.v8"
acls {
role: WRITER
group: "v8-team"
}
''')
MASTER_TRYSERVER_TEST_CONFIG_TEXT=(
'''name: "master.tryserver.test"
acls {
role: WRITER
identity: "user:[email protected]"
}
''')
class ConfigTest(testing.AppengineTestCase):
def test_get_bucket_async(self):
config.Bucket(
id='master.tryserver.chromium.linux',
project_id='chromium',
revision='deadbeef',
config_content=MASTER_TRYSERVER_CHROMIUM_LINUX_CONFIG_TEXT).put()
cfg = config.get_bucket_async(
'master.tryserver.chromium.linux').get_result()
self.assertEqual(
cfg,
project_config_pb2.Bucket(
name='master.tryserver.chromium.linux',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.READER, group='all'),
project_config_pb2.Acl(
role=project_config_pb2.Acl.SCHEDULER, group='tryjob-access'),
]),
)
self.assertIsNone(config.get_bucket_async('non.existing').get_result())
def test_get_buckets_async(self):
config.Bucket(
id='master.tryserver.chromium.linux',
project_id='chromium',
revision='deadbeef',
config_content=MASTER_TRYSERVER_CHROMIUM_LINUX_CONFIG_TEXT).put()
config.Bucket(
id='master.tryserver.chromium.win',
project_id='chromium',
revision='deadbeef',
config_content=MASTER_TRYSERVER_CHROMIUM_WIN_CONFIG_TEXT).put()
actual = config.get_buckets_async().get_result()
expected = [
project_config_pb2.Bucket(
name='master.tryserver.chromium.linux',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.READER, group='all'),
project_config_pb2.Acl(
role=project_config_pb2.Acl.SCHEDULER, group='tryjob-access'),
]),
project_config_pb2.Bucket(
name='master.tryserver.chromium.win',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.READER, group='all'),
project_config_pb2.Acl(
role=project_config_pb2.Acl.SCHEDULER, group='tryjob-access'),
]),
]
self.assertEqual(actual, expected)
def test_cron_update_buckets(self):
chromium_buildbucket_cfg = project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(
name='master.tryserver.chromium.linux',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.READER, group='all'),
project_config_pb2.Acl(
role=project_config_pb2.Acl.SCHEDULER, group='tryjob-access'),
],
),
project_config_pb2.Bucket(
name='master.tryserver.chromium.win',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.READER, group='all'),
project_config_pb2.Acl(
role=project_config_pb2.Acl.SCHEDULER, group='tryjob-access'),
],
),
])
v8_buildbucket_cfg = project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(
name='master.tryserver.v8',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.WRITER, group='v8-team')
],
),
]
)
test_buildbucket_cfg = project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(
name='master.tryserver.test',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.WRITER, identity='[email protected]')
],
),
]
)
self.mock(config_component, 'get_project_configs', mock.Mock())
config_component.get_project_configs.return_value = {
'chromium': ('deadbeef', chromium_buildbucket_cfg),
'v8': (None, v8_buildbucket_cfg),
'test': ('babe', test_buildbucket_cfg),
}
config.cron_update_buckets()
actual = config.Bucket.query().fetch()
actual = sorted(actual, key=lambda b: b.key)
expected = [
config.Bucket(
id='master.tryserver.chromium.linux',
project_id='chromium',
revision='deadbeef',
config_content=MASTER_TRYSERVER_CHROMIUM_LINUX_CONFIG_TEXT,
),
config.Bucket(
id='master.tryserver.chromium.win',
project_id='chromium',
revision='deadbeef',
config_content=MASTER_TRYSERVER_CHROMIUM_WIN_CONFIG_TEXT,
),
config.Bucket(
id='master.tryserver.test',
project_id='test',
revision='babe',
config_content=MASTER_TRYSERVER_TEST_CONFIG_TEXT
),
config.Bucket(
id='master.tryserver.v8',
project_id='v8',
revision='sha1:cfc761d7a953a72ddea8f3d4c9a28e69777ca22c',
config_content=MASTER_TRYSERVER_V8_CONFIG_TEXT,
),
]
self.assertEqual(actual, expected)
def test_cron_update_buckets_with_existing(self):
config.Bucket(
id='master.tryserver.chromium.linux',
project_id='chromium',
revision='deadbeef',
config_content=MASTER_TRYSERVER_CHROMIUM_LINUX_CONFIG_TEXT,
).put()
# Will not be updated.
config.Bucket(
id='master.tryserver.v8',
project_id='v8',
revision='deadbeef',
config_content=MASTER_TRYSERVER_V8_CONFIG_TEXT,
).put()
# Will be deleted.
config.Bucket(
id='master.tryserver.chromium.win',
project_id='chromium',
revision='deadbeef',
config_content=MASTER_TRYSERVER_CHROMIUM_WIN_CONFIG_TEXT,
).put()
chromium_buildbucket_cfg = project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(
name='master.tryserver.chromium.linux',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.READER, group='all'),
project_config_pb2.Acl(
role=project_config_pb2.Acl.SCHEDULER, group='tryjob-access'),
],
),
# Will be added.
project_config_pb2.Bucket(
name='master.tryserver.chromium.mac',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.READER, group='all'),
project_config_pb2.Acl(
role=project_config_pb2.Acl.SCHEDULER, group='tryjob-access'),
],
),
])
v8_buildbucket_cfg = project_config_pb2.BuildbucketCfg(
buckets=[
# Reservation will fail.
project_config_pb2.Bucket(
name='master.tryserver.chromium.linux',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.WRITER, group='v8-team')
],
),
# Will not be updated.
project_config_pb2.Bucket(
name='master.tryserver.v8',
acls=[
project_config_pb2.Acl(
role=project_config_pb2.Acl.WRITER, group='v8-team')
],
),
],
)
self.mock(config_component, 'get_project_configs', mock.Mock())
config_component.get_project_configs.return_value = {
'chromium': ('new!', chromium_buildbucket_cfg),
'v8': ('deadbeef', v8_buildbucket_cfg),
}
config.cron_update_buckets()
actual = config.Bucket.query().fetch()
actual = sorted(actual, key=lambda b: b.key.id())
expected = [
config.Bucket(
id='master.tryserver.chromium.linux',
project_id='chromium',
revision='new!',
config_content=MASTER_TRYSERVER_CHROMIUM_LINUX_CONFIG_TEXT,
),
config.Bucket(
id='master.tryserver.chromium.mac',
project_id='chromium',
revision='new!',
config_content=MASTER_TRYSERVER_CHROMIUM_MAC_CONFIG_TEXT,
),
config.Bucket(
id='master.tryserver.v8',
project_id='v8',
revision='deadbeef',
config_content=MASTER_TRYSERVER_V8_CONFIG_TEXT,
),
]
self.assertEqual(actual, expected)
def test_cron_update_buckets_change_reservation(self):
config.Bucket(
id='bucket',
project_id='foo',
revision='deadbeef',
config_content='name: "bucket"',
).put()
buildbucket_cfg = project_config_pb2.BuildbucketCfg(
buckets=[project_config_pb2.Bucket(name='bucket')]
)
self.mock(config_component, 'get_project_configs', mock.Mock())
config_component.get_project_configs.return_value = {
'bar': ('deadbeef', buildbucket_cfg),
}
config.cron_update_buckets()
actual = config.Bucket.query().fetch()
expected = [
config.Bucket(
id='bucket',
project_id='bar',
revision='deadbeef',
config_content='name: "bucket"\n',
)
]
self.assertEqual(actual, expected)
def cfg_validation_test(self, cfg, expected_messages):
ctx = config_component.validation.Context()
ctx.config_set = 'projects/chromium'
config.validate_buildbucket_cfg(cfg, ctx)
self.assertEqual(expected_messages, ctx.result().messages)
def test_validate_buildbucket_cfg_success(self):
self.cfg_validation_test(
project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(
name='good.name',
acls=[
project_config_pb2.Acl(
group='writers', role=project_config_pb2.Acl.WRITER)
],
),
project_config_pb2.Bucket(
name='good.name2',
acls=[
project_config_pb2.Acl(
identity='[email protected]', role=project_config_pb2.Acl.READER),
project_config_pb2.Acl(
identity='user:[email protected]', role=project_config_pb2.Acl.READER),
],
)
]),
[]
)
def test_validate_buildbucket_cfg_fail(self):
self.cfg_validation_test(
project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(
name='a',
acls=[
project_config_pb2.Acl(
group='writers', identity='[email protected]',
role=project_config_pb2.Acl.READER),
project_config_pb2.Acl(role=project_config_pb2.Acl.READER),
]
),
project_config_pb2.Bucket(
name='b',
acls=[
project_config_pb2.Acl(
identity='ldap', role=project_config_pb2.Acl.READER),
project_config_pb2.Acl(
group='[email protected]', role=project_config_pb2.Acl.READER),
]
),
project_config_pb2.Bucket(),
]),
[
errmsg(
'Bucket a: acl #1: either group or identity must be set, '
'not both'),
errmsg('Bucket a: acl #2: group or identity must be set'),
errmsg('Bucket b: acl #1: Identity has invalid format: ldap'),
errmsg('Bucket b: acl #2: invalid group: [email protected]'),
errmsg('Bucket #3: invalid name: Bucket not specified'),
]
)
def test_validate_buildbucket_cfg_unsorted(self):
self.cfg_validation_test(
project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(name='b'),
project_config_pb2.Bucket(name='a')
]),
[
validation_context.Message(
severity=logging.WARNING,
text='Buckets are not sorted by name'),
]
)
def test_validate_buildbucket_cfg_duplicate_names(self):
config.Bucket(
id='master.tryserver.v8',
project_id='v8',
revision='deadbeef',
config_content=MASTER_TRYSERVER_V8_CONFIG_TEXT).put()
self.cfg_validation_test(
project_config_pb2.BuildbucketCfg(
buckets=[
project_config_pb2.Bucket(name='a'),
project_config_pb2.Bucket(name='a'),
project_config_pb2.Bucket(name='master.tryserver.chromium.linux'),
project_config_pb2.Bucket(name='master.tryserver.v8'),
]),
[
errmsg('Bucket a: duplicate bucket name'),
errmsg(
'Bucket master.tryserver.v8: '
'this name is already reserved by another project'),
]
)
def errmsg(text):
return validation_context.Message(severity=logging.ERROR, text=text)
| bsd-3-clause | 8,363,048,127,511,030,000 | 29.238839 | 80 | 0.57939 | false |
jbzdak/data-base-checker | grading/tests/test_models.py | 1 | 7167 | # coding=utf-8
from django.contrib.auth.models import User, Group
from django.test.testcases import TestCase
from grading.models import *
from grading.autograding import get_autograders
class StudentTest(TestCase):
def test_user_creation_creates_student(self):
u = User.objects.create(username = "test1", email="[email protected]")
u.groups.add(Group.objects.get(name = "students"))
u.save()
qs = Student.objects.filter(user=u)
self.assertEqual(len(qs), 1)
def test_can_update_user(self):
u = User.objects.create(username = "test1", email="[email protected]")
u.groups.add(Group.objects.get(name = "students"))
u.save()
u.email = "[email protected]"
u.save()
def test_student_not_created_for_inactive_users(self):
u = User.objects.create(username = "test1", email="[email protected]", is_active=False)
u.groups.add(Group.objects.get(name = "students"))
u.save()
qs = Student.objects.filter(user=u)
self.assertEqual(len(qs), 0)
def test_student_not_created_for_staff_users(self):
u = User.objects.create(username = "test1", email="[email protected]", is_staff=True)
u.groups.add(Group.objects.get(name = "students"))
u.save()
qs = Student.objects.filter(user=u)
self.assertEqual(len(qs), 0)
class ActivityTest(TestCase):
def test_sort_key_auto_set(self):
a = GradeableActivity.objects.create(name="foo")
self.assertEqual(a.sort_key, "foo")
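# Shared fixture: two students ("test1" and "other") enrolled in two separate
# courses, each course carrying its own gradeable activity.  The grading tests
# below build on this layout.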
class TestFixture(TestCase):
def setUp(self):
self.u = User.objects.create(username = "test1", email="[email protected]")
self.u.groups.add(Group.objects.get(name = "students"))
self.u.save()
self.student = Student.objects.filter(user=self.u).get()
self.other_user = User.objects.create(username = "other", email="[email protected]")
self.other_user.groups.add(Group.objects.get(name = "students"))
self.other_user.save()
self.other_student =Student.objects.filter(user=self.other_user).get()
self.group = Course.objects.create(name = "course")
self.other_group = Course.objects.create(name = "other_group")
self.student.course = self.group
self.student.save()
self.other_student.course = self.other_group
self.other_student.save()
self.activity = GradeableActivity(name = "activity")
self.activity.save()
self.activity.courses.add(self.group)
self.activity.save()
self.otheractivity = GradeableActivity(name = "other")
self.otheractivity.save()
self.otheractivity.courses.add(self.other_group)
self.otheractivity.save()
class TestGrades(TestFixture):
def test_sync_grades_when_activity_is_added_to_group(self):
        # After setup it should be so:
self.assertEqual(len(self.student.grades.all()), 1)
#Other student shouldn't change
self.assertEqual(len(self.other_student.grades.all()), 1)
activity = GradeableActivity(name = "activity2")
activity.save()
activity.courses.add(self.group)
activity.save()
#Now we should have two grades
self.assertEqual(len(self.student.grades.all()), 2)
#Other student shouldn't change
self.assertEqual(len(self.other_student.grades.all()), 1)
for g in self.student.grades.all():
self.assertEqual(g.grade, 2.0)
def test_sync_grades_when_student_is_added_to_group(self):
u = User.objects.create(username = "test2", email="[email protected]")
u.groups.add(Group.objects.get(name = "students"))
u.save()
student = Student.objects.filter(user=u).get()
# Before addition there should be no grades
self.assertEqual(len(student.grades.all()), 0)
student.course = self.group
student.save()
self.assertEqual(len(student.grades.all()), 1)
class TestGrading(TestFixture):
def setUp(self):
super(TestGrading, self).setUp()
self.grade_part_1 = GradePart.objects.create(
weight = 1,
required = True,
activity = self.activity,
name = "Zadanie 1"
)
self.grade_part_2 = GradePart.objects.create(
weight = 2,
required = False,
activity = self.activity,
name = "Zadanie 2"
)
self.activity.default_grade = 812.0
self.activity.save()
    def test_default_grade_returned_when_all_activities_unfinished(self):
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
self.assertIn('Zadanie 1', sg.short_description)
    def test_default_grade_returned_when_required_activities_unfinished(self):
GradePart.objects.grade(self.grade_part_2, self.student, 5)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
self.assertIn('Zadanie 1', sg.short_description)
def test_grade_calculated_when_all_required_activitees_finished(self):
GradePart.objects.grade(self.grade_part_1, self.student, 5)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 3)
def test_grade_calculated_when_all_activities_finished(self):
GradePart.objects.grade(self.grade_part_2, self.student, 3)
GradePart.objects.grade(self.grade_part_1, self.student, 3)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 3)
    def test_default_grade_returned_when_required_activity_has_grade_below_passing(self):
GradePart.objects.grade(self.grade_part_1, self.student, 2)
GradePart.objects.grade(self.grade_part_2, self.student, 3)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
def test_grade_gets_updated(self):
GradePart.objects.grade(self.grade_part_1, self.student, 5.0)
self.assertEqual(StudentGrade.objects.get(student=self.student, activity=self.activity).grade, 3)
def test_grade_gets_updated_if_we_add_new_grade_part(self):
#Updates the database so grade is calculated
self.test_grade_calculated_when_all_activities_finished()
#Sanity check
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertNotEqual(sg.grade, 812.0)
GradePart.objects.create(
name = "test-xxx",
required = True,
activity = self.activity,
)
sg = StudentGrade()
grade_student(self.activity, self.student, sg)
self.assertEqual(sg.grade, 812.0)
class TestAutogradeableGradePart(TestFixture):
def test_name_is_set(self):
model = AutogradeableGradePart.objects.create(
activity = self.activity,
autograding_controller = get_autograders()['test']
)
self.assertEqual(model.name, model.autograding_controller)
| gpl-3.0 | 8,593,671,345,695,744,000 | 32.180556 | 104 | 0.637784 | false |
UKPLab/sentence-transformers | tests/test_util.py | 1 | 2711 | from sentence_transformers import util, SentenceTransformer
import unittest
import numpy as np
import sklearn
import torch
class UtilTest(unittest.TestCase):
def test_normalize_embeddings(self):
"""Tests the correct computation of util.normalize_embeddings"""
embedding_size = 100
a = torch.tensor(np.random.randn(50, embedding_size))
a_norm = util.normalize_embeddings(a)
for embedding in a_norm:
assert len(embedding) == embedding_size
emb_norm = torch.norm(embedding)
assert abs(emb_norm.item() - 1) < 0.0001
def test_pytorch_cos_sim(self):
"""Tests the correct computation of util.pytorch_cos_scores"""
a = np.random.randn(50, 100)
b = np.random.randn(50, 100)
sklearn_pairwise = sklearn.metrics.pairwise.cosine_similarity(a, b)
pytorch_cos_scores = util.pytorch_cos_sim(a, b).numpy()
for i in range(len(sklearn_pairwise)):
for j in range(len(sklearn_pairwise[i])):
assert abs(sklearn_pairwise[i][j] - pytorch_cos_scores[i][j]) < 0.001
def test_semantic_search(self):
"""Tests util.semantic_search function"""
num_queries = 20
num_k = 10
doc_emb = torch.tensor(np.random.randn(1000, 100))
q_emb = torch.tensor(np.random.randn(num_queries, 100))
hits = util.semantic_search(q_emb, doc_emb, top_k=num_k, query_chunk_size=5, corpus_chunk_size=17)
assert len(hits) == num_queries
assert len(hits[0]) == num_k
#Sanity Check of the results
cos_scores = util.pytorch_cos_sim(q_emb, doc_emb)
cos_scores_values, cos_scores_idx = cos_scores.topk(num_k)
cos_scores_values = cos_scores_values.cpu().tolist()
cos_scores_idx = cos_scores_idx.cpu().tolist()
for qid in range(num_queries):
for hit_num in range(num_k):
assert hits[qid][hit_num]['corpus_id'] == cos_scores_idx[qid][hit_num]
assert np.abs(hits[qid][hit_num]['score'] - cos_scores_values[qid][hit_num]) < 0.001
def test_paraphrase_mining(self):
model = SentenceTransformer('paraphrase-distilroberta-base-v1')
sentences = [
"This is a test", "This is a test!",
"The cat sits on mat", "The cat sits on the mat", "On the mat a cat sits",
"A man eats pasta", "A woman eats pasta", "A man eats spaghetti"
]
duplicates = util.paraphrase_mining(model, sentences)
for score, a, b in duplicates:
if score > 0.5:
assert (a,b) in [(0,1), (2,3), (2,4), (3,4), (5,6), (5,7), (6,7)]
if "__main__" == __name__:
unittest.main() | apache-2.0 | -2,246,015,266,875,438,000 | 37.197183 | 106 | 0.600516 | false |
mschmittfull/nbodykit | nbodykit/core/source/Grid.py | 1 | 3878 | from nbodykit.core import Source
from nbodykit.core.source import Painter
from bigfile import BigFileMPI
from pmesh.pm import RealField, ComplexField, ParticleMesh
import numpy
from pmesh import window
class GridSource(Source):
plugin_name = "Source.Grid"
def __init__(self, path, dataset, attrs={}, painter=Painter()):
        # cannot do this at module scope because the module file is run before
        # the plugin_manager is initialized.
self.cat = BigFileMPI(comm=self.comm, filename=path)[dataset]
self._attrs = {}
self._attrs.update(self.cat.attrs)
self._attrs.update(attrs)
for key in self.attrs.keys():
self.attrs[key] = numpy.asarray(self.attrs[key])
if self.comm.rank == 0:
self.logger.info("attrs = %s" % self.attrs)
self.painter= painter
self.Nmesh = self.attrs['Nmesh'].squeeze()
if 'shotnoise' in self.attrs:
self.shotnoise = self.attrs['shotnoise'].squeeze()
else:
self.shotnoise = 0
if self.cat.dtype.kind == 'c':
self.isfourier = True
else:
self.isfourier = False
@property
def columns(self):
return []
@property
def attrs(self):
return self._attrs
@classmethod
def fill_schema(cls):
s = cls.schema
        s.description = "read a gridded field stored in a bigfile dataset"
s.add_argument("path", help="the file path to load the data from")
s.add_argument("dataset", help="dataset")
s.add_argument("attrs", type=dict, help="override attributes from the file")
s.add_argument("painter", type=Painter.from_config, help="painter parameters")
# XXX for painting needs some refactoring
s.add_argument("painter.paintbrush", choices=list(window.methods.keys()), help="paintbrush")
s.add_argument("painter.frho", type=str, help="A python expresion for transforming the real space density field. variables: rho. example: 1 + (rho - 1)**2")
s.add_argument("painter.fk", type=str, help="A python expresion for transforming the fourier space density field. variables: k, kx, ky, kz. example: exp(-(k * 0.5)**2). applied before frho ")
s.add_argument("painter.normalize", type=bool, help="Normalize the field to set mean == 1. Applied before fk.")
s.add_argument("painter.setMean", type=float, help="Set the mean. Applied after normalize.")
s.add_argument("painter.interlaced", type=bool, help="interlaced.")
def read(self, columns):
yield [None for key in columns]
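    # paint() reconstructs the stored field: each rank reads its slab of the
    # bigfile dataset (complex or real, depending on the on-disk dtype),
    # unsorts it into a pmesh field, and resamples onto the target mesh when
    # the stored Nmesh differs from the requested one.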
def paint(self, pm):
if self.painter is None:
raise ValueError("No painter is provided")
real = RealField(pm)
if any(pm.Nmesh != self.Nmesh):
pmread = ParticleMesh(BoxSize=pm.BoxSize, Nmesh=(self.Nmesh, self.Nmesh, self.Nmesh),
dtype='f4', comm=self.comm)
else:
pmread = real.pm
ds = self.cat
if self.isfourier:
if self.comm.rank == 0:
self.logger.info("reading complex field")
complex2 = ComplexField(pmread)
assert self.comm.allreduce(complex2.size) == ds.size
start = sum(self.comm.allgather(complex2.size)[:self.comm.rank])
end = start + complex2.size
complex2.unsort(ds[start:end])
complex2.resample(real)
else:
if self.comm.rank == 0:
self.logger.info("reading real field")
real2 = RealField(pmread)
start = sum(self.comm.allgather(real2.size)[:self.comm.rank])
end = start + real2.size
real2.unsort(ds[start:end])
real2.resample(real)
real.shotnoise = self.shotnoise
# apply transformations
self.painter.transform(self, real)
return real
| gpl-3.0 | -1,629,912,195,537,198,300 | 34.907407 | 199 | 0.610366 | false |
penny4860/SVHN-deep-digit-detector | digit_detector/extractor.py | 1 | 3315 | #-*- coding: utf-8 -*-
import cv2
import numpy as np
import progressbar
import digit_detector.region_proposal as rp
class Extractor:
def __init__(self, region_proposer, annotator, overlap_calculator):
"""
overlap_calculator : OverlapCalculator
instance of OverlapCalculator class
"""
self._positive_samples = []
self._negative_samples = []
self._positive_labels = []
self._negative_labels = []
self._region_proposer = region_proposer
self._annotator = annotator
self._overlap_calculator = overlap_calculator
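    # For every image: propose candidate regions, compute overlap (IoU) against
    # the annotated boxes, keep candidates above positive_overlap_thd as
    # positive samples (labelled with the matching digit), keep candidates
    # whose best overlap stays below negative_overlap_thd as negatives
    # (label 0), and always add the ground-truth patches themselves as
    # positives.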
def extract_patch(self, image_files, patch_size, positive_overlap_thd, negative_overlap_thd):
bar = progressbar.ProgressBar(widgets=[' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ',], maxval=len(image_files)).start()
for i, image_file in enumerate(image_files):
image = cv2.imread(image_file)
# 1. detect regions
candidate_regions = self._region_proposer.detect(image)
candidate_patches = candidate_regions.get_patches(dst_size=patch_size)
candidate_boxes = candidate_regions.get_boxes()
# 2. load ground truth
true_boxes, true_labels = self._annotator.get_boxes_and_labels(image_file)
true_patches = rp.Regions(image, true_boxes).get_patches(dst_size=patch_size)
# 3. calc overlap
overlaps = self._overlap_calculator.calc_ious_per_truth(candidate_boxes, true_boxes)
# 4. add patch to the samples
self._select_positive_patch(candidate_patches, true_labels, overlaps, positive_overlap_thd)
self._append_positive_patch(true_patches, true_labels)
self._select_negative_patch(candidate_patches, overlaps, negative_overlap_thd)
bar.update(i)
bar.finish()
return self._merge_sample()
def _append_positive_patch(self, true_patches, true_labels):
self._positive_samples.append(true_patches)
self._positive_labels.append(true_labels)
def _select_positive_patch(self, candidate_patches, true_labels, overlaps, overlap_thd):
for i, label in enumerate(true_labels):
samples = candidate_patches[overlaps[i,:]>overlap_thd]
labels_ = np.zeros((len(samples), )) + label
self._positive_samples.append(samples)
self._positive_labels.append(labels_)
def _select_negative_patch(self, candidate_patches, overlaps, overlap_thd):
overlaps_max = np.max(overlaps, axis=0)
self._negative_samples.append(candidate_patches[overlaps_max<overlap_thd])
def _merge_sample(self):
negative_samples = np.concatenate(self._negative_samples, axis=0)
negative_labels = np.zeros((len(negative_samples), 1))
positive_samples = np.concatenate(self._positive_samples, axis=0)
positive_labels = np.concatenate(self._positive_labels, axis=0).reshape(-1,1)
samples = np.concatenate([negative_samples, positive_samples], axis=0)
labels = np.concatenate([negative_labels, positive_labels], axis=0)
return samples, labels
| mit | -4,793,058,175,960,898,000 | 40.962025 | 164 | 0.626546 | false |
akrherz/iem | scripts/climodat/qc_last_used_ob.py | 1 | 2574 | """Report any climodat sites without recent observations."""
# stdlib
import datetime
# Third Party
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn, logger
LOG = logger()
FLOOR = datetime.date.today() - datetime.timedelta(days=365)
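# Only observations newer than FLOOR (one year back) count as "recent" when
# deciding whether a tracked station is still reporting.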
def remove_track(iemid):
"""Cull the defunct tracks."""
pgconn = get_dbconn("mesosite")
cursor = pgconn.cursor()
cursor.execute(
"DELETE from station_attributes where iemid = %s and "
"attr = 'TRACKS_STATION'",
(iemid,),
)
cursor.close()
pgconn.commit()
def check_last(station, row):
"""Do the work."""
trackstation, tracknetwork = row["tracks"].split("|")
df = read_sql(
"SELECT max(day) from summary s JOIN stations t on "
"(s.iemid = t.iemid) WHERE t.id = %s and t.network = %s and "
"s.day > %s and (s.max_tmpf is not null or "
"s.pday is not null)",
get_dbconn("iem"),
index_col=None,
params=(trackstation, tracknetwork, FLOOR),
)
lastdate = df.iloc[0]["max"]
if lastdate is not None:
return
LOG.info(
"%s %s %.2fE %.2fN tracks non-reporting %s[%s], removing track",
station,
row["name"],
row["lon"],
row["lat"],
trackstation,
tracknetwork,
)
remove_track(row["iemid"])
def set_offline(iemid):
"""Set the station to being offline."""
pgconn = get_dbconn("mesosite")
cursor = pgconn.cursor()
cursor.execute(
"UPDATE stations SET online = 'f', archive_end = 'TODAY' WHERE "
"iemid = %s",
(iemid,),
)
cursor.close()
pgconn.commit()
def main():
"""Go Main Go."""
sdf = read_sql(
"""
with locs as (
select s.iemid, id, network, value from stations s LEFT
JOIN station_attributes a on (s.iemid = a.iemid and
a.attr = 'TRACKS_STATION'))
select s.id, s.iemid, s.network, st_x(geom) as lon, st_y(geom) as lat,
s.name, l.value as tracks from stations S LEFT JOIN locs l on
(s.iemid = l.iemid) WHERE s.network ~* 'CLIMATE' and
substr(s.id, 3, 4) != '0000' and
substr(s.id, 3, 1) != 'C' ORDER by s.id ASC
""",
get_dbconn("mesosite"),
index_col="id",
)
for station, row in sdf.iterrows():
if row["tracks"] is None:
LOG.info("%s tracks no station, setting offline.", station)
set_offline(row["iemid"])
continue
check_last(station, row)
if __name__ == "__main__":
main()
| mit | -500,835,713,754,578,200 | 26.978261 | 78 | 0.560606 | false |
mskarbek/pysx | pysx.py | 1 | 1558 | #!/usr/bin/env python
import base64
import hashlib
from datetime import datetime
import hmac
import binascii
import sys
import json
import requests
import pytz
def hextobyte(hex_str):
bytes = []
hex_str = ''.join( hex_str.split(' ') )
for i in range(0, len(hex_str), 2):
bytes.append( chr( int (hex_str[i:i+2], 16 ) ) )
return ''.join( bytes )
request = {
'Date': datetime.now(pytz.timezone('GMT')).strftime('%a, %d %b %Y %H:%M:%S GMT'),
'Path': '?nodeList',
'Type': 'GET',
'Body': ''
}
pysx = {}
pysx['IP'] = sys.argv[1]
pysx['Key'] = base64.b64decode(sys.argv[2])
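# The decoded key is split the way the SX auth token is assembled below (a
# sketch of the observed layout, not an official spec): the first 20 bytes
# ('I', kept as hex) identify the user, the next 20 bytes ('K') are the
# HMAC-SHA1 secret, and the trailing 2 bytes ('P') are padding.  The request
# token 'A' is then base64(I || HMAC-SHA1(K, request) || P).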
pysx['I'] = ''.join(['%02X' % ord(x) for x in pysx['Key']]).strip()[0:40].lower()
pysx['K'] = pysx['Key'][20:40]
pysx['P'] = ''.join(['%02X' % ord(x) for x in pysx['Key']]).strip()[80:84].lower()
pysx['request'] = '{}\n{}\n{}\n{}\n'.format(
request['Type'],
request['Path'],
request['Date'],
hashlib.sha1(request['Body']).hexdigest()
)
pysx['H'] = hmac.new(pysx['K'], pysx['request'], hashlib.sha1).hexdigest()
pysx['A'] = base64.b64encode(hextobyte(pysx['I'] + pysx['H'] + pysx['P']))
headers = {
'Content-Type': 'application/json',
'User-Agent': 'pysx 0.0.1',
'Date': request['Date'],
'Authorization': 'SKY {}'.format(pysx['A'])
}
response = requests.get('https://{}/{}'.format(pysx['IP'], request['Path']), verify = False, headers = headers)
print '\n{}\n'.format(response.request.url)
print '{}\n'.format(response.request.headers)
print '{}\n'.format(response.headers)
print '{}\n'.format(response.text)
| apache-2.0 | 244,371,133,018,332,960 | 25.40678 | 111 | 0.59371 | false |
WemGL/alchemist | alchemist/javaparser.py | 1 | 4703 | from alchemist import Parser
import re
class JavaParser(Parser):
def __init__(self, kwargs):
self._file_extension = "Java"
self._current_match = None
self._fields = []
self._classname = ""
Parser.__init__(self, kwargs)
def parse(self):
fh = open(self.file)
comments = []
for line in fh:
if self.matched_class_pattern(line):
self.parse_class_match(comments)
elif self.matched_comment_pattern(line):
self.parse_comment_match(comments)
elif self.matched_field_pattern(line):
self.parse_field_match(comments)
elif self.matched_end_pattern(line):
self.write_constructor()
self.write_accessors_and_mutators()
self.parse_end_match()
self.file.close()
fh.close()
def matched_class_pattern(self, line):
self._current_match = re.compile(r'^C\s(([A-Z](?=[a-z])[a-z]+)+)$').search(line)
return self._current_match is not None
def parse_class_match(self, comments):
self._classname = self._current_match.group(1)
filename = "{}.{}".format(self._classname, self._file_extension.lower())
self.file = open(filename, "w")
self.format_and_write_comments(comments)
print("public class {} {{".format(self._classname), file=self.file)
def format_and_write_comments(self, comments):
if len(comments) > 0:
joined_comments = "\n".join(comments)
print(joined_comments, file=self.file)
comments.clear()
def matched_comment_pattern(self, line):
self._current_match = re.compile(r'^[/]{2}\s.*$').search(line)
return self._current_match is not None
def parse_comment_match(self, comments):
comments.append(self._current_match.group(0))
def matched_field_pattern(self, line):
self._current_match = re.compile(r'^F\s(\b(?:[a-z]+)(?=[A-Z]+)(?:[A-Za-z]+)|[a-z]+\b)\s*((?:[A-Z]?[a-z]+(?:[[]])?))$').search(line)
return self._current_match is not None
def parse_field_match(self, comments):
self.format_and_write_comments(comments)
type = self._current_match.group(2)
identifier = self._current_match.group(1)
field = dict()
field[type] = identifier
self._fields.append(field)
print(" {} {};".format(type, identifier), file=self.file)
def matched_end_pattern(self, line):
self._current_match = re.compile(r'^E$').search(line)
return self._current_match is not None
def write_constructor(self):
match_found = len(self._current_match.group(0)) > 0
if not match_found:
return
self.write_newline()
fields = ", ".join(self.format_type_and_identifier())
print(" public {}({}) {{".format(self._classname, fields), file=self.file)
for identifier in self.get_identifiers():
self.write_initialization_for(identifier)
print(" }", file=self.file)
self.write_newline()
def write_newline(self):
print("", file=self.file, end="\n")
def format_type_and_identifier(self):
return ["{} {}".format(list(field.keys())[0], list(field.values())[0]) for field in self._fields]
def get_identifiers(self):
return [list(field.values())[0] for field in self._fields]
def write_initialization_for(self, identifier):
print(" this.{} = {};".format(identifier, identifier), file=self.file)
def write_accessors_and_mutators(self):
for type_with_identifier in self._fields:
type = list(type_with_identifier.keys())[0]
identifier = list(type_with_identifier.values())[0]
self.write_accessor(type, identifier)
self.write_mutator(type, identifier)
def write_accessor(self, type, identifier):
print(" public {} get{}() {{".format(type, self.to_pascal_case(identifier)), file=self.file)
print(" return {};".format(identifier), file=self.file)
print(" }", file=self.file)
self.write_newline()
def to_pascal_case(self, identifier):
return re.sub(r'^[a-z]', lambda letter: letter.group(0).upper(), identifier)
def write_mutator(self, type, identifier):
print(" public void set{}({} {}) {{".format(self.to_pascal_case(identifier), type, identifier), file=self.file)
print(" this.{} = {};".format(identifier, identifier), file=self.file)
print(" }", file=self.file)
self.write_newline()
def parse_end_match(self):
print("}", file=self.file)
| gpl-3.0 | -5,193,369,130,521,656,000 | 36.927419 | 139 | 0.586647 | false |
vstconsulting/polemarch | polemarch/main/migrations/0004_auto_20170710_0857.py | 1 | 9227 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-09 22:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0003_initial'),
]
operations = [
migrations.CreateModel(
name='History',
fields=[
('id', models.AutoField(max_length=20, primary_key=True, serialize=False)),
('playbook', models.CharField(max_length=256)),
('start_time', models.DateTimeField(default=django.utils.timezone.now)),
('stop_time', models.DateTimeField(blank=True, null=True)),
('raw_args', models.TextField(default='')),
('raw_inventory', models.TextField(default='')),
('status', models.CharField(max_length=50)),
],
options={
'default_related_name': 'history',
},
),
migrations.CreateModel(
name='HistoryLines',
fields=[
('id', models.AutoField(max_length=20, primary_key=True, serialize=False)),
('line', models.TextField(default='')),
('line_number', models.IntegerField(default=0)),
('history', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='raw_history_line', related_query_name='raw_history_line', to='main.History')),
],
options={
'default_related_name': 'raw_history_line',
},
),
migrations.CreateModel(
name='Inventory',
fields=[
('id', models.AutoField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(default=uuid.uuid1, max_length=512)),
],
options={
'default_related_name': 'inventories',
},
),
migrations.CreateModel(
name='PeriodicTask',
fields=[
('id', models.AutoField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(default=uuid.uuid1, max_length=512)),
('playbook', models.CharField(max_length=256)),
('schedule', models.CharField(max_length=4096)),
('type', models.CharField(max_length=10)),
('inventory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodic_tasks', related_query_name='periodic_tasks', to='main.Inventory')),
],
options={
'default_related_name': 'periodic_tasks',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(default=uuid.uuid1, max_length=512)),
('repository', models.CharField(max_length=2048)),
('status', models.CharField(default='NEW', max_length=32)),
],
options={
'default_related_name': 'projects',
},
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(default=uuid.uuid1, max_length=256)),
('playbook', models.CharField(max_length=256)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasks', related_query_name='tasks', to='main.Project')),
],
options={
'default_related_name': 'tasks',
},
),
migrations.CreateModel(
name='TypesPermissions',
fields=[
('id', models.AutoField(max_length=20, primary_key=True, serialize=False)),
],
options={
'default_related_name': 'related_objects',
},
),
migrations.RemoveField(
model_name='host',
name='environment',
),
migrations.AddField(
model_name='group',
name='children',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='group',
name='parents',
field=models.ManyToManyField(blank=True, null=True, related_name='groups', related_query_name='childrens', to='main.Group'),
),
migrations.AlterField(
model_name='variable',
name='value',
field=models.CharField(max_length=2048, null=True),
),
migrations.AlterIndexTogether(
name='group',
index_together=set([('children', 'id'), ('children',)]),
),
migrations.DeleteModel(
name='Environment',
),
migrations.AddField(
model_name='typespermissions',
name='groups',
field=models.ManyToManyField(blank=True, null=True, related_name='related_objects', related_query_name='related_objects', to='main.Group'),
),
migrations.AddField(
model_name='typespermissions',
name='history',
field=models.ManyToManyField(blank=True, null=True, related_name='related_objects', related_query_name='related_objects', to='main.History'),
),
migrations.AddField(
model_name='typespermissions',
name='hosts',
field=models.ManyToManyField(blank=True, null=True, related_name='related_objects', related_query_name='related_objects', to='main.Host'),
),
migrations.AddField(
model_name='typespermissions',
name='inventories',
field=models.ManyToManyField(blank=True, null=True, related_name='related_objects', related_query_name='related_objects', to='main.Inventory'),
),
migrations.AddField(
model_name='typespermissions',
name='periodic_tasks',
field=models.ManyToManyField(blank=True, null=True, related_name='related_objects', related_query_name='related_objects', to='main.PeriodicTask'),
),
migrations.AddField(
model_name='typespermissions',
name='projects',
field=models.ManyToManyField(blank=True, null=True, related_name='related_objects', related_query_name='related_objects', to='main.Project'),
),
migrations.AddField(
model_name='typespermissions',
name='tasks',
field=models.ManyToManyField(blank=True, null=True, related_name='related_objects', related_query_name='related_objects', to='main.Task'),
),
migrations.AddField(
model_name='typespermissions',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_objects', related_query_name='related_objects', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='project',
name='groups',
field=models.ManyToManyField(blank=True, null=True, related_name='projects', to='main.Group'),
),
migrations.AddField(
model_name='project',
name='hosts',
field=models.ManyToManyField(blank=True, null=True, related_name='projects', to='main.Host'),
),
migrations.AddField(
model_name='project',
name='inventories',
field=models.ManyToManyField(blank=True, null=True, related_name='projects', to='main.Inventory'),
),
migrations.AddField(
model_name='periodictask',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodic_tasks', related_query_name='periodic_tasks', to='main.Project'),
),
migrations.AddField(
model_name='inventory',
name='groups',
field=models.ManyToManyField(related_name='inventories', to='main.Group'),
),
migrations.AddField(
model_name='inventory',
name='hosts',
field=models.ManyToManyField(related_name='inventories', to='main.Host'),
),
migrations.AddField(
model_name='history',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', related_query_name='history', to='main.Project'),
),
migrations.AlterIndexTogether(
name='historylines',
index_together=set([('line_number',), ('history',), ('history', 'line_number')]),
),
migrations.AlterIndexTogether(
name='history',
index_together=set([('id', 'project', 'playbook', 'status', 'start_time', 'stop_time')]),
),
]
| agpl-3.0 | -7,657,088,549,418,526,000 | 42.319249 | 183 | 0.563889 | false |
av8ramit/tensorflow | tensorflow/contrib/py2tf/converters/side_effect_guards.py | 1 | 6838 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adds guards against function calls with side effects.
Only standalone calls are guarded.
WARNING: This mechanism is incomplete. Particularly, it only guards the
arguments passed to functions, and does not account for indirectly modified
state.
Example:
y = tf.layers.dense(x) # Creates TF variable 'foo'
loss = loss(y)
opt.minimize(loss) # indirectly affects 'foo'
z = tf.get_variable('foo') # Indirectly affects `loss` and 'foo'
# Here, `loss` can be guarded. But `z` cannot.
# TODO(mdan): We should probably define a safe mode where we guard everything.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import ast_util
from tensorflow.contrib.py2tf.pyct import qual_names
from tensorflow.contrib.py2tf.pyct import templates
from tensorflow.contrib.py2tf.pyct import transformer
from tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno
class SymbolNamer(object):
"""Describes the interface for SideEffectGuardTransformer's namer."""
def new_symbol(self, name_root, reserved_locals):
"""Generate a new unique function_name.
Args:
name_root: String, used as stem in the new name.
reserved_locals: Set(string), additional local symbols that are reserved.
Returns:
String.
"""
raise NotImplementedError()
class SideEffectGuardTransformer(transformer.Base):
"""Adds control dependencies to functions with side effects."""
def __init__(self, context):
super(SideEffectGuardTransformer, self).__init__(context)
# pylint:disable=invalid-name
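  # Visits a statement list and, whenever a visited node requests it via the
  # INDENT_BLOCK_REMAINDER annotation, re-parents every following statement
  # into that node's body (e.g. under the generated control-dependency `with`
  # block), renaming aliased symbols along the way.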
def _visit_and_reindent(self, nodes):
new_nodes = []
current_dest = new_nodes
alias_map = {}
reindent_requested = False
for n in nodes:
n = self.visit(n)
# NOTE: the order in which these statements execute is important; in
# particular, watch out for ending up with cycles in the AST.
if alias_map:
n = ast_util.rename_symbols(n, alias_map)
if isinstance(n, (list, tuple)):
current_dest.extend(n)
else:
current_dest.append(n)
if anno.hasanno(n, anno.Basic.INDENT_BLOCK_REMAINDER):
reindent_requested = True
new_dest, new_alias_map = anno.getanno(
n, anno.Basic.INDENT_BLOCK_REMAINDER)
anno.delanno(n, anno.Basic.INDENT_BLOCK_REMAINDER)
new_alias_map.update(alias_map)
alias_map = new_alias_map
current_dest = new_dest
if reindent_requested and not current_dest:
# TODO(mdan): There may still be something that could be done.
raise ValueError('Unable to insert statement into the computation flow: '
'it is not followed by any computation which '
'the statement could gate.')
return new_nodes
def visit_FunctionDef(self, node):
node.body = self._visit_and_reindent(node.body)
return node
def visit_With(self, node):
node.body = self._visit_and_reindent(node.body)
return node
def visit_If(self, node):
node.body = self._visit_and_reindent(node.body)
node.orelse = self._visit_and_reindent(node.orelse)
return node
def visit_While(self, node):
node.body = self._visit_and_reindent(node.body)
node.orelse = self._visit_and_reindent(node.orelse)
return node
def visit_Expr(self, node):
self.generic_visit(node)
if isinstance(node.value, gast.Call):
# Patterns of single function calls, like:
# opt.minimize(loss)
# or:
# tf.py_func(...)
# First, attempt to gate future evaluation of args. If that's not
# possible, gate all remaining statements (and that may fail too, see
# _visit_and_reindent.
args_scope = anno.getanno(node.value, NodeAnno.ARGS_SCOPE)
# NOTE: We can't guard object attributes because they may not be writable.
guarded_args = tuple(
s for s in args_scope.used if not s.is_composite())
# TODO(mdan): Include all arguments which depended on guarded_args too.
# For example, the following will still cause a race:
# tf.assign(a, a + 1)
# b = a + 1
# tf.assign(a, a + 1) # Control deps here should include `b`
# c = b + 1
# Or maybe we should just raise an "unsafe assign" error?
if guarded_args:
# The aliases may need new names to avoid incorrectly making them local.
# TODO(mdan): This is brutal. It will even rename modules - any fix?
need_alias = tuple(
s for s in guarded_args if s not in args_scope.parent.modified)
aliased_new_names = tuple(
qual_names.QN(
self.context.namer.new_symbol(
s.ssf(), args_scope.parent.referenced)) for s in need_alias)
alias_map = dict(zip(need_alias, aliased_new_names))
if len(guarded_args) == 1:
s, = guarded_args
aliased_guarded_args = alias_map.get(s, s)
else:
aliased_guarded_args = gast.Tuple(
[alias_map.get(s, s).ast() for s in guarded_args], None)
template = """
with py2tf_utils.control_dependency_on_returns(tf, call):
aliased_guarded_args = py2tf_utils.alias_tensors(tf, guarded_args)
"""
control_deps_guard = templates.replace(
template,
call=node.value,
aliased_guarded_args=aliased_guarded_args,
guarded_args=guarded_args)[-1]
else:
alias_map = {}
template = """
with py2tf_utils.control_dependency_on_returns(tf, call):
pass
"""
control_deps_guard = templates.replace(template, call=node.value)[-1]
control_deps_guard.body = []
node = control_deps_guard
anno.setanno(node, anno.Basic.INDENT_BLOCK_REMAINDER,
(node.body, alias_map))
return node
# pylint:enable=invalid-name
def transform(node, context):
return SideEffectGuardTransformer(context).visit(node)
| apache-2.0 | 6,557,334,343,230,137,000 | 35.179894 | 80 | 0.652676 | false |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ParamTests/MHDfluid.py | 1 | 13757 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import memory_profiler
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
#@profile
def foo():
m = 10
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx+ 0
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
# parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = UnitSquareMesh(nn,nn)
order = 1
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "DG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity, Pressure, Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, b, p, r) = TrialFunctions(W)
(v, c, q, s) = TestFunctions(W)
kappa = 1.0
Mu_m =1e1
MU = 1.0/1
IterType = 'CD'
Split = "Yes"
Saddle = "No"
Stokes = "No"
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
# MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
Hiptmairtol = 1e-5
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, Hiptmairtol, params)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")
#plot(p_k, interactive = True)
b_t = TrialFunction(Velocity)
c_t = TestFunction(Velocity)
#print assemble(inner(b,c)*dx).array().shape
#print mat
#ShiftedMass = assemble(inner(mat*b,c)*dx)
#as_vector([inner(b,c)[0]*b_k[0],inner(b,c)[1]*(-b_k[1])])
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
#plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, FSpaces,F_M,F_NS, u_k,b_k,params,IterType,"DG",Saddle,Stokes)
RHSform = forms.PicardRHS(mesh, FSpaces, u_k, p_k, b_k, r_k, params,"DG",Saddle,Stokes)
bcu = DirichletBC(Velocity,Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(Magnetic,Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(Lagrange,Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
parameters['linear_algebra_backend'] = 'uBLAS'
SetupType = 'Matrix'
BC = MHDsetup.BoundaryIndices(mesh)
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
# FSpaces = [Velocity,Magnetic,Pressure,Lagrange]
if IterType == "CD":
MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n")
Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType)
Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
A = Fnlin+Alin
A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
u = b.duplicate()
u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-5
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
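        # Picard iteration: reassemble the nonlinear blocks around the current
        # iterate (u_k, p_k, b_k, r_k), solve the coupled saddle-point system,
        # and repeat until the update norm eps drops below tol or maxiter is
        # reached.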
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
AssembleTime = time.time()
if IterType == "CD":
MO.StrTimePrint("MHD CD RHS assemble, time: ", time.time()-AssembleTime)
b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "CD",IterType)
else:
MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n")
if iter == 1:
Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType)
Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
A = Fnlin+Alin
A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
u = b.duplicate()
else:
Fnline,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
A = Fnlin+Alin
A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
# AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
# A,b = CP.Assemble(AA,bb)
# if iter == 1:
MO.StrTimePrint("MHD total assemble, time: ", time.time()-AssembleTime)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
print "Inititial guess norm: ", u.norm()
u = b.duplicate()
#A,Q
if IterType == 'Full':
n = FacetNormal(mesh)
mat = as_matrix([[b_k[1]*b_k[1],-b_k[1]*b_k[0]],[-b_k[1]*b_k[0],b_k[0]*b_k[0]]])
F = CP.Scipy2PETSc(Fnlin[0])
a = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1/2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1/2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
ShiftedMass = assemble(a)
bcu.apply(ShiftedMass)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
else:
F = CP.Scipy2PETSc(Fnlin[0])
kspF = NSprecondSetup.LSCKSPnonlinear(F)
stime = time.time()
u, mits,nsits = S.solve(A,b,u,params,W,'Direct',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
Soltime = time.time()- stime
Mits += mits
NSits += nsits
SolutionTime += Soltime
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()]
#
# ExactSolution = [u0,p0,b0,r0]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "DG")
#
# if xx > 1:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
#
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
#
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
#
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
#
#
#
#
# import pandas as pd
#
#
#
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable
#
#
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable
#
import pandas as pd
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(interpolate(u0,Velocity))
#
# plot(p_k)
#
# plot(interpolate(p0,Pressure))
#
# plot(b_k)
# plot(interpolate(b0,Magnetic))
#
# plot(r_k)
# plot(interpolate(r0,Lagrange))
#
# interactive()
interactive()
foo()
| mit | -2,145,468,271,991,474,000 | 36.080863 | 248 | 0.582758 | false |
makerdao/maker.py | pymaker/vault.py | 1 | 2652 | # This file is part of Maker Keeper Framework.
#
# Copyright (C) 2017-2018 reverendus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from web3 import Web3
from pymaker import Contract, Address, Transact
class DSVault(Contract):
"""A client for the `DSVault` contract.
You can find the source code of the `DSVault` contract here:
<https://github.com/dapphub/ds-vault>.
Attributes:
        web3: An instance of `Web3` from `web3.py`.
address: Ethereum address of the `DSVault` contract.
"""
abi = Contract._load_abi(__name__, 'abi/DSVault.abi')
bin = Contract._load_bin(__name__, 'abi/DSVault.bin')
def __init__(self, web3: Web3, address: Address):
assert(isinstance(web3, Web3))
assert(isinstance(address, Address))
self.web3 = web3
self.address = address
self._contract = self._get_contract(web3, self.abi, address)
@staticmethod
def deploy(web3: Web3):
"""Deploy a new instance of the `DSVault` contract.
Args:
            web3: An instance of `Web3` from `web3.py`.
Returns:
A `DSVault` class instance.
"""
return DSVault(web3=web3, address=Contract._deploy(web3, DSVault.abi, DSVault.bin, []))
def authority(self) -> Address:
"""Return the current `authority` of a `DSAuth`-ed contract.
Returns:
The address of the current `authority`.
"""
return Address(self._contract.call().authority())
def set_authority(self, address: Address) -> Transact:
"""Set the `authority` of a `DSAuth`-ed contract.
Args:
address: The address of the new `authority`.
Returns:
A :py:class:`pymaker.Transact` instance, which can be used to trigger the transaction.
"""
assert(isinstance(address, Address))
return Transact(self, self.web3, self.abi, self.address, self._contract, 'setAuthority', [address.address])
def __repr__(self):
return f"DSVault('{self.address}')"
| agpl-3.0 | 8,156,267,412,930,917,000 | 33 | 115 | 0.654223 | false |
redeyser/IceCash2 | clientEgais.py | 1 | 39275 | #!/usr/bin/python
# -*- coding: utf-8
import httplib, urllib,time
import requests
import xml.etree.ElementTree as etree
import re
from icelog import *
from my import curdate2my
from datetime import datetime
import dbIceCash as db
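# XML namespace map used when parsing documents coming back from the EGAIS
# transport module (waybills, tickets, rests, client references, ...).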
ns={\
"c":"http://fsrar.ru/WEGAIS/Common",\
"wbr":"http://fsrar.ru/WEGAIS/TTNInformF2Reg",\
"pref":"http://fsrar.ru/WEGAIS/ProductRef_v2",\
"oref":"http://fsrar.ru/WEGAIS/ClientRef_v2",\
"rc":"http://fsrar.ru/WEGAIS/ReplyClient_v2",\
"ns":"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01",\
"wb":"http://fsrar.ru/WEGAIS/TTNSingle_v2",\
"xsi":"http://www.w3.org/2001/XMLSchema-instance",\
"wt":"http://fsrar.ru/WEGAIS/ConfirmTicket",
"qp":"http://fsrar.ru/WEGAIS/QueryParameters",\
'tc':"http://fsrar.ru/WEGAIS/Ticket",\
"rst":"http://fsrar.ru/WEGAIS/ReplyRests_v2",\
'wa':"http://fsrar.ru/WEGAIS/ActTTNSingle_v2",\
'ttn':"http://fsrar.ru/WEGAIS/ReplyNoAnswerTTN",\
'qp':"http://fsrar.ru/WEGAIS/InfoVersionTTN"
}
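# Request/document templates posted to the transport module.  The %...% markers
# are substituted before sending: protocol version choice, client lookup by INN,
# outgoing WayBill_v2 with its positions, WayBillAct_v2 acceptance acts, retail
# cheques, a rests (stock) query, a re-send request for a TTN, and the NATTN
# query for waybills that never received an answer.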
XML_VERSION=u"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version=\"1.0\"
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"
xmlns:ns=\"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01\"
xmlns:qp=\"http://fsrar.ru/WEGAIS/InfoVersionTTN\">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:InfoVersionTTN>
<qp:ClientId>%fsrar_id%</qp:ClientId>
<qp:WBTypeUsed>%VERSION%</qp:WBTypeUsed>
</ns:InfoVersionTTN>
</ns:Document>
</ns:Documents>
"""
XML_GET_CLIENTS=u"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version=\"1.0\"
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"
xmlns:ns=\"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01\"
xmlns:oref=\"http://fsrar.ru/WEGAIS/ClientRef_v2\"
xmlns:qp=\"http://fsrar.ru/WEGAIS/QueryParameters\">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryClients_v2>
<qp:Parameters>
<qp:Parameter>
<qp:Name>ИНН</qp:Name>
<qp:Value>%INN%</qp:Value>
</qp:Parameter>
</qp:Parameters>
</ns:QueryClients_v2>
</ns:Document>
</ns:Documents>
"""
XML_SEND_WAYBILL_HEAD="""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns= "http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:c="http://fsrar.ru/WEGAIS/Common"
xmlns:oref="http://fsrar.ru/WEGAIS/ClientRef_v2"
xmlns:pref="http://fsrar.ru/WEGAIS/ProductRef_v2"
xmlns:wb="http://fsrar.ru/WEGAIS/TTNSingle_v2">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:WayBill_v2>
<wb:Identity>%identity%</wb:Identity>
<wb:Header>
<wb:NUMBER>%number%</wb:NUMBER>
<wb:Date>%dt%</wb:Date>
<wb:ShippingDate>%dt%</wb:ShippingDate>
<wb:Type>%type%</wb:Type>
<wb:Shipper>
<oref:UL>
<oref:INN>%inn%</oref:INN><oref:KPP>%kpp%</oref:KPP><oref:ClientRegId>%regid%</oref:ClientRegId>
<oref:ShortName>%name%</oref:ShortName><oref:FullName>%name%</oref:FullName>
<oref:address>
<oref:Country>643</oref:Country><oref:RegionCode>42</oref:RegionCode>
<oref:description></oref:description>
</oref:address>
</oref:UL>
</wb:Shipper>
<wb:Consignee>
<oref:UL>
<oref:INN>%send_inn%</oref:INN><oref:KPP>%send_kpp%</oref:KPP><oref:ClientRegId>%send_regid%</oref:ClientRegId>
<oref:ShortName>%send_name%</oref:ShortName><oref:FullName>%send_name%</oref:FullName>
<oref:address>
<oref:Country>643</oref:Country><oref:RegionCode>42</oref:RegionCode>
<oref:description></oref:description>
</oref:address>
</oref:UL>
</wb:Consignee>
<wb:Transport>
<wb:TRAN_TYPE></wb:TRAN_TYPE>
<wb:TRAN_COMPANY></wb:TRAN_COMPANY>
<wb:TRAN_TRAILER></wb:TRAN_TRAILER>
<wb:TRAN_CAR></wb:TRAN_CAR>
<wb:TRAN_CUSTOMER></wb:TRAN_CUSTOMER>
<wb:TRAN_DRIVER></wb:TRAN_DRIVER>
<wb:TRAN_LOADPOINT></wb:TRAN_LOADPOINT>
<wb:TRAN_UNLOADPOINT></wb:TRAN_UNLOADPOINT>
<wb:TRAN_FORWARDER></wb:TRAN_FORWARDER>
<wb:TRAN_REDIRECT></wb:TRAN_REDIRECT>
</wb:Transport>
<wb:Base>waybill doc</wb:Base>
<wb:Note>NOTE</wb:Note>
</wb:Header>
<wb:Content>
%content%
</wb:Content>
</ns:WayBill_v2>
</ns:Document>
</ns:Documents>
"""
XML_SEND_WAYBILL_CONTENT="""
<wb:Position>
<wb:Quantity>%quantity%</wb:Quantity><wb:Price>%price%</wb:Price><wb:Identity>%identity%</wb:Identity>
<wb:InformF1><pref:RegId>%inform_a%</pref:RegId></wb:InformF1>
<wb:InformF2><pref:InformF2Item><pref:F2RegId>%inform_b%</pref:F2RegId></pref:InformF2Item></wb:InformF2>
<wb:Product>
<pref:Type>%pref_type%</pref:Type><pref:FullName>%shortname%</pref:FullName>
<pref:ShortName>%shortname%</pref:ShortName>
<pref:AlcCode>%alccode%</pref:AlcCode>
<pref:Capacity>%capacity%</pref:Capacity>
<pref:AlcVolume>%alcvolume%</pref:AlcVolume>
<pref:ProductVCode>%productvcode%</pref:ProductVCode>
<pref:UnitType>%packet%</pref:UnitType>
<pref:Producer>
<oref:UL>
<oref:INN>%inn%</oref:INN><oref:KPP>%kpp%</oref:KPP>
<oref:ClientRegId>%regid%</oref:ClientRegId><oref:ShortName>%oref_shortname%</oref:ShortName>
<oref:FullName>%oref_shortname%</oref:FullName>
<oref:address>
<oref:Country>643</oref:Country><oref:RegionCode>42</oref:RegionCode><oref:description></oref:description>
</oref:address>
</oref:UL>
</pref:Producer>
</wb:Product>
</wb:Position>
"""
XML_SEND_ACT="""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<ns:Documents Version=\"1.0\"
xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"
xmlns:ns= \"http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01\"
xmlns:oref=\"http://fsrar.ru/WEGAIS/ClientRef_v2\"
xmlns:pref=\"http://fsrar.ru/WEGAIS/ProductRef_v2\"
xmlns:wa= \"http://fsrar.ru/WEGAIS/ActTTNSingle_v2\">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:WayBillAct_v2>
<wa:Header>
<wa:IsAccept>%accept%</wa:IsAccept>
<wa:ACTNUMBER>%iddoc%</wa:ACTNUMBER>
<wa:ActDate>%date%</wa:ActDate>
<wa:WBRegId>%wb_RegId%</wa:WBRegId>
<wa:Note></wa:Note>
</wa:Header>
<wa:Content>
%content%
</wa:Content>
</ns:WayBillAct_v2>
</ns:Document>
</ns:Documents>
"""
XML_ACT_CONTENT="""
<wa:Position>
\t<wa:Identity>%identity%</wa:Identity>
\t<wa:InformF2RegId>%wb_RegId%</wa:InformF2RegId>
\t<wa:RealQuantity>%real%</wa:RealQuantity>
</wa:Position>
"""
XML_CHECK="""<?xml version="1.0" encoding="UTF-8"?>
<Cheque
inn="%inn%"
datetime="%datetime%"
kpp="%kpp%"
kassa="%kassa%"
address="%address%"
name="%name%"
number="%ncheck%"
shift="1"
>
%bottles%
</Cheque>
"""
XML_BOTTLE="""
\t<Bottle barcode="%barcode%"
\tean="%ean%" price="%price%" %litrag%/>
"""
XML_GET_OSTAT="""<?xml version="1.0" encoding="UTF-8"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns="http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:qp="http://fsrar.ru/WEGAIS/QueryParameters">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryRests_v2></ns:QueryRests_v2>
</ns:Document>
</ns:Documents>
"""
XML_GET_REPLY="""<?xml version="1.0" encoding="UTF-8"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns="http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:qp="http://fsrar.ru/WEGAIS/QueryParameters"
>
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryResendDoc>
<qp:Parameters>
<qp:Parameter>
<qp:Name>WBREGID</qp:Name>
<qp:Value>%ttn%</qp:Value>
</qp:Parameter>
</qp:Parameters>
</ns:QueryResendDoc>
</ns:Document>
</ns:Documents>
"""
XML_GET_NATTN="""<?xml version="1.0" encoding="UTF-8"?>
<ns:Documents Version="1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ns="http://fsrar.ru/WEGAIS/WB_DOC_SINGLE_01"
xmlns:qp="http://fsrar.ru/WEGAIS/QueryParameters">
<ns:Owner>
<ns:FSRAR_ID>%fsrar_id%</ns:FSRAR_ID>
</ns:Owner>
<ns:Document>
<ns:QueryNATTN>
<qp:Parameters>
<qp:Parameter>
<qp:Name>КОД</qp:Name>
<qp:Value>%fsrar_id%</qp:Value>
</qp:Parameter>
</qp:Parameters>
</ns:QueryNATTN>
</ns:Document>
</ns:Documents>
"""
def findUL(node):
result = node.find("oref:UL",ns)
if result == None:
result = node.find("oref:FO",ns)
return result
class EgaisClient:
def __init__(self,server_ip,server_port,db):
self.server_ip=server_ip
self.server_port=server_port
self.db=db
def assm(self,page):
return "http://%s:%d%s" % (self.server_ip,self.server_port,page)
def _delete(self,page):
print "delete %s" % page
requests.delete(page)
return True
def _get(self,page):
self.data=""
try:
r = requests.get(page)
if r.status_code!=200:
print "error_status"
return False
self.data=r.text.encode("utf8")
except:
return False
return True
def _post(self,page,params):
self.data=""
r=requests.post(page, data=params)
self.data=r.content
if r.status_code!=200:
print "error_status"
return False
return True
def _sendfile(self,url,pname,fname):
files = {pname : open(fname, 'rb')}
r = requests.post(url, files=files)
if r.status_code!=200:
print "error_status"
self.data=r.content
return False
self.data=r.content
return True
def _connect(self):
if self._get(self.assm("/")):
r=re.search("FSRAR-RSA-(\d+)",self.data)
if not r:
return False
self.fsrar_id=r.group(1)
return True
else:
self.fsrar_id=""
return False
def _sendxml(self,fname,page,xml):
f=open(fname,"w")
f.write(xml)
f.close()
return self._sendfile(self.assm(page),'xml_file',fname)
def _send_places(self):
if not self._connect():
return False
xml=XML_GET_CLIENTS.replace("%INN%",self.db.sets['inn'])
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("client.xml","/opt/in/QueryClients_v2",xml)
return r
def _send_ostat(self):
if not self._connect():
return False
xml=XML_GET_OSTAT.replace("%INN%",self.db.sets['inn'])
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("rest.xml","/opt/in/QueryRests_v2",xml)
return r
def _send_reply(self,ttn):
if not self._connect():
return False
xml=XML_GET_REPLY.replace("%ttn%",ttn)
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("reply.xml","/opt/in/QueryResendDoc",xml)
return r
def _send_nattn(self):
if not self._connect():
return False
#self.db._truncate(db.TB_EGAIS_DOCS_NEED)
xml=XML_GET_NATTN.replace("%fsrar_id%",self.fsrar_id)
#.encode("utf8")
r=self._sendxml("nattn.xml","/opt/in/QueryNATTN",xml)
return r
def _send_version(self,version):
if not self._connect():
return False
if version==1:
ver="WayBill"
else:
ver="WayBill_v2"
xml=XML_VERSION.replace("%VERSION%",ver)
xml=xml.replace("%fsrar_id%",self.fsrar_id).encode("utf8")
r=self._sendxml("version.xml","/opt/in/InfoVersionTTN",xml)
return r
def _get_ticket(self):
self.sign=""
#print self.data
tree=etree.fromstring(self.data)
url = tree.find("url")
sign = tree.find("sign")
if url==None:
return ""
if sign!=None:
self.sign=sign.text
return url.text
def _send_act(self,id):
if not self._connect():
return False
xml=self._make_act(id)
if xml=="":
return False
r=self._sendxml("client.xml","/opt/in/WayBillAct_v2",xml)
reply_id=self._get_ticket()
if reply_id!="":
self.db.egais_docs_hd_upd(id,{'status':3,'reply_id':reply_id})
return r
def _send_return(self,id):
if not self._connect():
return False
xml=self._make_return(id)
if xml=="":
return False
r=self._sendxml("return.xml","/opt/in/WayBill_v2",xml)
#print r
reply_id=self._get_ticket()
if reply_id!="":
self.db.egais_docs_hd_upd(id,{'status':3,'reply_id':reply_id})
return r
def _send_check(self,_type,ncheck,pos):
if not self._connect():
return False
xml=self._make_check(_type,ncheck,pos)
if xml=="":
return False
print "-"*80
print xml
print "-"*80
#return False
r=self._sendxml("cheque.xml","/xml",xml)
self.url=self._get_ticket()
if self.url=="" or self.sign=="":
return False
return r
def _send_move(self,id):
if not self._connect():
return False
xml=self._make_move(id)
if xml=="":
return False
r=self._sendxml("move.xml","/opt/in/WayBill_v2",xml)
reply_id=self._get_ticket()
if reply_id!="":
self.db.egais_docs_hd_upd(id,{'status':3,'reply_id':reply_id})
return r
def _create_return(self,id,idd):
if self.db.egais_get_mydoc(id):
struct={\
"type":1,\
"status":1,\
"ns_FSRAR_ID" :self.db.egais_doc_hd['recv_RegId'],\
"wb_Identity" :"0",\
"ns_typedoc" :"WayBill_v2",\
"wb_Date" :curdate2my(),\
"wb_ShippingDate" :curdate2my(),\
"wb_Type" :"WBReturnFromMe",\
"wb_UnitType" :self.db.egais_doc_hd['wb_UnitType'],\
"send_INN" :self.db.egais_doc_hd['recv_INN'],\
"send_KPP" :self.db.egais_doc_hd['recv_KPP'],\
"send_ShortName" :self.db.egais_doc_hd['recv_ShortName'],\
"send_RegId" :self.db.egais_doc_hd['recv_RegId'],\
"recv_INN" :self.db.egais_doc_hd['send_INN'],\
"recv_KPP" :self.db.egais_doc_hd['send_KPP'],\
"recv_ShortName" :self.db.egais_doc_hd['send_ShortName'],\
"recv_RegId" :self.db.egais_doc_hd['send_RegId'],\
}
id=self.db.egais_docs_hd_add(struct)
if id==0:
return False
self.db.egais_docs_hd_upd(id,{"wb_Identity":str(id),"wb_NUMBER":u"В"+self.db.sets['idplace'].rjust(3,"0")+str(id).rjust(4,"0")} )
for rec in self.db.egais_doc_ct:
if int(rec['id'])==idd:
struct=rec
struct["iddoc"]=id
struct["wb_Identity"]="1"
struct["pref_Type"]=u"АП"
del struct['id']
self.db.egais_docs_ct_add(struct)
return True
else:
return False
def _delete_in(self,id):
for d in self.data_url_in:
if id==d['idd']:
self._delete(d['url'])
def _get_docs_in(self):
self.data_url_in=[]
if self._get(self.assm("/opt/in")):
try:
d=etree.fromstring(self.data)
except:
return False
for t in d:
if t.tag!='url':
continue
if t.attrib.has_key('replyId'):
id=t.attrib['replyId']
else:
id=""
url=t.text
self.data_url_in.append({'idd':id,'url':url})
return True
else:
return False
def _get_docs_out(self):
self.data_url=[]
if self._get(self.assm("/opt/out")):
try:
d=etree.fromstring(self.data)
except:
return False
for t in d:
if t.tag!='url':
continue
if t.attrib.has_key('replyId'):
id=t.attrib['replyId']
else:
id=""
url=t.text
self.data_url.append({'idd':id,'url':url})
return True
else:
return False
def _dodoc(self):
res={}
for d in self.data_url:
id=d['idd']
url=d['url']
if not self._get(url):
continue
addLog('/var/log/egaisLog.xml',self.data)
tree=etree.fromstring(self.data)
doc = tree.find("ns:Document",ns)
if doc==None:
continue
typedoc=doc[0].tag
#print typedoc
if typedoc=="{%s}ConfirmTicket" % ns["ns"]:
if self._addConfirmTicket(url,id,tree):
if res.has_key("ConfirmTicket"):
res['ConfirmTicket']+=1
else:
res['ConfirmTicket']=1
print "ConfirmTicket"
self._delete_in(id)
self._delete(url)
pass
if typedoc=="{%s}Ticket" % ns["ns"]:
if self._addTicket(url,id,tree):
if res.has_key("Ticket"):
res['Ticket']+=1
else:
res['Ticket']=1
print "Ticket"
self._delete_in(id)
pass
self._delete(url)
if typedoc=="{%s}ReplyClient_v2" % ns["ns"]:
if res.has_key("ReplyClient"):
res['ReplyClient']+=1
else:
res['ReplyClient']=1
print "ReplyClient"
self._addplaces(doc[0])
self._delete_in(id)
self._delete(url)
if typedoc=="{%s}ReplyRests_v2" % ns["ns"]:
res['ReplyRests.Products']=self._reload_ostat(doc[0])
self._delete_in(id)
self._delete(url)
if typedoc=="{%s}WayBill_v2" % ns["ns"]:
if self._addWayBill(url,id,tree):
if res.has_key("WayBill"):
res['WayBill']+=1
else:
res['WayBill']=1
self._delete(url)
pass
if typedoc=="{%s}WayBillAct" % ns["ns"] or typedoc=="{%s}WayBillAct_v2" % ns["ns"]:
if self._addWayBillAct(url,id,tree):
if res.has_key("WayBillAct"):
res['WayBillAct']+=1
else:
res['WayBillAct']=1
self._delete(url)
pass
if typedoc=="{%s}TTNInformF2Reg" % ns["ns"]:
if self._addInformBReg(url,id,tree):
if res.has_key("TTNInformBReg"):
res['TTNInformBReg']+=1
else:
res['TTNInformBReg']=1
self._delete(url)
pass
if typedoc=="{%s}ReplyNoAnswerTTN" % ns["ns"]:
res['ReplyNoAnswerTTN']=self._read_nattn(doc[0])
self._delete_in(id)
self._delete(url)
return res
def _recalc(self):
docs=self.db.egais_get_mydocs(0,None,None,None,None)
for d in docs:
iddoc=int(d['id'])
tree=etree.fromstring(d['xml_inform'].encode('utf8'))
if tree=="":
continue
if not self.db.egais_get_mydoc(iddoc):
continue
content=self._readhead_InformBReg(tree)
for pos in content.findall("wbr:Position",ns):
self.struct={}
id=self._readcontent_InformBReg(pos)
self.db.egais_docs_ct_updId(iddoc,id,self.struct)
return True
def _addplaces(self,tree):
clients=tree.find("rc:Clients",ns)
if clients==None:
print "no clients"
return
struct={}
self.db.egais_places_clear()
for t in clients.findall("rc:Client",ns):
t=t.find("oref:OrgInfoV2",ns)
t = findUL(t)
a=t.find("oref:address",ns)
for f in self.db.tb_egais_places.record_add:
r=t.find("oref:"+f,ns)
if r!=None:
struct[f]=r.text
else:
r=a.find("oref:"+f,ns)
if r!=None:
struct[f]=r.text
self.db.egais_places_add(struct)
def _setstruct(self,base,tag,field=None):
t=base.find(tag,ns)
if field==None:
field=tag.replace(":","_")
try:
self.struct[field]=t.text
return True
except:
print "error:%s" % tag
return False
def _readhead_WayBill(self,tree):
owner=tree.find("ns:Owner",ns)
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wb:Header",ns)
node=header.find("wb:Shipper",ns)
shipper=findUL(node)
node=header.find("wb:Consignee",ns)
consignee=findUL(node)
self._setstruct(owner,"ns:FSRAR_ID")
self._setstruct(doc,"wb:Identity")
self._setstruct(header,"wb:NUMBER")
self._setstruct(header,"wb:Date")
self._setstruct(header,"wb:ShippingDate")
self._setstruct(header,"wb:Type")
self._setstruct(header,"wb:UnitType")
self._setstruct(shipper,"oref:INN","send_INN")
self._setstruct(shipper,"oref:KPP","send_KPP")
self._setstruct(shipper,"oref:ShortName","send_ShortName")
self._setstruct(shipper,"oref:ClientRegId","send_RegId")
self._setstruct(consignee,"oref:INN","recv_INN")
self._setstruct(consignee,"oref:KPP","recv_KPP")
self._setstruct(consignee,"oref:ShortName","recv_ShortName")
self._setstruct(consignee,"oref:ClientRegId","recv_RegId")
content=doc.find("wb:Content",ns)
return content
def _readhead_InformBReg(self,tree):
owner=tree.find("ns:Owner",ns)
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wbr:Header",ns)
shipper=header.find("wbr:Shipper",ns)
shipper=findUL(shipper)
consignee=header.find("wbr:Consignee",ns)
consignee=findUL(consignee)
self._setstruct(shipper,"oref:ClientRegId","send_RegId")
self._setstruct(consignee,"oref:ClientRegId","recv_RegId")
self._setstruct(header,"wbr:WBNUMBER")
self._setstruct(header,"wbr:WBRegId","tc_RegId")
self._setstruct(header,"wbr:Identity")
content=doc.find("wbr:Content",ns)
return content
def _readhead_Ticket(self,tree):
doc=tree.find("ns:Document",ns)
doc=doc[0]
self._setstruct(doc,"tc:RegID")
oper=doc.find("tc:OperationResult",ns)
if oper!=None:
self._setstruct(oper,"tc:OperationResult")
self._setstruct(oper,"tc:OperationName")
regid=self.struct['tc_RegID']
del self.struct['tc_RegID']
return regid
def _readhead_ConfirmTicket(self,tree):
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wt:Header",ns)
self._setstruct(header,"wt:WBRegId")
self._setstruct(header,"wt:IsConfirm")
regid=self.struct['wt_WBRegId']
del self.struct['wt_WBRegId']
return regid
def _readhead_WayBillAct(self,tree):
doc=tree.find("ns:Document",ns)
doc=doc[0]
header=doc.find("wa:Header",ns)
self._setstruct(header,"wa:WBRegId")
self._setstruct(header,"wa:IsAccept")
regid=self.struct['wa_WBRegId']
del self.struct['wa_WBRegId']
return regid
def _readcontent_WayBill(self,pos):
informA=pos.find("wb:InformF1",ns)
informB=pos.find("wb:InformF2",ns)
informB=informB.find("pref:InformF2Item",ns)
product=pos.find("wb:Product",ns)
node=product.find("pref:Producer",ns)
producer=findUL(node)
self._setstruct(pos,"wb:Identity")
self._setstruct(pos,"wb:Quantity")
self._setstruct(pos,"wb:Price")
self._setstruct(pos,"wb:Pack_ID")
self._setstruct(pos,"wb:Party")
self._setstruct(informA,"pref:RegId")
self._setstruct(informB,"pref:F2RegId","pref_BRegId")
self._setstruct(product,"pref:Type")
if not self._setstruct(product,"pref:ShortName"):
self._setstruct(product,"pref:FullName","pref_ShortName")
self._setstruct(product,"pref:AlcCode")
self._setstruct(product,"pref:Capacity")
self._setstruct(product,"pref:AlcVolume")
self._setstruct(product,"pref:ProductVCode")
self._setstruct(producer,"oref:ClientRegId")
self._setstruct(producer,"oref:INN")
self._setstruct(producer,"oref:KPP")
self._setstruct(producer,"oref:ShortName")
def _readcontent_InformBReg(self,pos):
self._setstruct(pos,"wbr:Identity")
self._setstruct(pos,"wbr:InformF2RegId","wbr_InformBRegId")
id=self.struct['wbr_Identity']
del self.struct['wbr_Identity']
return id
def _read_nattn(self,doc):
content=doc.find("ttn:ttnlist",ns)
self.db._truncate(db.TB_EGAIS_DOCS_NEED)
findtag=("ttn:WbRegID","ttn:ttnNumber","ttn:ttnDate","ttn:Shipper")
res=0
for t in content.findall("ttn:NoAnswer",ns):
struct={}
for tag in findtag:
val=t.find(tag,ns)
if val!=None:
struct[tag.replace(":","_")] = val.text
res+=1
self.db._insert(db.TB_EGAIS_DOCS_NEED,struct)
return res
def _reload_ostat(self,tree):
replacing = {
'rst_InformARegId':'rst_InformF1RegId',
'rst_InformBRegId':'rst_InformF2RegId',
}
products=tree.find("rst:Products",ns)
if products==None:
print "no products"
return
res=0
self.db.egais_ostat_clear()
for t in products.findall("rst:StockPosition",ns):
n=t.find("rst:Product",ns)
p=n.find("pref:Producer",ns)
# UL FO ...
ul=findUL(p)
a=ul.find("oref:address",ns)
struct={}
for f in self.db.tb_egais_ostat.record_add:
if f in replacing:
rf=replacing[f]
else:
rf=f
xf=rf.replace("_",":")
for x in (t,n,p,a):
r=x.find(xf,ns)
if r!=None:
break
if r!=None:
struct[f]=r.text
res+=1
#print struct
self.db.egais_ostat_add(struct)
return res
def _addTicket(self,url,reply_id,tree):
self.struct={}
id=self._readhead_Ticket(tree)
if not self.db.egais_find_replyId(reply_id):
return False
if self.db.egais_doc[3] == 5:
return True
if self.struct.has_key("tc_OperationResult"):
if self.struct['tc_OperationResult'] == 'Accepted':
self.struct['status'] = 5
else:
self.struct['status'] = 6
else:
self.struct['status'] = 4
self.struct['xml_ticket']= self.data
self.struct['reply_id'] = reply_id
self.struct['ns_typedoc']= "Ticket"
id=self.db.egais_doc[0]
return self.db.egais_docs_hd_upd(id,self.struct)
def _addConfirmTicket(self,url,reply_id,tree):
self.struct={}
regid=self._readhead_ConfirmTicket(tree)
if not self.db.egais_find_ttn(regid):
return False
if self.struct.has_key("wt_IsConfirm"):
if self.struct['wt_IsConfirm'] == 'Accepted':
self.struct['status'] = 5
else:
self.struct['status'] = 6
self.struct['xml_ticket']= self.data
self.struct['ns_typedoc']= "ConfirmTicket"
id=self.db.egais_doc[0]
return self.db.egais_docs_hd_upd(id,self.struct)
def _addWayBillAct(self,url,reply_id,tree):
self.struct={}
regid=self._readhead_WayBillAct(tree)
if not self.db.egais_find_ttn(regid):
return False
if self.struct.has_key("wa_IsAccept"):
if self.struct['wa_IsAccept'] == 'Accepted':
self.struct['status'] = 5
else:
self.struct['status'] = 6
self.struct['xml_ticket']= self.data
self.struct['ns_typedoc']= "WayBillAct_v2"
self.struct['wt_IsConfirm']=self.struct['wa_IsAccept']
del self.struct['wa_IsAccept']
id=self.db.egais_doc[0]
return self.db.egais_docs_hd_upd(id,self.struct)
def _addWayBill(self,url,id,tree):
self.struct={}
self.struct['type'] = 0
self.struct['status'] = 0
self.struct['xml_doc'] = self.data
self.struct['reply_id'] = id
self.struct['url'] = url
self.struct['ns_typedoc']= "WayBill_v2"
content=self._readhead_WayBill(tree)
if self.db.egais_docs_find(0,self.struct["recv_RegId"],self.struct["send_RegId"],self.struct["wb_NUMBER"]):
#Возможно это стоит включить. Если документ приходит с темже номером то он перезаписывается
#!!! Требует проверки!
self.db.egais_docs_hd_del(self.db.egais_doc[0])
if self.db.egais_get_mydoc(self.db.egais_doc[0]):
return False
id=self.db.egais_docs_hd_add(self.struct)
if id==0:
return False
for pos in content.findall("wb:Position",ns):
self.struct={'iddoc':id}
self._readcontent_WayBill(pos)
self.struct['real_Quantity']=self.struct['wb_Quantity']
self.db.egais_docs_ct_add(self.struct)
return True
def _addInformBReg(self,url,id,tree):
self.struct={}
content=self._readhead_InformBReg(tree)
if not self.db.egais_find_replyId(id) or id=="":
print "error:replyid %s" % id
if not self.db.egais_docs_find(None,self.struct["recv_RegId"],self.struct["send_RegId"],self.struct["wbr_WBNUMBER"]):
print "not found doc"
return False
if self.db.egais_doc[3] not in (0,3,5,6) :
print "error:doc status=%d" % self.db.egais_doc[3]
#return False
iddoc=self.db.egais_doc[0]
tc_regId=self.struct['tc_RegId']
self.struct={}
if self.db.egais_doc[3]==0:
self.struct['status']=1
if self.db.egais_doc[3]==3:
self.struct['status']=4
self.struct['xml_inform']=self.data
self.struct['url']=url
#self.struct['reply_id'] = id
self.struct['ns_typedoc']= "InformF2Reg"
self.struct['tc_RegId']=tc_regId
#print self.struct;
self.db.egais_docs_hd_upd(iddoc,self.struct)
for pos in content.findall("wbr:Position",ns):
self.struct={}
id=self._readcontent_InformBReg(pos)
self.db.egais_docs_ct_updId(iddoc,id,self.struct)
return True
def _addReplyNoAnswerTTN(self,url,id,tree):
self.struct={}
content=self._readhead_InformBReg(tree)
def _make_act(self,id):
if not self.db.egais_get_mydoc(id):
return ""
xml=XML_SEND_ACT.replace("%fsrar_id%",self.fsrar_id)
xml=xml.replace("%accept%",self.db.egais_doc_hd['answer'])
xml=xml.replace("%iddoc%",str(self.db.sets['idplace'])+"_"+self.db.egais_doc_hd['id'])
xml=xml.replace("%date%",curdate2my())
xml=xml.replace("%wb_RegId%",self.db.egais_doc_hd['tc_RegId'])
XML=xml
XML_CONTENT=""
use_content=False
for ct in self.db.egais_doc_ct:
if ct['real_Quantity']!=ct['wb_Quantity']:
use_content=True
xml=XML_ACT_CONTENT.replace("%identity%",ct['wb_Identity'])
xml=xml.replace("%real%",ct['real_Quantity'])
xml=xml.replace("%wb_RegId%",str(ct['wbr_InformBRegId']))
XML_CONTENT+=xml
if not use_content:
XML_CONTENT=""
XML=XML.replace("%content%",XML_CONTENT)
return XML.encode("utf8")
def _make_return(self,id):
if not self.db.egais_get_mydoc(id):
return ""
replacing = {
'wbr_InformBRegId':'wbr_InformF2RegId',
}
xml=XML_SEND_WAYBILL_HEAD.replace("%fsrar_id%",self.fsrar_id)
rlist={ "%identity%" :"wb_Identity",\
"%number%" :"wb_NUMBER",\
"%dt%" :"wb_Date",\
"%inn%" :"send_INN",\
"%kpp%" :"send_KPP",\
"%regid%" :"send_RegId",\
"%name%" :"send_ShortName",\
"%send_inn%" :"recv_INN",\
"%send_kpp%" :"recv_KPP",\
"%send_regid%" :"recv_RegId",\
"%send_name%" :"recv_ShortName",\
}
for k,v in rlist.items():
if v.find('ShortName')!=-1:
self.db.egais_doc_hd[v]=self.db.egais_doc_hd[v][:64]
xml=xml.replace(k,self.db.egais_doc_hd[v])
xml=xml.replace( "%type%","WBReturnFromMe")
rlist={ "%identity%" :"wb_Identity",\
"%quantity%" :"real_Quantity",\
"%price%" :"wb_Price",\
"%inform_a%" :"pref_RegId",\
"%inform_b%" :"wbr_InformBRegId",\
"%shortname%" :"pref_ShortName",\
"%alccode%" :"pref_AlcCode",\
"%capacity%" :"pref_Capacity",\
"%alcvolume%" :"pref_AlcVolume",\
"%productvcode%":"pref_ProductVCode",\
"%regid%" :"oref_ClientRegId",\
"%inn%" :"oref_INN",\
"%kpp%" :"oref_KPP",\
"%oref_shortname%" :"oref_ShortName",\
}
XML_CONTENT=""
for ct in self.db.egais_doc_ct:
xml2=XML_SEND_WAYBILL_CONTENT
for k,v in rlist.items():
if ct[v]!=None and ct[v]!='None':
if v=='pref_ShortName':
ct[v]=ct[v][:64]
xml2=xml2.replace(k,ct[v])
else:
xml2=xml2.replace(k,"None")
t=v.replace("_",":")
t1="<%s>" % t
t2="</%s>" % t
xml2=xml2.replace(t1+"None"+t2,"")
xml2=xml2.replace("%pref_type%",u"АП")
xml2=xml2.replace("%packet%",self.db.egais_doc_hd["wb_UnitType"])
XML_CONTENT+="\n"+xml2
XML=xml.replace("%content%",XML_CONTENT)
return XML.encode("utf8")
def _make_check(self,_type,ncheck,pos):
dttm=datetime.now().strftime(format="%d%m%y%H%M")
xml=XML_CHECK.replace("%inn%",self.db.sets['inn'])
xml=xml.replace("%kpp%",self.db.sets['kpp'])
xml=xml.replace("%name%",self.db.sets['orgname'])
xml=xml.replace("%address%",self.db.sets['placename'])
xml=xml.replace("%kassa%",self.db.sets['nkassa'])
xml=xml.replace("%datetime%",dttm)
xml=xml.replace("%ncheck%",str(ncheck))
XML=xml
XML_CONTENT=""
for i in range(len(pos)):
p=pos[i]
if not (p['storno']==0 and p['p_alco']==1):
continue
xml=XML_BOTTLE.replace("%barcode%",p['barcode'])
xml=xml.replace("%ean%",p['p_shk'])
if p['paramf1']>0 and _type==1:
price=-p['paramf1']
else:
price=p['paramf1']
xml=xml.replace("%price%",price.__format__(".2f"))
if p['p_litrag']!=0:
xml=xml.replace("%litrag%","volume=\"%s\"" % p['p_litrag'].__format__(".4f"))
else:
xml=xml.replace("%litrag%","")
XML_CONTENT+=xml
XML=XML.replace("%bottles%",XML_CONTENT)
return XML.encode("utf8")
def _make_move(self,id):
if not self.db.egais_get_mydoc(id):
return ""
xml=XML_SEND_WAYBILL_HEAD.replace("%fsrar_id%",self.fsrar_id)
rlist={ "%identity%" :"wb_Identity",\
"%number%" :"wb_NUMBER",\
"%dt%" :"wb_Date",\
"%packet%" :"wb_UnitType",\
"%inn%" :"send_INN",\
"%kpp%" :"send_KPP",\
"%regid%" :"send_RegId",\
"%name%" :"send_ShortName",\
"%send_inn%" :"recv_INN",\
"%send_kpp%" :"recv_KPP",\
"%send_regid%" :"recv_RegId",\
"%send_name%" :"recv_ShortName",\
}
for k,v in rlist.items():
if v.find('ShortName')!=-1:
self.db.egais_doc_hd[v]=self.db.egais_doc_hd[v][:64]
xml=xml.replace(k,self.db.egais_doc_hd[v])
xml=xml.replace( "%type%","WBReturnFromMe")
rlist={ "%identity%" :"wb_Identity",\
"%quantity%" :"real_Quantity",\
"%price%" :"wb_Price",\
"%inform_a%" :"pref_RegId",\
"%inform_b%" :"wbr_InformF2RegId",\
"%shortname%" :"pref_ShortName",\
"%alccode%" :"pref_AlcCode",\
"%capacity%" :"pref_Capacity",\
"%alcvolume%" :"pref_AlcVolume",\
"%productvcode%":"pref_ProductVCode",\
"%regid%" :"oref_ClientRegId",\
"%inn%" :"oref_INN",\
"%kpp%" :"oref_KPP",\
"%oref_shortname%" :"oref_ShortName",\
}
XML_CONTENT=""
for ct in self.db.egais_doc_ct:
xml2=XML_SEND_WAYBILL_CONTENT
for k,v in rlist.items():
if ct[v]!=None and ct[v]!='None':
if v=='pref_ShortName':
ct[v]=ct[v][:64]
xml2=xml2.replace(k,ct[v])
else:
xml2=xml2.replace(k,"None")
t=v.replace("_",":")
t1="<%s>" % t
t2="</%s>" % t
xml2=xml2.replace(t1+"None"+t2,"")
xml2=xml2.replace("%pref_type%",u"АП")
XML_CONTENT+="\n"+xml2
XML=xml.replace("%content%",XML_CONTENT)
return XML.encode("utf8")
| gpl-3.0 | -6,480,574,202,339,699,000 | 32.91342 | 141 | 0.52724 | false |
MichSchli/Mindblocks | model/module/module_repository.py | 1 | 4319 | import json
import os
from model.component.component_specification import ComponentSpecification
from model.component.subgraph_component import SubgraphComponentModel
from model.module.graph_prototype.graph_prototype_specifications import GraphPrototypeSpecifications
from model.module.module_model import ModuleModel
from model.module.toolbox_item.toolbox_item_model import ToolboxItemModel
from model.module.toolbox_item.toolbox_item_specifications import ToolboxItemSpecifications
from observables.observable_dictionary import ObservableDict
from packages.graph.subgraph_component import SubgraphComponent
class ModuleRepository:
modules = None
component_dir = '/home/michael/Projects/Mindblocks/packages'
prototype_repository = None
graph_prototype_repository = None
def __init__(self, prototype_repository, graph_prototype_repository):
self.prototype_repository = prototype_repository
self.graph_prototype_repository = graph_prototype_repository
self.modules = ObservableDict()
def load_basic_modules(self):
for package_name in self.get_all_package_names():
module = self.load_basic_module_by_package_name(package_name)
self.modules.append(module)
def load_basic_module_by_package_name(self, package_name):
manifest = self.load_package_manifest(package_name)
module = ModuleModel(manifest['name'])
prototypes = self.prototype_repository.load_prototypes(manifest)
module.extend_prototypes(prototypes)
return module
def get_prototype_by_id(self, id):
for module in self.get_basic_modules(None):
for prototype in module.components:
print(prototype.get_unique_identifier())
print(id)
if prototype.get_unique_identifier() == id:
print(prototype)
return prototype
print("NOT FOUND")
for module in self.get_canvas_modules(None):
for prototype in module.components:
if prototype.get_unique_identifier() == id:
return prototype
return None
def get_prototype(self, specifications):
basic_prototype = self.prototype_repository.get(specifications)
if basic_prototype is not None:
return basic_prototype
graph_prototype_specifications = GraphPrototypeSpecifications()
graph_prototype_specifications.graph_identifier = specifications.name
graph_prototype_specifications.canvas_identifier = specifications.canvas
graph_prototype = self.graph_prototype_repository.get(graph_prototype_specifications)[0]
return graph_prototype
def get_basic_modules(self, specifications):
return list(self.modules.elements.values())
def get_canvas_modules(self, specifications):
prototypes = self.graph_prototype_repository.get_all()
modules = {}
for prototype in prototypes:
if prototype.canvas_identifier not in modules:
modules[prototype.canvas_identifier] = ModuleModel(prototype.canvas_identifier)
modules[prototype.canvas_identifier].components.append(prototype)
return list(modules.values())
'''
Logic for loading modules:
'''
def get_all_package_names(self):
all_subitems = os.listdir(self.component_dir)
filtered_subitems = [item for item in all_subitems if self.filter_name(item)]
absolute_subitems = [(d,os.path.join(self.component_dir, d)) for d in filtered_subitems]
subfolders = [d[0] for d in absolute_subitems if os.path.isdir(d[1])]
return subfolders
def filter_name(self, name):
if name.startswith('.') or name.startswith('_'):
return False
return True
def load_manifest(self, path):
manifest_path = os.path.join(path, 'manifest.json')
with open(manifest_path) as data_file:
manifest = json.load(data_file)
manifest['path'] = path
return manifest
def load_package_manifest(self, package_name):
manifest_path = os.path.join(self.component_dir, package_name)
manifest = self.load_manifest(manifest_path)
manifest['package'] = package_name
return manifest | gpl-3.0 | -4,040,655,532,269,729,000 | 36.241379 | 100 | 0.683492 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_pathlib.py | 1 | 97139 | import collections.abc
import io
import os
import sys
import errno
import pathlib
import pickle
import socket
import stat
import tempfile
import unittest
from unittest import mock
from test import support
from test.support import TESTFN, FakePath
try:
import grp, pwd
except ImportError:
grp = pwd = None
class _BaseFlavourTest(object):
def _check_parse_parts(self, arg, expected):
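        # Helper: parse_parts() is expected to return a (drive, root,
        # parsed parts) triple; check it using the flavour's primary
        # separator and, when one exists, its alternate separator.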
f = self.flavour.parse_parts
sep = self.flavour.sep
altsep = self.flavour.altsep
actual = f([x.replace('/', sep) for x in arg])
self.assertEqual(actual, expected)
if altsep:
actual = f([x.replace('/', altsep) for x in arg])
self.assertEqual(actual, expected)
def test_parse_parts_common(self):
check = self._check_parse_parts
sep = self.flavour.sep
# Unanchored parts.
check([], ('', '', []))
check(['a'], ('', '', ['a']))
check(['a/'], ('', '', ['a']))
check(['a', 'b'], ('', '', ['a', 'b']))
# Expansion.
check(['a/b'], ('', '', ['a', 'b']))
check(['a/b/'], ('', '', ['a', 'b']))
check(['a', 'b/c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Collapsing and stripping excess slashes.
check(['a', 'b//c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
check(['a', 'b/c/', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Eliminating standalone dots.
check(['.'], ('', '', []))
check(['.', '.', 'b'], ('', '', ['b']))
check(['a', '.', 'b'], ('', '', ['a', 'b']))
check(['a', '.', '.'], ('', '', ['a']))
# The first part is anchored.
check(['/a/b'], ('', sep, [sep, 'a', 'b']))
check(['/a', 'b'], ('', sep, [sep, 'a', 'b']))
check(['/a/', 'b'], ('', sep, [sep, 'a', 'b']))
# Ignoring parts before an anchored part.
check(['a', '/b', 'c'], ('', sep, [sep, 'b', 'c']))
check(['a', '/b', '/c'], ('', sep, [sep, 'c']))
class PosixFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._posix_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# Collapsing of excess leading slashes, except for the double-slash
# special case.
check(['//a', 'b'], ('', '//', ['//', 'a', 'b']))
check(['///a', 'b'], ('', '/', ['/', 'a', 'b']))
check(['////a', 'b'], ('', '/', ['/', 'a', 'b']))
# Paths which look like NT paths aren't treated specially.
check(['c:a'], ('', '', ['c:a']))
check(['c:\\a'], ('', '', ['c:\\a']))
check(['\\a'], ('', '', ['\\a']))
def test_splitroot(self):
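        # splitroot() is expected to return a (drive, root, rest) triple;
        # POSIX paths never carry a drive component.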
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a/b'), ('', '', 'a/b'))
self.assertEqual(f('a/b/'), ('', '', 'a/b/'))
self.assertEqual(f('/a'), ('', '/', 'a'))
self.assertEqual(f('/a/b'), ('', '/', 'a/b'))
self.assertEqual(f('/a/b/'), ('', '/', 'a/b/'))
# The root is collapsed when there are redundant slashes
# except when there are exactly two leading slashes, which
# is a special case in POSIX.
self.assertEqual(f('//a'), ('', '//', 'a'))
self.assertEqual(f('///a'), ('', '/', 'a'))
self.assertEqual(f('///a/b'), ('', '/', 'a/b'))
# Paths which look like NT paths aren't treated specially.
self.assertEqual(f('c:/a/b'), ('', '', 'c:/a/b'))
self.assertEqual(f('\\/a/b'), ('', '', '\\/a/b'))
self.assertEqual(f('\\a\\b'), ('', '', '\\a\\b'))
class NTFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._windows_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# First part is anchored.
check(['c:'], ('c:', '', ['c:']))
check(['c:/'], ('c:', '\\', ['c:\\']))
check(['/'], ('', '\\', ['\\']))
check(['c:a'], ('c:', '', ['c:', 'a']))
check(['c:/a'], ('c:', '\\', ['c:\\', 'a']))
check(['/a'], ('', '\\', ['\\', 'a']))
# UNC paths.
check(['//a/b'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['//a/b/'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['//a/b/c'], ('\\\\a\\b', '\\', ['\\\\a\\b\\', 'c']))
# Second part is anchored, so that the first part is ignored.
check(['a', 'Z:b', 'c'], ('Z:', '', ['Z:', 'b', 'c']))
check(['a', 'Z:/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
# UNC paths.
check(['a', '//b/c', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Collapsing and stripping excess slashes.
check(['a', 'Z://b//c/', 'd/'], ('Z:', '\\', ['Z:\\', 'b', 'c', 'd']))
# UNC paths.
check(['a', '//b/c//', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Extended paths.
check(['//?/c:/'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\']))
check(['//?/c:/a'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'a']))
check(['//?/c:/a', '/b'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'b']))
# Extended UNC paths (format is "\\?\UNC\server\share").
check(['//?/UNC/b/c'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\']))
check(['//?/UNC/b/c/d'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\', 'd']))
# Second part has a root but not drive.
check(['a', '/b', 'c'], ('', '\\', ['\\', 'b', 'c']))
check(['Z:/a', '/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
check(['//?/Z:/a', '/b', 'c'], ('\\\\?\\Z:', '\\', ['\\\\?\\Z:\\', 'b', 'c']))
def test_splitroot(self):
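        # splitroot() is expected to split drive letters and UNC shares
        # into the drive component, leaving the root and the rest.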
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a\\b'), ('', '', 'a\\b'))
self.assertEqual(f('\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('c:a\\b'), ('c:', '', 'a\\b'))
self.assertEqual(f('c:\\a\\b'), ('c:', '\\', 'a\\b'))
# Redundant slashes in the root are collapsed.
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\\\\\a/b'), ('', '\\', 'a/b'))
self.assertEqual(f('c:\\\\a'), ('c:', '\\', 'a'))
self.assertEqual(f('c:\\\\\\a/b'), ('c:', '\\', 'a/b'))
# Valid UNC paths.
self.assertEqual(f('\\\\a\\b'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\c\\d'), ('\\\\a\\b', '\\', 'c\\d'))
# These are non-UNC paths (according to ntpath.py and test_ntpath).
# However, command.com says such paths are invalid, so it's
# difficult to know what the right semantics are.
self.assertEqual(f('\\\\\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
#
# Tests for the pure classes.
#
class _BasePurePathTest(object):
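    # Tests shared by PurePosixPath and PureWindowsPath; concrete
    # subclasses supply the class under test through the `cls` attribute.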
    # Keys are canonical paths; values are lists of argument tuples
    # expected to produce an equal path.
equivalences = {
'a/b': [
('a', 'b'), ('a/', 'b'), ('a', 'b/'), ('a/', 'b/'),
('a/b/',), ('a//b',), ('a//b//',),
# Empty components get removed.
('', 'a', 'b'), ('a', '', 'b'), ('a', 'b', ''),
],
'/b/c/d': [
('a', '/b/c', 'd'), ('a', '///b//c', 'd/'),
('/a', '/b/c', 'd'),
# Empty components get removed.
('/', 'b', '', 'c/d'), ('/', '', 'b/c/d'), ('', '/b/c/d'),
],
}
def setUp(self):
p = self.cls('a')
self.flavour = p._flavour
self.sep = self.flavour.sep
self.altsep = self.flavour.altsep
def test_constructor_common(self):
P = self.cls
p = P('a')
self.assertIsInstance(p, P)
P('a', 'b', 'c')
P('/a', 'b', 'c')
P('a/b/c')
P('/a/b/c')
P(FakePath("a/b/c"))
self.assertEqual(P(P('a')), P('a'))
self.assertEqual(P(P('a'), 'b'), P('a/b'))
self.assertEqual(P(P('a'), P('b')), P('a/b'))
self.assertEqual(P(P('a'), P('b'), P('c')), P(FakePath("a/b/c")))
def _check_str_subclass(self, *args):
# Issue #21127: it should be possible to construct a PurePath object
# from a str subclass instance, and it then gets converted to
# a pure str object.
class StrSubclass(str):
pass
P = self.cls
p = P(*(StrSubclass(x) for x in args))
self.assertEqual(p, P(*args))
for part in p.parts:
self.assertIs(type(part), str)
def test_str_subclass_common(self):
self._check_str_subclass('')
self._check_str_subclass('.')
self._check_str_subclass('a')
self._check_str_subclass('a/b.txt')
self._check_str_subclass('/a/b.txt')
def test_join_common(self):
P = self.cls
p = P('a/b')
pp = p.joinpath('c')
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p.joinpath('c', 'd')
self.assertEqual(pp, P('a/b/c/d'))
pp = p.joinpath(P('c'))
self.assertEqual(pp, P('a/b/c'))
pp = p.joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div_common(self):
# Basically the same as joinpath().
P = self.cls
p = P('a/b')
pp = p / 'c'
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p / 'c/d'
self.assertEqual(pp, P('a/b/c/d'))
pp = p / 'c' / 'd'
self.assertEqual(pp, P('a/b/c/d'))
pp = 'c' / p / 'd'
self.assertEqual(pp, P('c/a/b/d'))
pp = p / P('c')
self.assertEqual(pp, P('a/b/c'))
        pp = p / '/c'
self.assertEqual(pp, P('/c'))
def _check_str(self, expected, args):
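        # Helper: build a path from *args and compare its str() against
        # `expected`, with '/' normalized to the flavour's separator.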
p = self.cls(*args)
self.assertEqual(str(p), expected.replace('/', self.sep))
def test_str_common(self):
# Canonicalized paths roundtrip.
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self._check_str(pathstr, (pathstr,))
# Special case for the empty path.
self._check_str('.', ('',))
# Other tests for str() are in test_equivalences().
def test_as_posix_common(self):
P = self.cls
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self.assertEqual(P(pathstr).as_posix(), pathstr)
# Other tests for as_posix() are in test_equivalences().
def test_as_bytes_common(self):
sep = os.fsencode(self.sep)
P = self.cls
self.assertEqual(bytes(P('a/b')), b'a' + sep + b'b')
def test_as_uri_common(self):
P = self.cls
with self.assertRaises(ValueError):
P('a').as_uri()
with self.assertRaises(ValueError):
P().as_uri()
def test_repr_common(self):
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
p = self.cls(pathstr)
clsname = p.__class__.__name__
r = repr(p)
# The repr() is in the form ClassName("forward-slashes path").
self.assertTrue(r.startswith(clsname + '('), r)
self.assertTrue(r.endswith(')'), r)
inner = r[len(clsname) + 1 : -1]
self.assertEqual(eval(inner), p.as_posix())
# The repr() roundtrips.
q = eval(r, pathlib.__dict__)
self.assertIs(q.__class__, p.__class__)
self.assertEqual(q, p)
self.assertEqual(repr(q), r)
def test_eq_common(self):
P = self.cls
self.assertEqual(P('a/b'), P('a/b'))
self.assertEqual(P('a/b'), P('a', 'b'))
self.assertNotEqual(P('a/b'), P('a'))
self.assertNotEqual(P('a/b'), P('/a/b'))
self.assertNotEqual(P('a/b'), P())
self.assertNotEqual(P('/a/b'), P('/'))
self.assertNotEqual(P(), P('/'))
self.assertNotEqual(P(), "")
self.assertNotEqual(P(), {})
self.assertNotEqual(P(), int)
def test_match_common(self):
P = self.cls
self.assertRaises(ValueError, P('a').match, '')
self.assertRaises(ValueError, P('a').match, '.')
# Simple relative pattern.
self.assertTrue(P('b.py').match('b.py'))
self.assertTrue(P('a/b.py').match('b.py'))
self.assertTrue(P('/a/b.py').match('b.py'))
self.assertFalse(P('a.py').match('b.py'))
self.assertFalse(P('b/py').match('b.py'))
self.assertFalse(P('/a.py').match('b.py'))
self.assertFalse(P('b.py/c').match('b.py'))
        # Wildcard relative pattern.
self.assertTrue(P('b.py').match('*.py'))
self.assertTrue(P('a/b.py').match('*.py'))
self.assertTrue(P('/a/b.py').match('*.py'))
self.assertFalse(P('b.pyc').match('*.py'))
self.assertFalse(P('b./py').match('*.py'))
self.assertFalse(P('b.py/c').match('*.py'))
# Multi-part relative pattern.
self.assertTrue(P('ab/c.py').match('a*/*.py'))
self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
self.assertFalse(P('a.py').match('a*/*.py'))
self.assertFalse(P('/dab/c.py').match('a*/*.py'))
self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
# Absolute pattern.
self.assertTrue(P('/b.py').match('/*.py'))
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('a/b.py').match('/*.py'))
self.assertFalse(P('/a/b.py').match('/*.py'))
# Multi-part absolute pattern.
self.assertTrue(P('/a/b.py').match('/a/*.py'))
self.assertFalse(P('/ab.py').match('/a/*.py'))
self.assertFalse(P('/a/b/c.py').match('/a/*.py'))
# Multi-part glob-style pattern.
self.assertFalse(P('/a/b/c.py').match('/**/*.py'))
self.assertTrue(P('/a/b/c.py').match('/a/**/*.py'))
def test_ordering_common(self):
        # Ordering is tuple-like.
def assertLess(a, b):
self.assertLess(a, b)
self.assertGreater(b, a)
P = self.cls
a = P('a')
b = P('a/b')
c = P('abc')
d = P('b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
P = self.cls
a = P('/a')
b = P('/a/b')
c = P('/abc')
d = P('/b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
with self.assertRaises(TypeError):
P() < {}
def test_parts_common(self):
# `parts` returns a tuple.
sep = self.sep
P = self.cls
p = P('a/b')
parts = p.parts
self.assertEqual(parts, ('a', 'b'))
# The object gets reused.
self.assertIs(parts, p.parts)
# When the path is absolute, the anchor is a separate part.
p = P('/a/b')
parts = p.parts
self.assertEqual(parts, (sep, 'a', 'b'))
def test_fspath_common(self):
P = self.cls
p = P('a/b')
self._check_str(p.__fspath__(), ('a/b',))
self._check_str(os.fspath(p), ('a/b',))
def test_equivalences(self):
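        # Every argument tuple listed in `equivalences` should build a path
        # equal to the canonical one, with matching hash, str() and as_posix().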
for k, tuples in self.equivalences.items():
canon = k.replace('/', self.sep)
posix = k.replace(self.sep, '/')
if canon != posix:
tuples = tuples + [
tuple(part.replace('/', self.sep) for part in t)
for t in tuples
]
tuples.append((posix, ))
pcanon = self.cls(canon)
for t in tuples:
p = self.cls(*t)
self.assertEqual(p, pcanon, "failed with args {}".format(t))
self.assertEqual(hash(p), hash(pcanon))
self.assertEqual(str(p), canon)
self.assertEqual(p.as_posix(), posix)
def test_parent_common(self):
# Relative
P = self.cls
p = P('a/b/c')
self.assertEqual(p.parent, P('a/b'))
self.assertEqual(p.parent.parent, P('a'))
self.assertEqual(p.parent.parent.parent, P())
self.assertEqual(p.parent.parent.parent.parent, P())
# Anchored
p = P('/a/b/c')
self.assertEqual(p.parent, P('/a/b'))
self.assertEqual(p.parent.parent, P('/a'))
self.assertEqual(p.parent.parent.parent, P('/'))
self.assertEqual(p.parent.parent.parent.parent, P('/'))
def test_parents_common(self):
# Relative
P = self.cls
p = P('a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('a/b'))
self.assertEqual(par[1], P('a'))
self.assertEqual(par[2], P('.'))
self.assertEqual(list(par), [P('a/b'), P('a'), P('.')])
with self.assertRaises(IndexError):
par[-1]
with self.assertRaises(IndexError):
par[3]
with self.assertRaises(TypeError):
par[0] = p
# Anchored
p = P('/a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('/a/b'))
self.assertEqual(par[1], P('/a'))
self.assertEqual(par[2], P('/'))
self.assertEqual(list(par), [P('/a/b'), P('/a'), P('/')])
with self.assertRaises(IndexError):
par[3]
def test_drive_common(self):
P = self.cls
self.assertEqual(P('a/b').drive, '')
self.assertEqual(P('/a/b').drive, '')
self.assertEqual(P('').drive, '')
def test_root_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').root, '')
self.assertEqual(P('a/b').root, '')
self.assertEqual(P('/').root, sep)
self.assertEqual(P('/a/b').root, sep)
def test_anchor_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').anchor, '')
self.assertEqual(P('a/b').anchor, '')
self.assertEqual(P('/').anchor, sep)
self.assertEqual(P('/a/b').anchor, sep)
def test_name_common(self):
P = self.cls
self.assertEqual(P('').name, '')
self.assertEqual(P('.').name, '')
self.assertEqual(P('/').name, '')
self.assertEqual(P('a/b').name, 'b')
self.assertEqual(P('/a/b').name, 'b')
self.assertEqual(P('/a/b/.').name, 'b')
self.assertEqual(P('a/b.py').name, 'b.py')
self.assertEqual(P('/a/b.py').name, 'b.py')
def test_suffix_common(self):
P = self.cls
self.assertEqual(P('').suffix, '')
self.assertEqual(P('.').suffix, '')
self.assertEqual(P('..').suffix, '')
self.assertEqual(P('/').suffix, '')
self.assertEqual(P('a/b').suffix, '')
self.assertEqual(P('/a/b').suffix, '')
self.assertEqual(P('/a/b/.').suffix, '')
self.assertEqual(P('a/b.py').suffix, '.py')
self.assertEqual(P('/a/b.py').suffix, '.py')
self.assertEqual(P('a/.hgrc').suffix, '')
self.assertEqual(P('/a/.hgrc').suffix, '')
self.assertEqual(P('a/.hg.rc').suffix, '.rc')
self.assertEqual(P('/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('/a/Some name. Ending with a dot.').suffix, '')
def test_suffixes_common(self):
P = self.cls
self.assertEqual(P('').suffixes, [])
self.assertEqual(P('.').suffixes, [])
self.assertEqual(P('/').suffixes, [])
self.assertEqual(P('a/b').suffixes, [])
self.assertEqual(P('/a/b').suffixes, [])
self.assertEqual(P('/a/b/.').suffixes, [])
self.assertEqual(P('a/b.py').suffixes, ['.py'])
self.assertEqual(P('/a/b.py').suffixes, ['.py'])
self.assertEqual(P('a/.hgrc').suffixes, [])
self.assertEqual(P('/a/.hgrc').suffixes, [])
self.assertEqual(P('a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('/a/Some name. Ending with a dot.').suffixes, [])
def test_stem_common(self):
P = self.cls
self.assertEqual(P('').stem, '')
self.assertEqual(P('.').stem, '')
self.assertEqual(P('..').stem, '..')
self.assertEqual(P('/').stem, '')
self.assertEqual(P('a/b').stem, 'b')
self.assertEqual(P('a/b.py').stem, 'b')
self.assertEqual(P('a/.hgrc').stem, '.hgrc')
self.assertEqual(P('a/.hg.rc').stem, '.hg')
self.assertEqual(P('a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
def test_with_name_common(self):
P = self.cls
self.assertEqual(P('a/b').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/b.py').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b.py').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/Dot ending.').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/Dot ending.').with_name('d.xml'), P('/a/d.xml'))
self.assertRaises(ValueError, P('').with_name, 'd.xml')
self.assertRaises(ValueError, P('.').with_name, 'd.xml')
self.assertRaises(ValueError, P('/').with_name, 'd.xml')
self.assertRaises(ValueError, P('a/b').with_name, '')
self.assertRaises(ValueError, P('a/b').with_name, '/c')
self.assertRaises(ValueError, P('a/b').with_name, 'c/')
self.assertRaises(ValueError, P('a/b').with_name, 'c/d')
def test_with_suffix_common(self):
P = self.cls
self.assertEqual(P('a/b').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b').with_suffix('.gz'), P('/a/b.gz'))
self.assertEqual(P('a/b.py').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b.py').with_suffix('.gz'), P('/a/b.gz'))
# Stripping suffix.
self.assertEqual(P('a/b.py').with_suffix(''), P('a/b'))
self.assertEqual(P('/a/b').with_suffix(''), P('/a/b'))
# Path doesn't have a "filename" component.
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
# Invalid suffix.
self.assertRaises(ValueError, P('a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('a/b').with_suffix, '/')
self.assertRaises(ValueError, P('a/b').with_suffix, '.')
self.assertRaises(ValueError, P('a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('a/b').with_suffix, '.c/.d')
self.assertRaises(ValueError, P('a/b').with_suffix, './.d')
self.assertRaises(ValueError, P('a/b').with_suffix, '.d/.')
self.assertRaises(ValueError, P('a/b').with_suffix,
(self.flavour.sep, 'd'))
def test_relative_to_common(self):
P = self.cls
p = P('a/b')
self.assertRaises(TypeError, p.relative_to)
self.assertRaises(TypeError, p.relative_to, b'a')
self.assertEqual(p.relative_to(P()), P('a/b'))
self.assertEqual(p.relative_to(''), P('a/b'))
self.assertEqual(p.relative_to(P('a')), P('b'))
self.assertEqual(p.relative_to('a'), P('b'))
self.assertEqual(p.relative_to('a/'), P('b'))
self.assertEqual(p.relative_to(P('a/b')), P())
self.assertEqual(p.relative_to('a/b'), P())
# With several args.
self.assertEqual(p.relative_to('a', 'b'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('c'))
self.assertRaises(ValueError, p.relative_to, P('a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('a/c'))
self.assertRaises(ValueError, p.relative_to, P('/a'))
p = P('/a/b')
self.assertEqual(p.relative_to(P('/')), P('a/b'))
self.assertEqual(p.relative_to('/'), P('a/b'))
self.assertEqual(p.relative_to(P('/a')), P('b'))
self.assertEqual(p.relative_to('/a'), P('b'))
self.assertEqual(p.relative_to('/a/'), P('b'))
self.assertEqual(p.relative_to(P('/a/b')), P())
self.assertEqual(p.relative_to('/a/b'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/c'))
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('a'))
def test_pickling_common(self):
P = self.cls
p = P('/a/b')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertIs(pp.__class__, p.__class__)
self.assertEqual(pp, p)
self.assertEqual(hash(pp), hash(p))
self.assertEqual(str(pp), str(p))
class PurePosixPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePosixPath
def test_root(self):
P = self.cls
self.assertEqual(P('/a/b').root, '/')
self.assertEqual(P('///a/b').root, '/')
# POSIX special case for two leading slashes.
self.assertEqual(P('//a/b').root, '//')
def test_eq(self):
P = self.cls
self.assertNotEqual(P('a/b'), P('A/b'))
self.assertEqual(P('/a'), P('///a'))
self.assertNotEqual(P('/a'), P('//a'))
def test_as_uri(self):
P = self.cls
self.assertEqual(P('/').as_uri(), 'file:///')
self.assertEqual(P('/a/b.c').as_uri(), 'file:///a/b.c')
self.assertEqual(P('/a/b%#c').as_uri(), 'file:///a/b%25%23c')
def test_as_uri_non_ascii(self):
from urllib.parse import quote_from_bytes
P = self.cls
try:
os.fsencode('\xe9')
except UnicodeEncodeError:
self.skipTest("\\xe9 cannot be encoded to the filesystem encoding")
self.assertEqual(P('/a/b\xe9').as_uri(),
'file:///a/b' + quote_from_bytes(os.fsencode('\xe9')))
def test_match(self):
P = self.cls
self.assertFalse(P('A.py').match('a.PY'))
def test_is_absolute(self):
P = self.cls
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertTrue(P('/').is_absolute())
self.assertTrue(P('/a').is_absolute())
self.assertTrue(P('/a/b/').is_absolute())
self.assertTrue(P('//a').is_absolute())
self.assertTrue(P('//a/b').is_absolute())
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(False, P('/dev/con/PRN/NUL').is_reserved())
def test_join(self):
P = self.cls
p = P('//a')
pp = p.joinpath('b')
self.assertEqual(pp, P('//a/b'))
pp = P('/a').joinpath('//c')
self.assertEqual(pp, P('//c'))
pp = P('//a').joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div(self):
# Basically the same as joinpath().
P = self.cls
p = P('//a')
pp = p / 'b'
self.assertEqual(pp, P('//a/b'))
pp = P('/a') / '//c'
self.assertEqual(pp, P('//c'))
pp = P('//a') / '/c'
self.assertEqual(pp, P('/c'))
class PureWindowsPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PureWindowsPath
equivalences = _BasePurePathTest.equivalences.copy()
equivalences.update({
'c:a': [ ('c:', 'a'), ('c:', 'a/'), ('/', 'c:', 'a') ],
'c:/a': [
('c:/', 'a'), ('c:', '/', 'a'), ('c:', '/a'),
('/z', 'c:/', 'a'), ('//x/y', 'c:/', 'a'),
],
'//a/b/': [ ('//a/b',) ],
'//a/b/c': [
('//a/b', 'c'), ('//a/b/', 'c'),
],
})
def test_str(self):
p = self.cls('a/b/c')
self.assertEqual(str(p), 'a\\b\\c')
p = self.cls('c:/a/b/c')
self.assertEqual(str(p), 'c:\\a\\b\\c')
p = self.cls('//a/b')
self.assertEqual(str(p), '\\\\a\\b\\')
p = self.cls('//a/b/c')
self.assertEqual(str(p), '\\\\a\\b\\c')
p = self.cls('//a/b/c/d')
self.assertEqual(str(p), '\\\\a\\b\\c\\d')
def test_str_subclass(self):
self._check_str_subclass('c:')
self._check_str_subclass('c:a')
self._check_str_subclass('c:a\\b.txt')
self._check_str_subclass('c:\\')
self._check_str_subclass('c:\\a')
self._check_str_subclass('c:\\a\\b.txt')
self._check_str_subclass('\\\\some\\share')
self._check_str_subclass('\\\\some\\share\\a')
self._check_str_subclass('\\\\some\\share\\a\\b.txt')
def test_eq(self):
P = self.cls
self.assertEqual(P('c:a/b'), P('c:a/b'))
self.assertEqual(P('c:a/b'), P('c:', 'a', 'b'))
self.assertNotEqual(P('c:a/b'), P('d:a/b'))
self.assertNotEqual(P('c:a/b'), P('c:/a/b'))
self.assertNotEqual(P('/a/b'), P('c:/a/b'))
# Case-insensitivity.
self.assertEqual(P('a/B'), P('A/b'))
self.assertEqual(P('C:a/B'), P('c:A/b'))
self.assertEqual(P('//Some/SHARE/a/B'), P('//somE/share/A/b'))
def test_as_uri(self):
P = self.cls
with self.assertRaises(ValueError):
P('/a/b').as_uri()
with self.assertRaises(ValueError):
P('c:a/b').as_uri()
self.assertEqual(P('c:/').as_uri(), 'file:///c:/')
self.assertEqual(P('c:/a/b.c').as_uri(), 'file:///c:/a/b.c')
self.assertEqual(P('c:/a/b%#c').as_uri(), 'file:///c:/a/b%25%23c')
self.assertEqual(P('c:/a/b\xe9').as_uri(), 'file:///c:/a/b%C3%A9')
self.assertEqual(P('//some/share/').as_uri(), 'file://some/share/')
self.assertEqual(P('//some/share/a/b.c').as_uri(),
'file://some/share/a/b.c')
self.assertEqual(P('//some/share/a/b%#c\xe9').as_uri(),
'file://some/share/a/b%25%23c%C3%A9')
def test_match_common(self):
P = self.cls
# Absolute patterns.
self.assertTrue(P('c:/b.py').match('/*.py'))
self.assertTrue(P('c:/b.py').match('c:*.py'))
self.assertTrue(P('c:/b.py').match('c:/*.py'))
self.assertFalse(P('d:/b.py').match('c:/*.py')) # wrong drive
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('b.py').match('c:*.py'))
self.assertFalse(P('b.py').match('c:/*.py'))
self.assertFalse(P('c:b.py').match('/*.py'))
self.assertFalse(P('c:b.py').match('c:/*.py'))
self.assertFalse(P('/b.py').match('c:*.py'))
self.assertFalse(P('/b.py').match('c:/*.py'))
# UNC patterns.
self.assertTrue(P('//some/share/a.py').match('/*.py'))
self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
# Case-insensitivity.
self.assertTrue(P('B.py').match('b.PY'))
self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
def test_ordering_common(self):
# Case-insensitivity.
def assertOrderedEqual(a, b):
self.assertLessEqual(a, b)
self.assertGreaterEqual(b, a)
P = self.cls
p = P('c:A/b')
q = P('C:a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
p = P('//some/Share/A/b')
q = P('//Some/SHARE/a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
def test_parts(self):
P = self.cls
p = P('c:a/b')
parts = p.parts
self.assertEqual(parts, ('c:', 'a', 'b'))
p = P('c:/a/b')
parts = p.parts
self.assertEqual(parts, ('c:\\', 'a', 'b'))
p = P('//a/b/c/d')
parts = p.parts
self.assertEqual(parts, ('\\\\a\\b\\', 'c', 'd'))
def test_parent(self):
# Anchored
P = self.cls
p = P('z:a/b/c')
self.assertEqual(p.parent, P('z:a/b'))
self.assertEqual(p.parent.parent, P('z:a'))
self.assertEqual(p.parent.parent.parent, P('z:'))
self.assertEqual(p.parent.parent.parent.parent, P('z:'))
p = P('z:/a/b/c')
self.assertEqual(p.parent, P('z:/a/b'))
self.assertEqual(p.parent.parent, P('z:/a'))
self.assertEqual(p.parent.parent.parent, P('z:/'))
self.assertEqual(p.parent.parent.parent.parent, P('z:/'))
p = P('//a/b/c/d')
self.assertEqual(p.parent, P('//a/b/c'))
self.assertEqual(p.parent.parent, P('//a/b'))
self.assertEqual(p.parent.parent.parent, P('//a/b'))
def test_parents(self):
# Anchored
P = self.cls
p = P('z:a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:a'))
self.assertEqual(par[1], P('z:'))
self.assertEqual(list(par), [P('z:a'), P('z:')])
with self.assertRaises(IndexError):
par[2]
p = P('z:/a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:/a'))
self.assertEqual(par[1], P('z:/'))
self.assertEqual(list(par), [P('z:/a'), P('z:/')])
with self.assertRaises(IndexError):
par[2]
p = P('//a/b/c/d')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('//a/b/c'))
self.assertEqual(par[1], P('//a/b'))
self.assertEqual(list(par), [P('//a/b/c'), P('//a/b')])
with self.assertRaises(IndexError):
par[2]
def test_drive(self):
P = self.cls
self.assertEqual(P('c:').drive, 'c:')
self.assertEqual(P('c:a/b').drive, 'c:')
self.assertEqual(P('c:/').drive, 'c:')
self.assertEqual(P('c:/a/b/').drive, 'c:')
self.assertEqual(P('//a/b').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/c/d').drive, '\\\\a\\b')
def test_root(self):
P = self.cls
self.assertEqual(P('c:').root, '')
self.assertEqual(P('c:a/b').root, '')
self.assertEqual(P('c:/').root, '\\')
self.assertEqual(P('c:/a/b/').root, '\\')
self.assertEqual(P('//a/b').root, '\\')
self.assertEqual(P('//a/b/').root, '\\')
self.assertEqual(P('//a/b/c/d').root, '\\')
def test_anchor(self):
P = self.cls
self.assertEqual(P('c:').anchor, 'c:')
self.assertEqual(P('c:a/b').anchor, 'c:')
self.assertEqual(P('c:/').anchor, 'c:\\')
self.assertEqual(P('c:/a/b/').anchor, 'c:\\')
self.assertEqual(P('//a/b').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/c/d').anchor, '\\\\a\\b\\')
def test_name(self):
P = self.cls
self.assertEqual(P('c:').name, '')
self.assertEqual(P('c:/').name, '')
self.assertEqual(P('c:a/b').name, 'b')
self.assertEqual(P('c:/a/b').name, 'b')
self.assertEqual(P('c:a/b.py').name, 'b.py')
self.assertEqual(P('c:/a/b.py').name, 'b.py')
self.assertEqual(P('//My.py/Share.php').name, '')
self.assertEqual(P('//My.py/Share.php/a/b').name, 'b')
def test_suffix(self):
P = self.cls
self.assertEqual(P('c:').suffix, '')
self.assertEqual(P('c:/').suffix, '')
self.assertEqual(P('c:a/b').suffix, '')
self.assertEqual(P('c:/a/b').suffix, '')
self.assertEqual(P('c:a/b.py').suffix, '.py')
self.assertEqual(P('c:/a/b.py').suffix, '.py')
self.assertEqual(P('c:a/.hgrc').suffix, '')
self.assertEqual(P('c:/a/.hgrc').suffix, '')
self.assertEqual(P('c:a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('//My.py/Share.php').suffix, '')
self.assertEqual(P('//My.py/Share.php/a/b').suffix, '')
def test_suffixes(self):
P = self.cls
self.assertEqual(P('c:').suffixes, [])
self.assertEqual(P('c:/').suffixes, [])
self.assertEqual(P('c:a/b').suffixes, [])
self.assertEqual(P('c:/a/b').suffixes, [])
self.assertEqual(P('c:a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:/a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:a/.hgrc').suffixes, [])
self.assertEqual(P('c:/a/.hgrc').suffixes, [])
self.assertEqual(P('c:a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('c:/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('//My.py/Share.php').suffixes, [])
self.assertEqual(P('//My.py/Share.php/a/b').suffixes, [])
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffixes, [])
def test_stem(self):
P = self.cls
self.assertEqual(P('c:').stem, '')
self.assertEqual(P('c:.').stem, '')
self.assertEqual(P('c:..').stem, '..')
self.assertEqual(P('c:/').stem, '')
self.assertEqual(P('c:a/b').stem, 'b')
self.assertEqual(P('c:a/b.py').stem, 'b')
self.assertEqual(P('c:a/.hgrc').stem, '.hgrc')
self.assertEqual(P('c:a/.hg.rc').stem, '.hg')
self.assertEqual(P('c:a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('c:a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
def test_with_name(self):
P = self.cls
self.assertEqual(P('c:a/b').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/b').with_name('d.xml'), P('c:/a/d.xml'))
self.assertEqual(P('c:a/Dot ending.').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/Dot ending.').with_name('d.xml'), P('c:/a/d.xml'))
self.assertRaises(ValueError, P('c:').with_name, 'd.xml')
self.assertRaises(ValueError, P('c:/').with_name, 'd.xml')
self.assertRaises(ValueError, P('//My/Share').with_name, 'd.xml')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:e')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:/e')
self.assertRaises(ValueError, P('c:a/b').with_name, '//My/Share')
def test_with_suffix(self):
P = self.cls
self.assertEqual(P('c:a/b').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b').with_suffix('.gz'), P('c:/a/b.gz'))
self.assertEqual(P('c:a/b.py').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b.py').with_suffix('.gz'), P('c:/a/b.gz'))
# Path doesn't have a "filename" component.
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
self.assertRaises(ValueError, P('//My/Share').with_suffix, '.gz')
# Invalid suffix.
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c\\d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c\\d')
def test_relative_to(self):
P = self.cls
p = P('C:Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:foO')), P('Bar'))
self.assertEqual(p.relative_to('c:foO'), P('Bar'))
self.assertEqual(p.relative_to('c:foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:foO/baR')), P())
self.assertEqual(p.relative_to('c:foO/baR'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('Foo'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Baz'))
p = P('C:/Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('/Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('/Foo/Bar'))
self.assertEqual(str(p.relative_to(P('c:'))), '\\Foo\\Bar')
self.assertEqual(str(p.relative_to('c:')), '\\Foo\\Bar')
self.assertEqual(p.relative_to(P('c:/')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:/foO')), P('Bar'))
self.assertEqual(p.relative_to('c:/foO'), P('Bar'))
self.assertEqual(p.relative_to('c:/foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:/foO/baR')), P())
self.assertEqual(p.relative_to('c:/foO/baR'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('C:/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo'))
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('d:/'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//C/Foo'))
# UNC paths.
p = P('//Server/Share/Foo/Bar')
self.assertEqual(p.relative_to(P('//sErver/sHare')), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare'), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo')), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo'), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo/'), P('Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo/Bar')), P())
self.assertEqual(p.relative_to('//sErver/sHare/Foo/Bar'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('c:/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//z/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//Server/z/Foo'))
def test_is_absolute(self):
P = self.cls
# Under NT, only paths with both a drive and a root are absolute.
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertFalse(P('/').is_absolute())
self.assertFalse(P('/a').is_absolute())
self.assertFalse(P('/a/b/').is_absolute())
self.assertFalse(P('c:').is_absolute())
self.assertFalse(P('c:a').is_absolute())
self.assertFalse(P('c:a/b/').is_absolute())
self.assertTrue(P('c:/').is_absolute())
self.assertTrue(P('c:/a').is_absolute())
self.assertTrue(P('c:/a/b/').is_absolute())
# UNC paths are absolute by definition.
self.assertTrue(P('//a/b').is_absolute())
self.assertTrue(P('//a/b/').is_absolute())
self.assertTrue(P('//a/b/c').is_absolute())
self.assertTrue(P('//a/b/c/d').is_absolute())
def test_join(self):
P = self.cls
p = P('C:/a/b')
pp = p.joinpath('x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('/x/y')
self.assertEqual(pp, P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
pp = p.joinpath('D:x/y')
self.assertEqual(pp, P('D:x/y'))
pp = p.joinpath('D:/x/y')
self.assertEqual(pp, P('D:/x/y'))
pp = p.joinpath('//host/share/x/y')
self.assertEqual(pp, P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
pp = p.joinpath('c:x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('c:/x/y')
self.assertEqual(pp, P('C:/x/y'))
def test_div(self):
# Basically the same as joinpath().
P = self.cls
p = P('C:/a/b')
self.assertEqual(p / 'x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'x' / 'y', P('C:/a/b/x/y'))
self.assertEqual(p / '/x/y', P('C:/x/y'))
self.assertEqual(p / '/x' / 'y', P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
self.assertEqual(p / 'D:x/y', P('D:x/y'))
self.assertEqual(p / 'D:' / 'x/y', P('D:x/y'))
self.assertEqual(p / 'D:/x/y', P('D:/x/y'))
self.assertEqual(p / 'D:' / '/x/y', P('D:/x/y'))
self.assertEqual(p / '//host/share/x/y', P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
self.assertEqual(p / 'c:x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'c:/x/y', P('C:/x/y'))
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(True, P('con').is_reserved())
self.assertIs(True, P('NUL').is_reserved())
self.assertIs(True, P('NUL.txt').is_reserved())
self.assertIs(True, P('com1').is_reserved())
self.assertIs(True, P('com9.bar').is_reserved())
self.assertIs(False, P('bar.com9').is_reserved())
self.assertIs(True, P('lpt1').is_reserved())
self.assertIs(True, P('lpt9.bar').is_reserved())
self.assertIs(False, P('bar.lpt9').is_reserved())
# Only the last component matters.
self.assertIs(False, P('c:/NUL/con/baz').is_reserved())
# UNC paths are never reserved.
self.assertIs(False, P('//my/share/nul/con/aux').is_reserved())
class PurePathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePath
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.PureWindowsPath if os.name == 'nt' else pathlib.PurePosixPath)
def test_different_flavours_unequal(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
self.assertNotEqual(p, q)
def test_different_flavours_unordered(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
with self.assertRaises(TypeError):
p < q
with self.assertRaises(TypeError):
p <= q
with self.assertRaises(TypeError):
p > q
with self.assertRaises(TypeError):
p >= q
#
# Tests for the concrete classes.
#
# Make sure any symbolic links in the base test path are resolved.
BASE = os.path.realpath(TESTFN)
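# Shorthands for building paths inside the temporary test tree: join() anchors
# at the resolved BASE, rel_join() at the original (possibly symlinked) TESTFN.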
join = lambda *x: os.path.join(BASE, *x)
rel_join = lambda *x: os.path.join(TESTFN, *x)
only_nt = unittest.skipIf(os.name != 'nt',
'test requires a Windows-compatible system')
only_posix = unittest.skipIf(os.name == 'nt',
'test requires a POSIX-compatible system')
@only_posix
class PosixPathAsPureTest(PurePosixPathTest):
cls = pathlib.PosixPath
@only_nt
class WindowsPathAsPureTest(PureWindowsPathTest):
cls = pathlib.WindowsPath
def test_owner(self):
P = self.cls
with self.assertRaises(NotImplementedError):
P('c:/').owner()
def test_group(self):
P = self.cls
with self.assertRaises(NotImplementedError):
P('c:/').group()
class _BasePathTest(object):
"""Tests for the FS-accessing functionalities of the Path classes."""
# (BASE)
# |
# |-- brokenLink -> non-existing
# |-- dirA
# | `-- linkC -> ../dirB
# |-- dirB
# | |-- fileB
# | `-- linkD -> ../dirB
# |-- dirC
# | |-- dirD
# | | `-- fileD
# | `-- fileC
# |-- dirE # No permissions
# |-- fileA
# |-- linkA -> fileA
# |-- linkB -> dirB
# `-- brokenLinkLoop -> brokenLinkLoop
#
def setUp(self):
def cleanup():
os.chmod(join('dirE'), 0o777)
support.rmtree(BASE)
self.addCleanup(cleanup)
os.mkdir(BASE)
os.mkdir(join('dirA'))
os.mkdir(join('dirB'))
os.mkdir(join('dirC'))
os.mkdir(join('dirC', 'dirD'))
os.mkdir(join('dirE'))
with open(join('fileA'), 'wb') as f:
f.write(b"this is file A\n")
with open(join('dirB', 'fileB'), 'wb') as f:
f.write(b"this is file B\n")
with open(join('dirC', 'fileC'), 'wb') as f:
f.write(b"this is file C\n")
with open(join('dirC', 'dirD', 'fileD'), 'wb') as f:
f.write(b"this is file D\n")
os.chmod(join('dirE'), 0)
if support.can_symlink():
# Relative symlinks.
os.symlink('fileA', join('linkA'))
os.symlink('non-existing', join('brokenLink'))
self.dirlink('dirB', join('linkB'))
self.dirlink(os.path.join('..', 'dirB'), join('dirA', 'linkC'))
# This one goes upwards, creating a loop.
self.dirlink(os.path.join('..', 'dirB'), join('dirB', 'linkD'))
# Broken symlink (pointing to itself).
os.symlink('brokenLinkLoop', join('brokenLinkLoop'))
if os.name == 'nt':
# Workaround for http://bugs.python.org/issue13772.
def dirlink(self, src, dest):
os.symlink(src, dest, target_is_directory=True)
else:
def dirlink(self, src, dest):
os.symlink(src, dest)
def assertSame(self, path_a, path_b):
self.assertTrue(os.path.samefile(str(path_a), str(path_b)),
"%r and %r don't point to the same file" %
(path_a, path_b))
def assertFileNotFound(self, func, *args, **kwargs):
with self.assertRaises(FileNotFoundError) as cm:
func(*args, **kwargs)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def assertEqualNormCase(self, path_a, path_b):
self.assertEqual(os.path.normcase(path_a), os.path.normcase(path_b))
def _test_cwd(self, p):
q = self.cls(os.getcwd())
self.assertEqual(p, q)
self.assertEqualNormCase(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_cwd(self):
p = self.cls.cwd()
self._test_cwd(p)
def _test_home(self, p):
q = self.cls(os.path.expanduser('~'))
self.assertEqual(p, q)
self.assertEqualNormCase(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_home(self):
p = self.cls.home()
self._test_home(p)
def test_samefile(self):
fileA_path = os.path.join(BASE, 'fileA')
fileB_path = os.path.join(BASE, 'dirB', 'fileB')
p = self.cls(fileA_path)
pp = self.cls(fileA_path)
q = self.cls(fileB_path)
self.assertTrue(p.samefile(fileA_path))
self.assertTrue(p.samefile(pp))
self.assertFalse(p.samefile(fileB_path))
self.assertFalse(p.samefile(q))
# Test the non-existent file case
non_existent = os.path.join(BASE, 'foo')
r = self.cls(non_existent)
self.assertRaises(FileNotFoundError, p.samefile, r)
self.assertRaises(FileNotFoundError, p.samefile, non_existent)
self.assertRaises(FileNotFoundError, r.samefile, p)
self.assertRaises(FileNotFoundError, r.samefile, non_existent)
self.assertRaises(FileNotFoundError, r.samefile, r)
self.assertRaises(FileNotFoundError, r.samefile, non_existent)
def test_empty_path(self):
# The empty path points to '.'
p = self.cls('')
self.assertEqual(p.stat(), os.stat('.'))
def test_expanduser_common(self):
P = self.cls
p = P('~')
self.assertEqual(p.expanduser(), P(os.path.expanduser('~')))
p = P('foo')
self.assertEqual(p.expanduser(), p)
p = P('/~')
self.assertEqual(p.expanduser(), p)
p = P('../~')
self.assertEqual(p.expanduser(), p)
p = P(P('').absolute().anchor) / '~'
self.assertEqual(p.expanduser(), p)
def test_exists(self):
P = self.cls
p = P(BASE)
self.assertIs(True, p.exists())
self.assertIs(True, (p / 'dirA').exists())
self.assertIs(True, (p / 'fileA').exists())
self.assertIs(False, (p / 'fileA' / 'bah').exists())
if support.can_symlink():
self.assertIs(True, (p / 'linkA').exists())
self.assertIs(True, (p / 'linkB').exists())
self.assertIs(True, (p / 'linkB' / 'fileB').exists())
self.assertIs(False, (p / 'linkA' / 'bah').exists())
self.assertIs(False, (p / 'foo').exists())
self.assertIs(False, P('/xyzzy').exists())
self.assertIs(False, P(BASE + '\udfff').exists())
self.assertIs(False, P(BASE + '\x00').exists())
def test_open_common(self):
p = self.cls(BASE)
with (p / 'fileA').open('r') as f:
self.assertIsInstance(f, io.TextIOBase)
self.assertEqual(f.read(), "this is file A\n")
with (p / 'fileA').open('rb') as f:
self.assertIsInstance(f, io.BufferedIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
with (p / 'fileA').open('rb', buffering=0) as f:
self.assertIsInstance(f, io.RawIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
def test_read_write_bytes(self):
p = self.cls(BASE)
(p / 'fileA').write_bytes(b'abcdefg')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
# Check that trying to write str does not truncate the file.
self.assertRaises(TypeError, (p / 'fileA').write_bytes, 'somestr')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
def test_read_write_text(self):
p = self.cls(BASE)
(p / 'fileA').write_text('äbcdefg', encoding='latin-1')
self.assertEqual((p / 'fileA').read_text(
encoding='utf-8', errors='ignore'), 'bcdefg')
# Check that trying to write bytes does not truncate the file.
self.assertRaises(TypeError, (p / 'fileA').write_text, b'somebytes')
self.assertEqual((p / 'fileA').read_text(encoding='latin-1'), 'äbcdefg')
def test_iterdir(self):
P = self.cls
p = P(BASE)
it = p.iterdir()
paths = set(it)
expected = ['dirA', 'dirB', 'dirC', 'dirE', 'fileA']
if support.can_symlink():
expected += ['linkA', 'linkB', 'brokenLink', 'brokenLinkLoop']
self.assertEqual(paths, { P(BASE, q) for q in expected })
@support.skip_unless_symlink
def test_iterdir_symlink(self):
# __iter__ on a symlink to a directory.
P = self.cls
p = P(BASE, 'linkB')
paths = set(p.iterdir())
expected = { P(BASE, 'linkB', q) for q in ['fileB', 'linkD'] }
self.assertEqual(paths, expected)
def test_iterdir_nodir(self):
# __iter__ on something that is not a directory.
p = self.cls(BASE, 'fileA')
with self.assertRaises(OSError) as cm:
next(p.iterdir())
# ENOENT or EINVAL under Windows, ENOTDIR otherwise
# (see issue #12802).
self.assertIn(cm.exception.errno, (errno.ENOTDIR,
errno.ENOENT, errno.EINVAL))
def test_glob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.glob("fileA")
self.assertIsInstance(it, collections.abc.Iterator)
_check(it, ["fileA"])
_check(p.glob("fileB"), [])
_check(p.glob("dir*/file*"), ["dirB/fileB", "dirC/fileC"])
if not support.can_symlink():
_check(p.glob("*A"), ['dirA', 'fileA'])
else:
_check(p.glob("*A"), ['dirA', 'fileA', 'linkA'])
if not support.can_symlink():
_check(p.glob("*B/*"), ['dirB/fileB'])
else:
_check(p.glob("*B/*"), ['dirB/fileB', 'dirB/linkD',
'linkB/fileB', 'linkB/linkD'])
if not support.can_symlink():
_check(p.glob("*/fileB"), ['dirB/fileB'])
else:
_check(p.glob("*/fileB"), ['dirB/fileB', 'linkB/fileB'])
def test_rglob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.rglob("fileA")
self.assertIsInstance(it, collections.abc.Iterator)
_check(it, ["fileA"])
_check(p.rglob("fileB"), ["dirB/fileB"])
_check(p.rglob("*/fileA"), [])
if not support.can_symlink():
_check(p.rglob("*/fileB"), ["dirB/fileB"])
else:
_check(p.rglob("*/fileB"), ["dirB/fileB", "dirB/linkD/fileB",
"linkB/fileB", "dirA/linkC/fileB"])
_check(p.rglob("file*"), ["fileA", "dirB/fileB",
"dirC/fileC", "dirC/dirD/fileD"])
p = P(BASE, "dirC")
_check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"])
_check(p.rglob("*/*"), ["dirC/dirD/fileD"])
@support.skip_unless_symlink
def test_rglob_symlink_loop(self):
# Don't get fooled by symlink loops (Issue #26012).
P = self.cls
p = P(BASE)
given = set(p.rglob('*'))
expect = {'brokenLink',
'dirA', 'dirA/linkC',
'dirB', 'dirB/fileB', 'dirB/linkD',
'dirC', 'dirC/dirD', 'dirC/dirD/fileD', 'dirC/fileC',
'dirE',
'fileA',
'linkA',
'linkB',
'brokenLinkLoop',
}
self.assertEqual(given, {p / x for x in expect})
def test_glob_many_open_files(self):
depth = 30
P = self.cls
base = P(BASE) / 'deep'
p = P(base, *(['d']*depth))
p.mkdir(parents=True)
pattern = '/'.join(['*'] * depth)
iters = [base.glob(pattern) for j in range(100)]
for it in iters:
self.assertEqual(next(it), p)
iters = [base.rglob('d') for j in range(100)]
p = base
for i in range(depth):
p = p / 'd'
for it in iters:
self.assertEqual(next(it), p)
def test_glob_dotdot(self):
# ".." is not special in globs.
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("..")), { P(BASE, "..") })
self.assertEqual(set(p.glob("dirA/../file*")), { P(BASE, "dirA/../fileA") })
self.assertEqual(set(p.glob("../xyzzy")), set())
def _check_resolve(self, p, expected, strict=True):
q = p.resolve(strict)
self.assertEqual(q, expected)
# This can be used to check both relative and absolute resolutions.
_check_resolve_relative = _check_resolve_absolute = _check_resolve
@support.skip_unless_symlink
def test_resolve_common(self):
P = self.cls
p = P(BASE, 'foo')
with self.assertRaises(OSError) as cm:
p.resolve(strict=True)
self.assertEqual(cm.exception.errno, errno.ENOENT)
# Non-strict
self.assertEqualNormCase(str(p.resolve(strict=False)),
os.path.join(BASE, 'foo'))
p = P(BASE, 'foo', 'in', 'spam')
self.assertEqualNormCase(str(p.resolve(strict=False)),
os.path.join(BASE, 'foo', 'in', 'spam'))
p = P(BASE, '..', 'foo', 'in', 'spam')
self.assertEqualNormCase(str(p.resolve(strict=False)),
os.path.abspath(os.path.join('foo', 'in', 'spam')))
# These are all relative symlinks.
p = P(BASE, 'dirB', 'fileB')
self._check_resolve_relative(p, p)
p = P(BASE, 'linkA')
self._check_resolve_relative(p, P(BASE, 'fileA'))
p = P(BASE, 'dirA', 'linkC', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
p = P(BASE, 'dirB', 'linkD', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
# Non-strict
p = P(BASE, 'dirA', 'linkC', 'fileB', 'foo', 'in', 'spam')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB', 'foo', 'in',
'spam'), False)
p = P(BASE, 'dirA', 'linkC', '..', 'foo', 'in', 'spam')
if os.name == 'nt':
# In Windows, if linkY points to dirB, 'dirA\linkY\..'
# resolves to 'dirA' without resolving linkY first.
self._check_resolve_relative(p, P(BASE, 'dirA', 'foo', 'in',
'spam'), False)
else:
# In Posix, if linkY points to dirB, 'dirA/linkY/..'
# resolves to 'dirB/..' first before resolving to parent of dirB.
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False)
# Now create absolute symlinks.
d = support._longpath(tempfile.mkdtemp(suffix='-dirD', dir=os.getcwd()))
self.addCleanup(support.rmtree, d)
os.symlink(os.path.join(d), join('dirA', 'linkX'))
os.symlink(join('dirB'), os.path.join(d, 'linkY'))
p = P(BASE, 'dirA', 'linkX', 'linkY', 'fileB')
self._check_resolve_absolute(p, P(BASE, 'dirB', 'fileB'))
# Non-strict
p = P(BASE, 'dirA', 'linkX', 'linkY', 'foo', 'in', 'spam')
self._check_resolve_relative(p, P(BASE, 'dirB', 'foo', 'in', 'spam'),
False)
p = P(BASE, 'dirA', 'linkX', 'linkY', '..', 'foo', 'in', 'spam')
if os.name == 'nt':
# In Windows, if linkY points to dirB, 'dirA\linkY\..'
# resolves to 'dirA' without resolving linkY first.
self._check_resolve_relative(p, P(d, 'foo', 'in', 'spam'), False)
else:
# In Posix, if linkY points to dirB, 'dirA/linkY/..'
# resolves to 'dirB/..' first before resolving to parent of dirB.
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False)
@support.skip_unless_symlink
def test_resolve_dot(self):
# See https://bitbucket.org/pitrou/pathlib/issue/9/pathresolve-fails-on-complex-symlinks
p = self.cls(BASE)
self.dirlink('.', join('0'))
self.dirlink(os.path.join('0', '0'), join('1'))
self.dirlink(os.path.join('1', '1'), join('2'))
q = p / '2'
self.assertEqual(q.resolve(strict=True), p)
r = q / '3' / '4'
self.assertRaises(FileNotFoundError, r.resolve, strict=True)
# Non-strict
self.assertEqual(r.resolve(strict=False), p / '3' / '4')
def test_with(self):
p = self.cls(BASE)
it = p.iterdir()
it2 = p.iterdir()
next(it2)
with p:
pass
# I/O operation on closed path.
self.assertRaises(ValueError, next, it)
self.assertRaises(ValueError, next, it2)
self.assertRaises(ValueError, p.open)
self.assertRaises(ValueError, p.resolve)
self.assertRaises(ValueError, p.absolute)
self.assertRaises(ValueError, p.__enter__)
def test_chmod(self):
p = self.cls(BASE) / 'fileA'
mode = p.stat().st_mode
# Clear writable bit.
new_mode = mode & ~0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# Set writable bit.
new_mode = mode | 0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# XXX also need a test for lchmod.
def test_stat(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(p.stat(), st)
# Change file mode by flipping write bit.
p.chmod(st.st_mode ^ 0o222)
self.addCleanup(p.chmod, st.st_mode)
self.assertNotEqual(p.stat(), st)
@support.skip_unless_symlink
def test_lstat(self):
        p = self.cls(BASE) / 'linkA'
st = p.stat()
self.assertNotEqual(st, p.lstat())
def test_lstat_nosymlink(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(st, p.lstat())
@unittest.skipUnless(pwd, "the pwd module is needed for this test")
def test_owner(self):
p = self.cls(BASE) / 'fileA'
uid = p.stat().st_uid
try:
name = pwd.getpwuid(uid).pw_name
except KeyError:
self.skipTest(
"user %d doesn't have an entry in the system database" % uid)
self.assertEqual(name, p.owner())
@unittest.skipUnless(grp, "the grp module is needed for this test")
def test_group(self):
p = self.cls(BASE) / 'fileA'
gid = p.stat().st_gid
try:
name = grp.getgrgid(gid).gr_name
except KeyError:
self.skipTest(
"group %d doesn't have an entry in the system database" % gid)
self.assertEqual(name, p.group())
def test_unlink(self):
p = self.cls(BASE) / 'fileA'
p.unlink()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_unlink_missing_ok(self):
p = self.cls(BASE) / 'fileAAA'
self.assertFileNotFound(p.unlink)
p.unlink(missing_ok=True)
def test_rmdir(self):
p = self.cls(BASE) / 'dirA'
for q in p.iterdir():
q.unlink()
p.rmdir()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_link_to(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# linking to another path.
q = P / 'dirA' / 'fileAA'
try:
p.link_to(q)
except PermissionError as e:
self.skipTest('os.link(): %s' % e)
self.assertEqual(q.stat().st_size, size)
self.assertEqual(os.path.samefile(p, q), True)
self.assertTrue(p.stat)
# Linking to a str of a relative path.
r = rel_join('fileAAA')
q.link_to(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertTrue(q.stat)
def test_rename(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Renaming to another path.
q = P / 'dirA' / 'fileAA'
renamed_p = p.rename(q)
self.assertEqual(renamed_p, q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Renaming to a str of a relative path.
r = rel_join('fileAAA')
renamed_q = q.rename(r)
self.assertEqual(renamed_q, self.cls(r))
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_replace(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Replacing a non-existing path.
q = P / 'dirA' / 'fileAA'
replaced_p = p.replace(q)
self.assertEqual(replaced_p, q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Replacing another (existing) path.
r = rel_join('dirB', 'fileB')
replaced_q = q.replace(r)
self.assertEqual(replaced_q, self.cls(r))
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_touch_common(self):
P = self.cls(BASE)
p = P / 'newfileA'
self.assertFalse(p.exists())
p.touch()
self.assertTrue(p.exists())
st = p.stat()
old_mtime = st.st_mtime
old_mtime_ns = st.st_mtime_ns
# Rewind the mtime sufficiently far in the past to work around
# filesystem-specific timestamp granularity.
os.utime(str(p), (old_mtime - 10, old_mtime - 10))
# The file mtime should be refreshed by calling touch() again.
p.touch()
st = p.stat()
self.assertGreaterEqual(st.st_mtime_ns, old_mtime_ns)
self.assertGreaterEqual(st.st_mtime, old_mtime)
# Now with exist_ok=False.
p = P / 'newfileB'
self.assertFalse(p.exists())
p.touch(mode=0o700, exist_ok=False)
self.assertTrue(p.exists())
self.assertRaises(OSError, p.touch, exist_ok=False)
def test_touch_nochange(self):
P = self.cls(BASE)
p = P / 'fileA'
p.touch()
with p.open('rb') as f:
self.assertEqual(f.read().strip(), b"this is file A")
def test_mkdir(self):
P = self.cls(BASE)
p = P / 'newdirA'
self.assertFalse(p.exists())
p.mkdir()
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_parents(self):
# Creating a chain of directories.
p = self.cls(BASE, 'newdirB', 'newdirC')
self.assertFalse(p.exists())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.ENOENT)
p.mkdir(parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
# Test `mode` arg.
mode = stat.S_IMODE(p.stat().st_mode) # Default mode.
p = self.cls(BASE, 'newdirD', 'newdirE')
p.mkdir(0o555, parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
if os.name != 'nt':
# The directory's permissions follow the mode argument.
self.assertEqual(stat.S_IMODE(p.stat().st_mode), 0o7555 & mode)
# The parent's permissions follow the default process settings.
self.assertEqual(stat.S_IMODE(p.parent.stat().st_mode), mode)
def test_mkdir_exist_ok(self):
p = self.cls(BASE, 'dirB')
st_ctime_first = p.stat().st_ctime
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
p.mkdir(exist_ok=True)
self.assertTrue(p.exists())
self.assertEqual(p.stat().st_ctime, st_ctime_first)
def test_mkdir_exist_ok_with_parent(self):
p = self.cls(BASE, 'dirC')
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
p = p / 'newdirC'
p.mkdir(parents=True)
st_ctime_first = p.stat().st_ctime
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
p.mkdir(parents=True, exist_ok=True)
self.assertTrue(p.exists())
self.assertEqual(p.stat().st_ctime, st_ctime_first)
def test_mkdir_exist_ok_root(self):
# Issue #25803: A drive root could raise PermissionError on Windows.
self.cls('/').resolve().mkdir(exist_ok=True)
self.cls('/').resolve().mkdir(parents=True, exist_ok=True)
@only_nt # XXX: not sure how to test this on POSIX.
def test_mkdir_with_unknown_drive(self):
for d in 'ZYXWVUTSRQPONMLKJIHGFEDCBA':
p = self.cls(d + ':\\')
if not p.is_dir():
break
else:
self.skipTest("cannot find a drive that doesn't exist")
with self.assertRaises(OSError):
(p / 'child' / 'path').mkdir(parents=True)
def test_mkdir_with_child_file(self):
p = self.cls(BASE, 'dirB', 'fileB')
self.assertTrue(p.exists())
# An exception is raised when the last path component is an existing
# regular file, regardless of whether exist_ok is true or not.
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True, exist_ok=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_no_parents_file(self):
p = self.cls(BASE, 'fileA')
self.assertTrue(p.exists())
# An exception is raised when the last path component is an existing
# regular file, regardless of whether exist_ok is true or not.
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(FileExistsError) as cm:
p.mkdir(exist_ok=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_concurrent_parent_creation(self):
for pattern_num in range(32):
p = self.cls(BASE, 'dirCPC%d' % pattern_num)
self.assertFalse(p.exists())
def my_mkdir(path, mode=0o777):
path = str(path)
# Emulate another process that would create the directory
# just before we try to create it ourselves. We do it
# in all possible pattern combinations, assuming that this
# function is called at most 5 times (dirCPC/dir1/dir2,
# dirCPC/dir1, dirCPC, dirCPC/dir1, dirCPC/dir1/dir2).
if pattern.pop():
os.mkdir(path, mode) # From another process.
concurrently_created.add(path)
os.mkdir(path, mode) # Our real call.
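            # pattern holds one flag per potential mkdir() call; a True flag makes
            # my_mkdir() simulate a concurrent process creating that directory first.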
pattern = [bool(pattern_num & (1 << n)) for n in range(5)]
concurrently_created = set()
p12 = p / 'dir1' / 'dir2'
try:
with mock.patch("pathlib._normal_accessor.mkdir", my_mkdir):
p12.mkdir(parents=True, exist_ok=False)
except FileExistsError:
self.assertIn(str(p12), concurrently_created)
else:
self.assertNotIn(str(p12), concurrently_created)
self.assertTrue(p.exists())
@support.skip_unless_symlink
def test_symlink_to(self):
P = self.cls(BASE)
target = P / 'fileA'
# Symlinking a path target.
link = P / 'dirA' / 'linkAA'
link.symlink_to(target)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
# Symlinking a str target.
link = P / 'dirA' / 'linkAAA'
link.symlink_to(str(target))
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertFalse(link.is_dir())
# Symlinking to a directory.
target = P / 'dirB'
link = P / 'dirA' / 'linkAAAA'
link.symlink_to(target, target_is_directory=True)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertTrue(link.is_dir())
self.assertTrue(list(link.iterdir()))
def test_is_dir(self):
P = self.cls(BASE)
self.assertTrue((P / 'dirA').is_dir())
self.assertFalse((P / 'fileA').is_dir())
self.assertFalse((P / 'non-existing').is_dir())
self.assertFalse((P / 'fileA' / 'bah').is_dir())
if support.can_symlink():
self.assertFalse((P / 'linkA').is_dir())
self.assertTrue((P / 'linkB').is_dir())
            self.assertIs((P / 'brokenLink').is_dir(), False)
self.assertIs((P / 'dirA\udfff').is_dir(), False)
self.assertIs((P / 'dirA\x00').is_dir(), False)
def test_is_file(self):
P = self.cls(BASE)
self.assertTrue((P / 'fileA').is_file())
self.assertFalse((P / 'dirA').is_file())
self.assertFalse((P / 'non-existing').is_file())
self.assertFalse((P / 'fileA' / 'bah').is_file())
if support.can_symlink():
self.assertTrue((P / 'linkA').is_file())
self.assertFalse((P / 'linkB').is_file())
            self.assertFalse((P / 'brokenLink').is_file())
self.assertIs((P / 'fileA\udfff').is_file(), False)
self.assertIs((P / 'fileA\x00').is_file(), False)
@only_posix
def test_is_mount(self):
P = self.cls(BASE)
R = self.cls('/') # TODO: Work out Windows.
self.assertFalse((P / 'fileA').is_mount())
self.assertFalse((P / 'dirA').is_mount())
self.assertFalse((P / 'non-existing').is_mount())
self.assertFalse((P / 'fileA' / 'bah').is_mount())
self.assertTrue(R.is_mount())
if support.can_symlink():
self.assertFalse((P / 'linkA').is_mount())
self.assertIs(self.cls('/\udfff').is_mount(), False)
self.assertIs(self.cls('/\x00').is_mount(), False)
def test_is_symlink(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_symlink())
self.assertFalse((P / 'dirA').is_symlink())
self.assertFalse((P / 'non-existing').is_symlink())
self.assertFalse((P / 'fileA' / 'bah').is_symlink())
if support.can_symlink():
self.assertTrue((P / 'linkA').is_symlink())
self.assertTrue((P / 'linkB').is_symlink())
            self.assertTrue((P / 'brokenLink').is_symlink())
        self.assertIs((P / 'fileA\udfff').is_symlink(), False)
        self.assertIs((P / 'fileA\x00').is_symlink(), False)
        if support.can_symlink():
            self.assertIs((P / 'linkA\udfff').is_symlink(), False)
            self.assertIs((P / 'linkA\x00').is_symlink(), False)
def test_is_fifo_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_fifo())
self.assertFalse((P / 'dirA').is_fifo())
self.assertFalse((P / 'non-existing').is_fifo())
self.assertFalse((P / 'fileA' / 'bah').is_fifo())
self.assertIs((P / 'fileA\udfff').is_fifo(), False)
self.assertIs((P / 'fileA\x00').is_fifo(), False)
@unittest.skipUnless(hasattr(os, "mkfifo"), "os.mkfifo() required")
def test_is_fifo_true(self):
P = self.cls(BASE, 'myfifo')
try:
os.mkfifo(str(P))
except PermissionError as e:
self.skipTest('os.mkfifo(): %s' % e)
self.assertTrue(P.is_fifo())
self.assertFalse(P.is_socket())
self.assertFalse(P.is_file())
self.assertIs(self.cls(BASE, 'myfifo\udfff').is_fifo(), False)
self.assertIs(self.cls(BASE, 'myfifo\x00').is_fifo(), False)
def test_is_socket_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_socket())
self.assertFalse((P / 'dirA').is_socket())
self.assertFalse((P / 'non-existing').is_socket())
self.assertFalse((P / 'fileA' / 'bah').is_socket())
self.assertIs((P / 'fileA\udfff').is_socket(), False)
self.assertIs((P / 'fileA\x00').is_socket(), False)
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
def test_is_socket_true(self):
P = self.cls(BASE, 'mysock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(sock.close)
try:
sock.bind(str(P))
except OSError as e:
if (isinstance(e, PermissionError) or
"AF_UNIX path too long" in str(e)):
self.skipTest("cannot bind Unix socket: " + str(e))
self.assertTrue(P.is_socket())
self.assertFalse(P.is_fifo())
self.assertFalse(P.is_file())
self.assertIs(self.cls(BASE, 'mysock\udfff').is_socket(), False)
self.assertIs(self.cls(BASE, 'mysock\x00').is_socket(), False)
def test_is_block_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_block_device())
self.assertFalse((P / 'dirA').is_block_device())
self.assertFalse((P / 'non-existing').is_block_device())
self.assertFalse((P / 'fileA' / 'bah').is_block_device())
self.assertIs((P / 'fileA\udfff').is_block_device(), False)
self.assertIs((P / 'fileA\x00').is_block_device(), False)
def test_is_char_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_char_device())
self.assertFalse((P / 'dirA').is_char_device())
self.assertFalse((P / 'non-existing').is_char_device())
self.assertFalse((P / 'fileA' / 'bah').is_char_device())
self.assertIs((P / 'fileA\udfff').is_char_device(), False)
self.assertIs((P / 'fileA\x00').is_char_device(), False)
def test_is_char_device_true(self):
# Under Unix, /dev/null should generally be a char device.
P = self.cls('/dev/null')
if not P.exists():
self.skipTest("/dev/null required")
self.assertTrue(P.is_char_device())
self.assertFalse(P.is_block_device())
self.assertFalse(P.is_file())
self.assertIs(self.cls('/dev/null\udfff').is_char_device(), False)
self.assertIs(self.cls('/dev/null\x00').is_char_device(), False)
def test_pickling_common(self):
p = self.cls(BASE, 'fileA')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertEqual(pp.stat(), p.stat())
def test_parts_interning(self):
P = self.cls
p = P('/usr/bin/foo')
q = P('/usr/local/bin')
# 'usr'
self.assertIs(p.parts[1], q.parts[1])
# 'bin'
self.assertIs(p.parts[2], q.parts[3])
def _check_complex_symlinks(self, link0_target):
# Test solving a non-looping chain of symlinks (issue #19887).
P = self.cls(BASE)
self.dirlink(os.path.join('link0', 'link0'), join('link1'))
self.dirlink(os.path.join('link1', 'link1'), join('link2'))
self.dirlink(os.path.join('link2', 'link2'), join('link3'))
self.dirlink(link0_target, join('link0'))
# Resolve absolute paths.
p = (P / 'link0').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = (P / 'link1').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = (P / 'link2').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = (P / 'link3').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
# Resolve relative paths.
old_path = os.getcwd()
os.chdir(BASE)
try:
p = self.cls('link0').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = self.cls('link1').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = self.cls('link2').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = self.cls('link3').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
finally:
os.chdir(old_path)
@support.skip_unless_symlink
def test_complex_symlinks_absolute(self):
self._check_complex_symlinks(BASE)
@support.skip_unless_symlink
def test_complex_symlinks_relative(self):
self._check_complex_symlinks('.')
@support.skip_unless_symlink
def test_complex_symlinks_relative_dot_dot(self):
self._check_complex_symlinks(os.path.join('dirA', '..'))
class PathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.Path
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.WindowsPath if os.name == 'nt' else pathlib.PosixPath)
def test_unsupported_flavour(self):
if os.name == 'nt':
self.assertRaises(NotImplementedError, pathlib.PosixPath)
else:
self.assertRaises(NotImplementedError, pathlib.WindowsPath)
def test_glob_empty_pattern(self):
p = self.cls()
with self.assertRaisesRegex(ValueError, 'Unacceptable pattern'):
list(p.glob(''))
@only_posix
class PosixPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.PosixPath
def _check_symlink_loop(self, *args, strict=True):
path = self.cls(*args)
with self.assertRaises(RuntimeError):
print(path.resolve(strict))
def test_open_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
with (p / 'new_file').open('wb'):
pass
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
with (p / 'other_new_file').open('wb'):
pass
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
def test_touch_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
(p / 'new_file').touch()
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
(p / 'other_new_file').touch()
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
(p / 'masked_new_file').touch(mode=0o750)
st = os.stat(join('masked_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o750)
@support.skip_unless_symlink
def test_resolve_loop(self):
# Loops with relative symlinks.
os.symlink('linkX/inside', join('linkX'))
self._check_symlink_loop(BASE, 'linkX')
os.symlink('linkY', join('linkY'))
self._check_symlink_loop(BASE, 'linkY')
os.symlink('linkZ/../linkZ', join('linkZ'))
self._check_symlink_loop(BASE, 'linkZ')
# Non-strict
self._check_symlink_loop(BASE, 'linkZ', 'foo', strict=False)
# Loops with absolute symlinks.
os.symlink(join('linkU/inside'), join('linkU'))
self._check_symlink_loop(BASE, 'linkU')
os.symlink(join('linkV'), join('linkV'))
self._check_symlink_loop(BASE, 'linkV')
os.symlink(join('linkW/../linkW'), join('linkW'))
self._check_symlink_loop(BASE, 'linkW')
# Non-strict
self._check_symlink_loop(BASE, 'linkW', 'foo', strict=False)
def test_glob(self):
P = self.cls
p = P(BASE)
given = set(p.glob("FILEa"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.glob("FILEa*")), set())
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
given = set(p.rglob("FILEd"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.rglob("FILEd*")), set())
@unittest.skipUnless(hasattr(pwd, 'getpwall'),
'pwd module does not expose getpwall()')
def test_expanduser(self):
P = self.cls
support.import_module('pwd')
import pwd
pwdent = pwd.getpwuid(os.getuid())
username = pwdent.pw_name
userhome = pwdent.pw_dir.rstrip('/') or '/'
# Find arbitrary different user (if exists).
for pwdent in pwd.getpwall():
othername = pwdent.pw_name
otherhome = pwdent.pw_dir.rstrip('/')
if othername != username and otherhome:
break
else:
othername = username
otherhome = userhome
p1 = P('~/Documents')
p2 = P('~' + username + '/Documents')
p3 = P('~' + othername + '/Documents')
p4 = P('../~' + username + '/Documents')
p5 = P('/~' + username + '/Documents')
p6 = P('')
p7 = P('~fakeuser/Documents')
with support.EnvironmentVarGuard() as env:
env.pop('HOME', None)
self.assertEqual(p1.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
self.assertRaises(RuntimeError, p7.expanduser)
env['HOME'] = '/tmp'
self.assertEqual(p1.expanduser(), P('/tmp/Documents'))
self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
self.assertRaises(RuntimeError, p7.expanduser)
@unittest.skipIf(sys.platform != "darwin",
"Bad file descriptor in /dev/fd affects only macOS")
def test_handling_bad_descriptor(self):
try:
file_descriptors = list(pathlib.Path('/dev/fd').rglob("*"))[3:]
if not file_descriptors:
self.skipTest("no file descriptors - issue was not reproduced")
# Checking all file descriptors because there is no guarantee
# which one will fail.
for f in file_descriptors:
f.exists()
f.is_dir()
f.is_file()
f.is_symlink()
f.is_block_device()
f.is_char_device()
f.is_fifo()
f.is_socket()
except OSError as e:
if e.errno == errno.EBADF:
self.fail("Bad file descriptor not handled.")
raise
@only_nt
class WindowsPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.WindowsPath
def test_glob(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("FILEa")), { P(BASE, "fileA") })
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
self.assertEqual(set(p.rglob("FILEd")), { P(BASE, "dirC/dirD/fileD") })
def test_expanduser(self):
P = self.cls
with support.EnvironmentVarGuard() as env:
env.pop('HOME', None)
env.pop('USERPROFILE', None)
env.pop('HOMEPATH', None)
env.pop('HOMEDRIVE', None)
env['USERNAME'] = 'alice'
            # Without HOME/USERPROFILE/HOMEPATH/HOMEDRIVE set, ~ expansion raises
            # and anchored paths are returned unchanged.
p1 = P('~/My Documents')
p2 = P('~alice/My Documents')
p3 = P('~bob/My Documents')
p4 = P('/~/My Documents')
p5 = P('d:~/My Documents')
p6 = P('')
self.assertRaises(RuntimeError, p1.expanduser)
self.assertRaises(RuntimeError, p2.expanduser)
self.assertRaises(RuntimeError, p3.expanduser)
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
def check():
env.pop('USERNAME', None)
self.assertEqual(p1.expanduser(),
P('C:/Users/alice/My Documents'))
self.assertRaises(KeyError, p2.expanduser)
env['USERNAME'] = 'alice'
self.assertEqual(p2.expanduser(),
P('C:/Users/alice/My Documents'))
self.assertEqual(p3.expanduser(),
P('C:/Users/bob/My Documents'))
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
# Test the first lookup key in the env vars.
env['HOME'] = 'C:\\Users\\alice'
check()
# Test that HOMEPATH is available instead.
env.pop('HOME', None)
env['HOMEPATH'] = 'C:\\Users\\alice'
check()
env['HOMEDRIVE'] = 'C:\\'
env['HOMEPATH'] = 'Users\\alice'
check()
env.pop('HOMEDRIVE', None)
env.pop('HOMEPATH', None)
env['USERPROFILE'] = 'C:\\Users\\alice'
check()
class CompatiblePathTest(unittest.TestCase):
"""
Test that a type can be made compatible with PurePath
derivatives by implementing division operator overloads.
"""
class CompatPath:
"""
Minimum viable class to test PurePath compatibility.
Simply uses the division operator to join a given
string and the string value of another object with
a forward slash.
"""
def __init__(self, string):
self.string = string
def __truediv__(self, other):
return type(self)(f"{self.string}/{other}")
def __rtruediv__(self, other):
return type(self)(f"{other}/{self.string}")
def test_truediv(self):
result = pathlib.PurePath("test") / self.CompatPath("right")
self.assertIsInstance(result, self.CompatPath)
self.assertEqual(result.string, "test/right")
with self.assertRaises(TypeError):
# Verify improper operations still raise a TypeError
pathlib.PurePath("test") / 10
def test_rtruediv(self):
result = self.CompatPath("left") / pathlib.PurePath("test")
self.assertIsInstance(result, self.CompatPath)
self.assertEqual(result.string, "left/test")
with self.assertRaises(TypeError):
# Verify improper operations still raise a TypeError
10 / pathlib.PurePath("test")
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 4,477,674,600,441,387,500 | 39.507506 | 96 | 0.527636 | false |
cloudfoundry/php-buildpack | tests/test_newrelic.py | 1 | 12713 | import os
import os.path
import tempfile
import shutil
import json
from nose.tools import eq_
from nose.tools import with_setup
from build_pack_utils import utils
from common.integration import ErrorHelper
from common.components import BuildPackAssertHelper
from common.components import HttpdAssertHelper
from common.components import PhpAssertHelper
from common.components import NoWebServerAssertHelper
from common.components import NewRelicAssertHelper
from common.components import DownloadAssertHelper
from common.base import BaseCompileApp
newrelic = utils.load_extension('extensions/newrelic')
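# The newrelic extension module is loaded straight from the buildpack source
# tree so its installer can be unit tested without a full compile run.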
def create_manifest_file(manifest_filename, contents):
    with open(manifest_filename, 'w+') as manifest_file:
        manifest_file.write(contents)
class TestNewRelic(object):
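    """Unit tests for the NewRelicInstaller extension."""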
def setUp(self):
self.manifest_dir = tempfile.mkdtemp()
self.buildpack_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
self.build_dir = tempfile.mkdtemp('build-')
self.php_dir = os.path.join(self.build_dir, 'php', 'etc')
os.makedirs(self.php_dir)
shutil.copy('defaults/config/php/7.3.x/php.ini', self.php_dir)
def tearDown(self):
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
if os.path.exists(self.manifest_dir):
shutil.rmtree(self.manifest_dir)
def test_set_default_version(self):
manifest_filename = os.path.join(self.manifest_dir, 'manifest.yml')
create_manifest_file(manifest_filename, GOOD_MANIFEST)
# create the object with the buildpack manifest
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VM': 'php',
'BP_DIR': self.buildpack_dir
}))
eq_(True, 'NEWRELIC_VERSION' in nr._ctx.keys())
del nr._ctx['NEWRELIC_VERSION']
# and test it with our custom manifest
nr._set_default_version(manifest_filename)
eq_(True, 'NEWRELIC_VERSION' in nr._ctx.keys())
eq_(nr._ctx['NEWRELIC_VERSION'], '6.4.0.99')
def test_set_default_version_bad_manifest(self):
manifest_filename = os.path.join(self.manifest_dir, 'manifest.yml')
create_manifest_file(manifest_filename, BAD_MANIFEST)
# create the object with the buildpack manifest
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VM': 'php',
'BP_DIR': self.buildpack_dir
}))
# and test it with our custom manifest
exception = None
try:
nr._set_default_version(manifest_filename)
except RuntimeError as e:
exception = e
eq_("Error detecting NewRelic default version", str(exception))
def testDefaults(self):
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VM': 'php',
'BP_DIR': self.buildpack_dir
}))
eq_(True, 'NEWRELIC_HOST' in nr._ctx.keys())
eq_(True, 'NEWRELIC_VERSION' in nr._ctx.keys())
eq_(True, 'NEWRELIC_PACKAGE' in nr._ctx.keys())
eq_(True, 'NEWRELIC_DOWNLOAD_URL' in nr._ctx.keys())
eq_(True, 'NEWRELIC_STRIP' in nr._ctx.keys())
def testShouldNotInstall(self):
nr = newrelic.NewRelicInstaller(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'BP_DIR': self.buildpack_dir
}))
eq_(False, nr.should_install())
@with_setup(setup=setUp, teardown=tearDown)
def testShouldInstall(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'BP_DIR': self.buildpack_dir,
'NEWRELIC_LICENSE': 'JUNK_LICENSE',
'VCAP_APPLICATION': {
'name': 'app-name-1'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
eq_(True, nr.should_install())
eq_('x64', nr._php_arch)
#eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20170718', nr._php_extn_dir)
eq_(False, nr._php_zts)
#eq_('20170718', nr._php_api)
#eq_('@{HOME}/newrelic/agent/x64/newrelic-20170718.so', nr.newrelic_so)
eq_('app-name-1', nr.app_name)
eq_('JUNK_LICENSE', nr.license_key)
eq_('@{HOME}/logs/newrelic.log', nr.log_path)
eq_('@{HOME}/logs/newrelic-daemon.log', nr.daemon_log_path)
eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
@with_setup(setup=setUp, teardown=tearDown)
def testShouldInstallService(self):
ctx = utils.FormattedDict({
'BP_DIR': self.buildpack_dir,
'BUILD_DIR': self.build_dir,
'VCAP_SERVICES': {
'newrelic': [{
'name': 'newrelic',
'label': 'newrelic',
'tags': ['Monitoring'],
'plan': 'standard',
'credentials': {'licenseKey': 'LICENSE'}}]
},
'VCAP_APPLICATION': {
'name': 'app-name-1'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
eq_(True, nr.should_install())
eq_('x64', nr._php_arch)
#eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20170718',
# nr._php_extn_dir)
eq_(False, nr._php_zts)
#eq_('20170718', nr._php_api)
#eq_('@{HOME}/newrelic/agent/x64/newrelic-20170718.so', nr.newrelic_so)
eq_('app-name-1', nr.app_name)
eq_('LICENSE', nr.license_key)
eq_('@{HOME}/logs/newrelic.log', nr.log_path)
eq_('@{HOME}/logs/newrelic-daemon.log', nr.daemon_log_path)
eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
@with_setup(setup=setUp, teardown=tearDown)
def testShouldInstallServiceAndManual(self):
ctx = utils.FormattedDict({
'BP_DIR': self.buildpack_dir,
'BUILD_DIR': self.build_dir,
'VCAP_SERVICES': {
'newrelic': [{
'name': 'newrelic',
'label': 'newrelic',
'tags': ['Monitoring'],
'plan': 'standard',
'credentials': {'licenseKey': 'LICENSE'}}]
},
'NEWRELIC_LICENSE': 'LICENSE2',
'VCAP_APPLICATION': {
'name': 'app-name-2'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
eq_(True, nr.should_install())
eq_('x64', nr._php_arch)
# TODO eq_('@{HOME}/php/lib/php/extensions/no-debug-non-zts-20170718',
#nr._php_extn_dir)
eq_(False, nr._php_zts)
# TODO eq_('20170718', nr._php_api)
#eq_('@{HOME}/newrelic/agent/x64/newrelic-20170718.so', nr.newrelic_so)
eq_('app-name-2', nr.app_name)
eq_('LICENSE2', nr.license_key)
eq_('@{HOME}/logs/newrelic.log', nr.log_path)
eq_('@{HOME}/logs/newrelic-daemon.log', nr.daemon_log_path)
eq_('@{HOME}/newrelic/daemon/newrelic-daemon.x64', nr.daemon_path)
eq_('@{HOME}/newrelic/daemon.sock', nr.socket_path)
eq_('@{HOME}/newrelic/daemon.pid', nr.pid_path)
@with_setup(setup=setUp, teardown=tearDown)
def testModifyPhpIni(self):
ctx = utils.FormattedDict({
'BP_DIR': self.buildpack_dir,
'BUILD_DIR': self.build_dir,
'NEWRELIC_LICENSE': 'JUNK_LICENSE',
'VCAP_APPLICATION': {
'name': 'app-name-1'
},
'PHP_VM': 'php'
})
nr = newrelic.NewRelicInstaller(ctx)
nr.modify_php_ini()
with open(os.path.join(self.php_dir, 'php.ini'), 'rt') as php_ini:
lines = php_ini.readlines()
eq_(True, lines.index('extension=%s\n' % nr.newrelic_so) >= 0)
eq_(True, lines.index('[newrelic]\n') >= 0)
eq_(True, lines.index('newrelic.license=@{NEWRELIC_LICENSE}\n') >= 0)
eq_(True, lines.index('newrelic.appname=%s\n' % nr.app_name) >= 0)
class TestNewRelicCompiled(BaseCompileApp):
def __init__(self):
self.app_name = 'app-1'
def setUp(self):
BaseCompileApp.setUp(self)
os.environ['NEWRELIC_LICENSE'] = 'JUNK_LICENSE'
os.environ['VCAP_APPLICATION'] = json.dumps({
'name': 'app-name-1'
})
def test_with_httpd_and_newrelic(self):
# helpers to confirm the environment
bp = BuildPackAssertHelper()
nr = NewRelicAssertHelper()
httpd = HttpdAssertHelper()
php = PhpAssertHelper()
# set web server to httpd, since that's what we're expecting here
self.opts.set_web_server('httpd')
# run the compile step of the build pack
output = ErrorHelper().compile(self.bp)
# confirm downloads
DownloadAssertHelper(3, 2).assert_downloads_from_output(output)
# confirm start script
bp.assert_start_script_is_correct(self.build_dir)
httpd.assert_start_script_is_correct(self.build_dir)
php.assert_start_script_is_correct(self.build_dir)
# confirm bp utils installed
bp.assert_scripts_are_installed(self.build_dir)
bp.assert_config_options(self.build_dir)
# check env & proc files
httpd.assert_contents_of_procs_file(self.build_dir)
httpd.assert_contents_of_env_file(self.build_dir)
php.assert_contents_of_procs_file(self.build_dir)
php.assert_contents_of_env_file(self.build_dir)
# webdir exists
httpd.assert_web_dir_exists(self.build_dir, self.opts.get_webdir())
# check php & httpd installed
httpd.assert_files_installed(self.build_dir)
php.assert_files_installed(self.build_dir)
nr.assert_files_installed(self.build_dir)
class TestNewRelicWithApp5(BaseCompileApp):
def __init__(self):
self.app_name = 'app-5'
def setUp(self):
BaseCompileApp.setUp(self)
os.environ['NEWRELIC_LICENSE'] = 'JUNK_LICENSE'
os.environ['VCAP_APPLICATION'] = json.dumps({
'name': 'app-name-1'
})
def test_standalone(self):
# helpers to confirm the environment
bp = BuildPackAssertHelper()
php = PhpAssertHelper()
none = NoWebServerAssertHelper()
nr = NewRelicAssertHelper()
# no web server
self.opts.set_web_server('none')
# run the compile step of the build pack
output = ErrorHelper().compile(self.bp)
# confirm downloads
DownloadAssertHelper(2, 1).assert_downloads_from_output(output)
# confirm httpd and nginx are not installed
none.assert_no_web_server_is_installed(self.build_dir)
# confirm start script
bp.assert_start_script_is_correct(self.build_dir)
php.assert_start_script_is_correct(self.build_dir)
# confirm bp utils installed
bp.assert_scripts_are_installed(self.build_dir)
# check env & proc files
none.assert_contents_of_procs_file(self.build_dir)
php.assert_contents_of_env_file(self.build_dir)
# webdir exists
none.assert_no_web_dir(self.build_dir, self.opts.get_webdir())
# check php cli installed
none.assert_files_installed(self.build_dir)
nr.assert_files_installed(self.build_dir)
BAD_MANIFEST = '''\
---
language: php
default_versions:
- name: newrelic
version: 99.3.0.161
dependencies:
- name: newrelic
version: 7.4.0.198
uri: https://download.newrelic.com/php_agent/archive/7.4.0.198/newrelic-php5-7.4.0.198-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: 3640d3cad6b5199f54a6b54a627235d6
- name: newrelic
version: 6.4.0.99
uri: https://download.newrelic.com/php_agent/archive/6.4.0.99/newrelic-php5-6.4.0.99-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: a5d5178f0f8133a65baf942a07408ba6
'''
GOOD_MANIFEST = '''\
---
language: php
default_versions:
- name: newrelic
version: 6.4.0.99
dependencies:
- name: newrelic
version: 7.4.0.198
uri: https://download.newrelic.com/php_agent/archive/7.4.0.198/newrelic-php5-7.4.0.198-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: 3640d3cad6b5199f54a6b54a627235d6
- name: newrelic
version: 6.4.0.99
uri: https://download.newrelic.com/php_agent/archive/6.4.0.99/newrelic-php5-6.4.0.99-linux.tar.gz
cf_stacks:
- cflinuxfs3
sha256: a5d5178f0f8133a65baf942a07408ba6
'''
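# Note on the fixtures above (descriptive comment, not from the original test
# file): BAD_MANIFEST declares a default newrelic version (99.3.0.161) that has
# no matching entry in its dependencies list, while GOOD_MANIFEST defaults to
# 6.4.0.99, which is listed, so only the latter default can actually be
# resolved to a downloadable agent.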
| apache-2.0 | 1,364,594,228,700,609,300 | 36.391176 | 101 | 0.602454 | false |
zyga/debian.plainbox | plainbox/impl/ctrl.py | 1 | 35608 | # This file is part of Checkbox.
#
# Copyright 2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.ctrl` -- Controller Classes
===============================================
Session controller classes implement the glue between models (jobs, whitelists,
session state) and the rest of the application. They encapsulate knowledge that
used to be special-cased and sprinkled around various parts of both plainbox
and particular plainbox-using applications.
Execution controllers are used by the :class:`~plainbox.impl.runner.JobRunner`
class to select the best method to execute a command of a particular job. This
is mostly applicable to jobs that need to run as another user, typically as
root, as the method that is used to effectively gain root differs depending on
circumstances.
"""
import abc
import contextlib
import grp
import itertools
import logging
import os
import posix
import tempfile
from plainbox.abc import IExecutionController
from plainbox.abc import IJobResult
from plainbox.abc import ISessionStateController
from plainbox.impl.depmgr import DependencyDuplicateError
from plainbox.impl.depmgr import DependencyMissingError
from plainbox.impl.job import JobDefinition
from plainbox.impl.job import JobOutputTextSource
from plainbox.impl.resource import ExpressionCannotEvaluateError
from plainbox.impl.resource import ExpressionFailedError
from plainbox.impl.resource import Resource
from plainbox.impl.secure.config import Unset
from plainbox.impl.secure.providers.v1 import Provider1
from plainbox.impl.secure.rfc822 import RFC822SyntaxError
from plainbox.impl.secure.rfc822 import gen_rfc822_records
from plainbox.impl.session.jobs import JobReadinessInhibitor
__all__ = [
'CheckBoxSessionStateController',
'RootViaPTL1ExecutionController',
'RootViaPkexecExecutionController',
'RootViaSudoExecutionController',
'UserJobExecutionController',
'checkbox_session_state_ctrl',
]
logger = logging.getLogger("plainbox.ctrl")
class CheckBoxSessionStateController(ISessionStateController):
"""
A combo controller for CheckBox-like jobs.
This controller implements the following features:
* A job may depend on another job, this is expressed via the 'depends'
attribute. Cyclic dependencies are not allowed. A job will become
inhibited if any of its dependencies have outcome other than
OUTCOME_PASS
* A job may require that a particular resource expression evaluates to
true. This is expressed via the 'requires' attribute. A job will
become inhibited if any of the requirement programs evaluates to
value other than True.
* A job may have the attribute 'plugin' equal to "local" which will
cause the controller to interpret the stdout of the command as a set
of job definitions.
* A job may have the attribute 'plugin' equal to "resource" which will
cause the controller to interpret the stdout of the command as a set
of resource definitions.
"""
def get_dependency_set(self, job):
"""
Get the set of direct dependencies of a particular job.
:param job:
A IJobDefinition instance that is to be visited
:returns:
set of pairs (dep_type, job_name)
Returns a set of pairs (dep_type, job_name) that describe all
dependencies of the specified job. The first element in the pair,
dep_type, is either DEP_TYPE_DIRECT or DEP_TYPE_RESOURCE. The second
element is the name of the job.
"""
direct = DependencyMissingError.DEP_TYPE_DIRECT
resource = DependencyMissingError.DEP_TYPE_RESOURCE
return set(itertools.chain(
zip(itertools.repeat(direct), job.get_direct_dependencies()),
zip(itertools.repeat(resource), job.get_resource_dependencies())))
def get_inhibitor_list(self, session_state, job):
"""
Get a list of readiness inhibitors that inhibit a particular job.
:param session_state:
A SessionState instance that is used to interrogate the
state of the session where it matters for a particular
job. Currently this is used to access resources and job
results.
:param job:
A JobDefinition instance
:returns:
List of JobReadinessInhibitor
"""
# Check if all job resource requirements are met
prog = job.get_resource_program()
inhibitors = []
if prog is not None:
try:
prog.evaluate_or_raise(session_state.resource_map)
except ExpressionCannotEvaluateError as exc:
# Lookup the related job (the job that provides the
# resources needed by the expression that cannot be
# evaluated)
related_job = session_state.job_state_map[
exc.expression.resource_name].job
# Add A PENDING_RESOURCE inhibitor as we are unable to
# determine if the resource requirement is met or not. This
                # can happen if the resource job did not run for any reason
# (it can either be prevented from running by normal means
# or simply be on the run_list but just was not executed
# yet).
inhibitor = JobReadinessInhibitor(
cause=JobReadinessInhibitor.PENDING_RESOURCE,
related_job=related_job,
related_expression=exc.expression)
inhibitors.append(inhibitor)
except ExpressionFailedError as exc:
# Lookup the related job (the job that provides the
# resources needed by the expression that failed)
related_job = session_state.job_state_map[
exc.expression.resource_name].job
# Add a FAILED_RESOURCE inhibitor as we have all the data
# to run the requirement program but it simply returns a
# non-True value. This typically indicates a missing
# software package or necessary hardware.
inhibitor = JobReadinessInhibitor(
cause=JobReadinessInhibitor.FAILED_RESOURCE,
related_job=related_job,
related_expression=exc.expression)
inhibitors.append(inhibitor)
# Check if all job dependencies ran successfully
for dep_name in sorted(job.get_direct_dependencies()):
dep_job_state = session_state.job_state_map[dep_name]
# If the dependency did not have a chance to run yet add the
# PENDING_DEP inhibitor.
if dep_job_state.result.outcome == IJobResult.OUTCOME_NONE:
inhibitor = JobReadinessInhibitor(
cause=JobReadinessInhibitor.PENDING_DEP,
related_job=dep_job_state.job)
inhibitors.append(inhibitor)
# If the dependency is anything but successful add the
# FAILED_DEP inhibitor. In theory the PENDING_DEP code above
            # could be discarded but this would lose context and would
# prevent the operator from actually understanding why a job
# cannot run.
elif dep_job_state.result.outcome != IJobResult.OUTCOME_PASS:
inhibitor = JobReadinessInhibitor(
cause=JobReadinessInhibitor.FAILED_DEP,
related_job=dep_job_state.job)
inhibitors.append(inhibitor)
return inhibitors
def observe_result(self, session_state, job, result):
"""
Notice the specified test result and update readiness state.
:param session_state:
A SessionState object
:param job:
A JobDefinition object
:param result:
A IJobResult object
This function updates the internal result collection with the data from
the specified test result. Results can safely override older results.
Results also change the ready map (jobs that can run) because of
dependency relations.
Some results have deeper meaning, those are results for local and
resource jobs. They are discussed in detail below:
Resource jobs produce resource records which are used as data to run
requirement expressions against. Each time a result for a resource job
is presented to the session it will be parsed as a collection of RFC822
records. A new entry is created in the resource map (entirely replacing
any old entries), with a list of the resources that were parsed from
the IO log.
Local jobs produce more jobs. Like with resource jobs, their IO log is
parsed and interpreted as additional jobs. Unlike in resource jobs
local jobs don't replace anything. They cannot replace an existing job
with the same name.
"""
# Store the result in job_state_map
session_state.job_state_map[job.name].result = result
session_state.on_job_state_map_changed()
session_state.on_job_result_changed(job, result)
# Treat some jobs specially and interpret their output
if job.plugin == "resource":
self._process_resource_result(session_state, job, result)
elif job.plugin == "local":
self._process_local_result(session_state, job, result)
def _process_resource_result(self, session_state, job, result):
"""
Analyze a result of a CheckBox "resource" job and generate
or replace resource records.
"""
new_resource_list = []
for record in gen_rfc822_records_from_io_log(job, result):
# XXX: Consider forwarding the origin object here. I guess we
            # should have from_rfc822_record as with JobDefinition
resource = Resource(record.data)
logger.info("Storing resource record %r: %s", job.name, resource)
new_resource_list.append(resource)
# Replace any old resources with the new resource list
session_state.set_resource_list(job.name, new_resource_list)
def _process_local_result(self, session_state, job, result):
"""
Analyze a result of a CheckBox "local" job and generate
additional job definitions
"""
# First parse all records and create a list of new jobs (confusing
# name, not a new list of jobs)
new_job_list = []
for record in gen_rfc822_records_from_io_log(job, result):
new_job = job.create_child_job_from_record(record)
new_job_list.append(new_job)
# Then for each new job, add it to the job_list, unless it collides
# with another job with the same name.
for new_job in new_job_list:
try:
added_job = session_state.add_job(new_job, recompute=False)
except DependencyDuplicateError as exc:
# XXX: there should be a channel where such errors could be
# reported back to the UI layer. Perhaps update_job_result()
# could simply return a list of problems in a similar manner
# how update_desired_job_list() does.
logger.warning(
("Local job %s produced job %r that collides with"
" an existing job %s (from %s), the new job was"
" discarded"),
job, exc.duplicate_job, exc.job, exc.job.origin)
else:
# Patch the origin of the existing job so that it traces
# back to the job that "generated" it again. This is
# basically required to get __category__ jobs to associate
# themselves with their children.
if added_job is not new_job:
added_job.update_origin(new_job.origin)
def gen_rfc822_records_from_io_log(job, result):
"""
Convert io_log from a job result to a sequence of rfc822 records
"""
logger.debug("processing output from a job: %r", job)
# Select all stdout lines from the io log
line_gen = (record[2].decode('UTF-8', errors='replace')
for record in result.get_io_log()
if record[1] == 'stdout')
# Allow the generated records to be traced back to the job that defined
# the command which produced (printed) them.
source = JobOutputTextSource(job)
try:
# Parse rfc822 records from the subsequent lines
for record in gen_rfc822_records(line_gen, source=source):
yield record
except RFC822SyntaxError as exc:
# When this exception happens we will _still_ store all the
# preceding records. This is worth testing
logger.warning(
"local script %s returned invalid RFC822 data: %s",
job, exc)
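# Illustrative sketch (not part of the original module): a "resource" job
# whose command prints RFC822-style records on stdout, e.g.
#
#     name: eth0
#     driver: e1000e
#
#     name: wlan0
#     driver: iwlwifi
#
# is parsed by gen_rfc822_records_from_io_log() into two records whose .data
# dictionaries become Resource objects stored under the resource job's name
# by _process_resource_result() above.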
checkbox_session_state_ctrl = CheckBoxSessionStateController()
class SymLinkNest:
"""
A class for setting up a control directory with symlinked executables
"""
def __init__(self, dirname):
self._dirname = dirname
def add_provider(self, provider):
"""
Add all of the executables associated a particular provider
:param provider:
A Provider1 instance
"""
for filename in provider.get_all_executables():
self.add_executable(filename)
def add_executable(self, filename):
"""
Add a executable to the control directory
"""
logger.debug(
"Adding executable %s to nest %s",
filename, self._dirname)
os.symlink(
filename, os.path.join(
self._dirname, os.path.basename(filename)))
class CheckBoxExecutionController(IExecutionController):
"""
Base class for checkbox-like execution controllers.
This abstract class provides common features for all checkbox execution
controllers.
"""
def __init__(self, session_dir, provider_list):
"""
Initialize a new CheckBoxExecutionController
:param session_dir:
Base directory of the session this job will execute in.
This directory is used to co-locate some data that is unique to
this execution as well as data that is shared by all executions.
:param provider_list:
A list of Provider1 objects that will be available for script
            dependency resolutions. Currently all of the scripts are made
available but this will be refined to the minimal set later.
"""
self._session_dir = session_dir
self._provider_list = provider_list
def execute_job(self, job, config, extcmd_popen):
"""
Execute the specified job using the specified subprocess-like object
:param job:
The JobDefinition to execute
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. It is used to
provide values for missing environment variables that are required
by the job (as expressed by the environ key in the job definition
file).
:param extcmd_popen:
A subprocess.Popen like object
:returns:
The return code of the command, as returned by subprocess.call()
"""
# CHECKBOX_DATA is where jobs can share output.
        # It has to be a directory that scripts can assume exists.
if not os.path.isdir(self.CHECKBOX_DATA):
os.makedirs(self.CHECKBOX_DATA)
# Setup the executable nest directory
with self.configured_filesystem(job, config) as nest_dir:
# Get the command and the environment.
# of this execution controller
cmd = self.get_execution_command(job, config, nest_dir)
env = self.get_execution_environment(job, config, nest_dir)
# run the command
logger.debug("job[%s] executing %r with env %r",
job.name, cmd, env)
return extcmd_popen.call(cmd, env=env)
@contextlib.contextmanager
def configured_filesystem(self, job, config):
"""
Context manager for handling filesystem aspects of job execution.
:param job:
The JobDefinition to execute
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. It is used to
provide values for missing environment variables that are required
by the job (as expressed by the environ key in the job definition
file).
:returns:
Pathname of the executable symlink nest directory.
"""
# Create a nest for all the private executables needed for execution
prefix = 'nest-'
suffix = '.{}'.format(job.checksum)
with tempfile.TemporaryDirectory(suffix, prefix) as nest_dir:
logger.debug("Symlink nest for executables: %s", nest_dir)
nest = SymLinkNest(nest_dir)
# Add all providers executables to PATH
for provider in self._provider_list:
nest.add_provider(provider)
logger.debug("Symlink nest for executables: %s", nest_dir)
yield nest_dir
def get_score(self, job):
"""
Compute how applicable this controller is for the specified job.
:returns:
A numeric score, or None if the controller cannot run this job.
The higher the value, the more applicable this controller is.
"""
if isinstance(job, JobDefinition):
return self.get_checkbox_score(job)
else:
return -1
@abc.abstractmethod
def get_checkbox_score(self, job):
"""
Compute how applicable this controller is for the specified job.
The twist is that it is always a checkbox job definition so we can be
more precise.
:returns:
A number that specifies how applicable this controller is for the
specified job (the higher the better) or None if it cannot be used
at all
"""
@abc.abstractmethod
def get_execution_command(self, job, config, nest_dir):
"""
Get the command to execute the specified job
:param job:
job definition with the command and environment definitions
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. It is used to
provide values for missing environment variables that are required
by the job (as expressed by the environ key in the job definition
file).
:param nest_dir:
A directory with a nest of symlinks to all executables required to
execute the specified job. This argument may or may not be used,
depending on how PATH is passed to the command (via environment or
            via the command line)
:returns:
List of command arguments
"""
def get_execution_environment(self, job, config, nest_dir):
"""
Get the environment required to execute the specified job:
:param job:
job definition with the command and environment definitions
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. It is used to
provide values for missing environment variables that are required
by the job (as expressed by the environ key in the job definition
file).
:param nest_dir:
A directory with a nest of symlinks to all executables required to
execute the specified job. This argument may or may not be used,
depending on how PATH is passed to the command (via environment or
            via the command line)
:return:
dictionary with the environment to use.
This returned environment has additional PATH, PYTHONPATH entries. It
also uses fixed LANG so that scripts behave as expected. Lastly it
sets CHECKBOX_SHARE and CHECKBOX_DATA that may be required by some
scripts.
"""
# Get a proper environment
env = dict(os.environ)
# Use non-internationalized environment
env['LANG'] = 'C.UTF-8'
if 'LANGUAGE' in env:
del env['LANGUAGE']
for name in list(env.keys()):
if name.startswith("LC_"):
del env[name]
# Use PATH that can lookup checkbox scripts
if job.provider.extra_PYTHONPATH:
env['PYTHONPATH'] = os.pathsep.join(
[job.provider.extra_PYTHONPATH]
+ env.get("PYTHONPATH", "").split(os.pathsep))
# Inject nest_dir into PATH
env['PATH'] = os.pathsep.join(
[nest_dir]
+ env.get("PATH", "").split(os.pathsep))
# Add CHECKBOX_SHARE that is needed by one script
env['CHECKBOX_SHARE'] = job.provider.CHECKBOX_SHARE
# Add CHECKBOX_DATA (temporary checkbox data)
env['CHECKBOX_DATA'] = self.CHECKBOX_DATA
# Inject additional variables that are requested in the config
if config is not None and config.environment is not Unset:
for env_var in config.environment:
# Don't override anything that is already present in the
# current environment. This will allow users to customize
# variables without editing any config files.
if env_var in env:
continue
# If the environment section of the configuration file has a
# particular variable then copy it over.
env[env_var] = config.environment[env_var]
return env
@property
def CHECKBOX_DATA(self):
"""
value of the CHECKBOX_DATA environment variable.
This variable names a sub-directory of the session directory
where jobs can share data between invocations.
"""
return os.path.join(self._session_dir, "CHECKBOX_DATA")
class UserJobExecutionController(CheckBoxExecutionController):
"""
An execution controller that works for jobs invoked as the current user.
"""
def get_execution_command(self, job, config, nest_dir):
"""
Get the command to execute the specified job
:param job:
job definition with the command and environment definitions
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. Ignored.
:param nest_dir:
A directory with a nest of symlinks to all executables required to
            execute the specified job. Ignored.
:returns:
List of command arguments
This basically returns ['bash', '-c', job.command]
"""
return ['bash', '-c', job.command]
def get_checkbox_score(self, job):
"""
Compute how applicable this controller is for the specified job.
:returns:
one for jobs without a user override, -1 otherwise
"""
if job.user is None:
return 1
else:
return -1
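# Illustrative sketch (not part of the original module; the helper shown here
# is an assumption): code that owns several execution controllers can pick the
# most suitable one for a job via IExecutionController.get_score():
#
#     def select_controller(controllers, job):
#         # Higher score wins; a negative score means "cannot run this job".
#         best = max(controllers, key=lambda ctrl: ctrl.get_score(job))
#         if best.get_score(job) < 0:
#             raise RuntimeError("no controller can execute %r" % job)
#         return best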
class CheckBoxDifferentialExecutionController(CheckBoxExecutionController):
"""
A CheckBoxExecutionController subclass that uses differential environment.
This special subclass has a special :meth:`get_execution_environment()`
method that always returns None. Instead the new method
:meth:`get_differential_execution_environment()` that returns the
difference between the target environment and the current environment.
"""
def get_differential_execution_environment(self, job, config, nest_dir):
"""
Get the environment required to execute the specified job:
:param job:
job definition with the command and environment definitions
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. It is used to
provide values for missing environment variables that are required
by the job (as expressed by the environ key in the job definition
file).
:param nest_dir:
A directory with a nest of symlinks to all executables required to
execute the specified job. This is simply passed to
:meth:`get_execution_environment()` directly.
:returns:
Differential environment (see below).
This implementation computes the desired environment (as it was
computed in the base class) and then discards all of the environment
variables that are identical in both sets. The exception are variables
that are mentioned in
:meth:`plainbox.impl.job.JobDefinition.get_environ_settings()` which
are always retained.
"""
base_env = os.environ
target_env = super().get_execution_environment(job, config, nest_dir)
return {
key: value
for key, value in target_env.items()
if key not in base_env or base_env[key] != value
or key in job.get_environ_settings()
}
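    # Illustrative example (all values are assumptions, not from the original
    # code): if os.environ is {'HOME': '/home/user', 'PATH': '/usr/bin'} and
    # the full target environment computed by the base class is
    #     {'HOME': '/home/user', 'PATH': '/nest:/usr/bin', 'LANG': 'C.UTF-8'}
    # then the differential environment drops the unchanged HOME and keeps
    #     {'PATH': '/nest:/usr/bin', 'LANG': 'C.UTF-8'}
    # which keeps the generated pkexec/sudo command lines short.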
def get_execution_environment(self, job, config, nest_dir):
"""
Get the environment required to execute the specified job:
:param job:
job definition with the command and environment definitions.
Ignored.
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. Ignored.
:param nest_dir:
A directory with a nest of symlinks to all executables required to
execute the specified job. Ignored.
:returns:
None
This implementation always returns None since the environment is always
passed in via :meth:`get_execution_command()`
"""
return None
class RootViaPTL1ExecutionController(CheckBoxDifferentialExecutionController):
"""
Execution controller that gains root using plainbox-trusted-launcher-1
"""
def get_execution_command(self, job, config, nest_dir):
"""
Get the command to invoke.
:param job:
job definition with the command and environment definitions
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. Passed to
:meth:`get_differential_execution_environment()`.
:param nest_dir:
A directory with a nest of symlinks to all executables required to
execute the specified job. Passed to
:meth:`get_differential_execution_environment()`.
This overridden implementation returns especially crafted command that
uses pkexec to run the plainbox-trusted-launcher-1 as the desired user
(typically root). It passes the checksum of the job definition as
argument, along with all of the required environment key-value pairs.
If a job is generated it also passes the special via attribute to let
the trusted launcher discover the generated job. Currently it supports
at most one-level of generated jobs.
"""
# Run plainbox-trusted-launcher-1 as the required user
cmd = ['pkexec', '--user', job.user, 'plainbox-trusted-launcher-1',
'--hash', job.checksum]
# Append all environment data
env = self.get_differential_execution_environment(
job, config, nest_dir)
cmd += ["{key}={value}".format(key=key, value=value)
for key, value in sorted(env.items())]
# Append the --via flag for generated jobs
if job.via is not None:
cmd += ['--via', job.via]
return cmd
def get_checkbox_score(self, job):
"""
Compute how applicable this controller is for the specified job.
:returns:
two for jobs with an user override that can be invoked by the
trusted launcher, zero for jobs without an user override that can
be invoked by the trusted launcher, -1 otherwise
"""
# Only works with jobs coming from the Provider1 instance
if not isinstance(job.provider, Provider1):
return -1
# Only works with jobs loaded from the secure PROVIDERPATH
if not job.provider.secure:
return -1
# Only makes sense with jobs that need to run as another user
if job.user is not None:
return 2
else:
return 0
class RootViaPkexecExecutionController(
CheckBoxDifferentialExecutionController):
"""
Execution controller that gains root by using pkexec.
This controller should be used for jobs that need root but cannot be
executed by the plainbox-trusted-launcher-1. This happens whenever the job
is not in the system-wide provider location.
In practice it is used when working with the special
'checkbox-in-source-tree' provider as well as for jobs that need to run as
root from the non-system-wide location.
"""
def get_execution_command(self, job, config, nest_dir):
"""
Get the command to invoke.
:param job:
job definition with the command and environment definitions
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. Passed to
:meth:`get_differential_execution_environment()`.
:param nest_dir:
A directory with a nest of symlinks to all executables required to
execute the specified job. Passed to
:meth:`get_differential_execution_environment()`.
Since we cannot pass environment in the ordinary way while using
pkexec(1) (pkexec starts new processes in a sanitized, pristine,
environment) we're relying on env(1) to pass some of the environment
variables that we require.
"""
# Run env(1) as the required user
cmd = ['pkexec', '--user', job.user, 'env']
# Append all environment data
env = self.get_differential_execution_environment(
job, config, nest_dir)
cmd += ["{key}={value}".format(key=key, value=value)
for key, value in sorted(env.items())]
# Lastly use bash -c, to run our command
cmd += ['bash', '-c', job.command]
return cmd
def get_checkbox_score(self, job):
"""
Compute how applicable this controller is for the specified job.
:returns:
one for jobs with a user override, zero otherwise
"""
if job.user is not None:
return 1
else:
return 0
class RootViaSudoExecutionController(
CheckBoxDifferentialExecutionController):
"""
Execution controller that gains root by using sudo.
This controller should be used for jobs that need root but cannot be
executed by the plainbox-trusted-launcher-1.
This happens whenever the job is not in the system-wide provider location.
In practice it is used when working with the special
'checkbox-in-source-tree' provider as well as for jobs that need to run as
root from the non-system-wide location.
Using this controller is preferable to pkexec if running on command line as
unlike pkexec, it retains 'memory' and doesn't ask for the password over
and over again.
"""
def __init__(self, session_dir, provider_list):
"""
Initialize a new RootViaSudoExecutionController
:param session_dir:
Base directory of the session this job will execute in.
This directory is used to co-locate some data that is unique to
this execution as well as data that is shared by all executions.
"""
super().__init__(session_dir, provider_list)
# Check if the user can use 'sudo' on this machine. This check is a bit
# Ubuntu specific and can be wrong due to local configuration but
# without a better API all we can do is guess.
#
# Shamelessly stolen from command-not-found
try:
in_sudo_group = grp.getgrnam("sudo").gr_gid in posix.getgroups()
except KeyError:
in_sudo_group = False
try:
in_admin_group = grp.getgrnam("admin").gr_gid in posix.getgroups()
except KeyError:
in_admin_group = False
self.user_can_sudo = in_sudo_group or in_admin_group
def get_execution_command(self, job, config, nest_dir):
"""
Get the command to invoke.
:param job:
job definition with the command and environment definitions
:param config:
A PlainBoxConfig instance which can be used to load missing
environment definitions that apply to all jobs. Ignored.
:param nest_dir:
A directory with a nest of symlinks to all executables required to
            execute the specified job. Ignored.
Since we cannot pass environment in the ordinary way while using
sudo(8) (even passing -E doesn't get us everything due to security
features built into sudo itself) we're relying on env(1) to pass some
of the environment variables that we require.
"""
# Run env(1) as the required user
cmd = ['sudo', '-u', job.user, 'env']
# Append all environment data
env = self.get_differential_execution_environment(
job, config, nest_dir)
cmd += ["{key}={value}".format(key=key, value=value)
for key, value in sorted(env.items())]
# Lastly use bash -c, to run our command
cmd += ['bash', '-c', job.command]
return cmd
def get_checkbox_score(self, job):
"""
Compute how applicable this controller is for the specified job.
:returns:
-1 if the job does not have a user override or the user cannot use
sudo and 2 otherwise
"""
# Only makes sense with jobs that need to run as another user
if job.user is not None and self.user_can_sudo:
return 2
else:
return -1
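# Illustrative example (job command and user are assumptions, not from the
# original code): for a job with user='root' and command='lspci -v',
# RootViaSudoExecutionController.get_execution_command() builds roughly
#     ['sudo', '-u', 'root', 'env', 'CHECKBOX_DATA=...', 'PATH=...',
#      'bash', '-c', 'lspci -v']
# so the differential environment travels on the command line via env(1).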
| gpl-3.0 | -7,808,282,389,679,753,000 | 40.891765 | 79 | 0.636711 | false |
mcaleavya/bcc | examples/tracing/stack_buildid_example.py | 1 | 3105 | #!/usr/bin/python
#
# An example usage of stack_build_id
# Most of the code here is borrowed from tools/profile.py
#
# Steps for using this code
# 1) Start ping program in one terminal eg invocation: ping google.com -i0.001
# 2) Change the path of libc specified in b.add_module() below
# 3) Invoke the script as 'python stack_buildid_example.py'
# 4) o/p of the tool is as shown below
# python example/tracing/stack_buildid_example.py
# sendto
# - ping (5232)
# 2
#
# REQUIRES: Linux 4.17+ (BPF_BUILD_ID support)
# Licensed under the Apache License, Version 2.0 (the "License")
# 03-Jan-2019 Vijay Nag
from __future__ import print_function
from bcc import BPF, PerfType, PerfSWConfig
from sys import stderr
from time import sleep
import argparse
import signal
import os
import subprocess
import errno
import multiprocessing
import ctypes as ct
def Get_libc_path():
# A small helper function that returns full path
# of libc in the system
cmd = 'cat /proc/self/maps | grep libc | awk \'{print $6}\' | uniq'
output = subprocess.check_output(cmd, shell=True)
if not isinstance(output, str):
output = output.decode()
return output.split('\n')[0]
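# Illustrative note (not part of the original example): on a typical Ubuntu
# host Get_libc_path() returns something like
# '/lib/x86_64-linux-gnu/libc-2.27.so'; the exact path and version differ per
# distribution, which is why it is resolved from /proc/self/maps at runtime.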
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf_perf_event.h>
#include <linux/sched.h>
struct key_t {
u32 pid;
int user_stack_id;
char name[TASK_COMM_LEN];
};
BPF_HASH(counts, struct key_t);
BPF_STACK_TRACE_BUILDID(stack_traces, 128);
int do_perf_event(struct bpf_perf_event_data *ctx) {
u32 pid = bpf_get_current_pid_tgid() >> 32;
// create map key
struct key_t key = {.pid = pid};
bpf_get_current_comm(&key.name, sizeof(key.name));
key.user_stack_id = stack_traces.get_stackid(&ctx->regs, BPF_F_USER_STACK);
if (key.user_stack_id >= 0) {
counts.increment(key);
}
return 0;
}
"""
b = BPF(text=bpf_text)
b.attach_perf_event(ev_type=PerfType.SOFTWARE,
ev_config=PerfSWConfig.CPU_CLOCK, fn_name="do_perf_event",
sample_period=0, sample_freq=49, cpu=0)
# Add the list of libraries/executables to the build sym cache for sym resolution
# Change the libc path if it is different on a different machine.
# libc.so, sshd and ping are added here so that any symbols pertaining to
# libc, sshd or ping are resolved. More executables/libraries can be added here.
b.add_module(Get_libc_path())
b.add_module("/usr/sbin/sshd")
b.add_module("/bin/ping")
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
duration = 2
def signal_ignore(signal, frame):
print()
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take some time, trap Ctrl-C:
signal.signal(signal.SIGINT, signal_ignore)
user_stack=[]
for k,v in sorted(counts.items(), key=lambda counts: counts[1].value):
user_stack = [] if k.user_stack_id < 0 else \
stack_traces.walk(k.user_stack_id)
user_stack=list(user_stack)
for addr in user_stack:
print(" %s" % b.sym(addr, k.pid).decode('utf-8', 'replace'))
print(" %-16s %s (%d)" % ("-", k.name.decode('utf-8', 'replace'), k.pid))
print(" %d\n" % v.value)
| apache-2.0 | 8,691,926,209,330,545,000 | 28.571429 | 81 | 0.681804 | false |
BhattiMarry/AddressBook | tests/tests.py | 1 | 2102 | import unittest
from AddressBook import AddressBook
from AddressBookHandler import AddressBookHandler
class AddressBookTests(unittest.TestCase):
def setUp(self):
self.obj = AddressBookHandler()
def test_address_book_object(self):
        assert AddressBook("Umair", "Bhatti", "GHQ", "[email protected]", "5001676", "Human") is not None
def test_add_group(self):
self.obj.add_group('Human')
def test_add_person(self):
self.obj.add_person("Jawad", "Aslam", "PDK", "[email protected]", "5519726", "Human")
assert self.obj.count_persons() > 0
self.obj.add_person("Umair", "Bhatti", "GHQ", "[email protected]", "5001676", "Human")
assert self.obj.count_persons() == 2
def test_get_address_book(self):
assert type(self.obj.get_address_book()) == list
    def test_print_short_info(self):
allPersons = self.obj.get_address_book()
if len(allPersons) > 0:
self.obj.print_short_info(allPersons[0])
def test_get_persons_in_group(self):
allGroups = self.obj.get_all_groups()
if len(allGroups) > 0:
self.obj.get_persons_in_group(self.obj.get_all_groups()[0])
else:
self.obj.get_persons_in_group("Default")
def test_print_person_info(self):
allPersons = self.obj.get_address_book()
if len(allPersons) > 0:
self.obj.print_person_info(allPersons[0])
def test_get_group_info_of_person(self):
allPersons = self.obj.get_address_book()
if len(allPersons) > 0:
self.obj.get_group_info_of_person(allPersons[0].fname)
def test_get_person_info_by_name(self):
allPersons = self.obj.get_address_book()
if len(allPersons) > 0:
self.obj.get_person_info_by_name(allPersons[0].fname)
def test_get_person_info_by_email(self):
allPersons = self.obj.get_address_book()
if len(allPersons) > 0:
self.obj.get_person_info_by_name(allPersons[0].email)
def tearDown(self):
self.obj = None
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 239,645,535,401,772,380 | 33.459016 | 103 | 0.621313 | false |
BriData/DBus | dbus-mongo-extractor/tests/test_doc_manager_base.py | 1 | 10440 | # Copyright 2017 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path[0:0] = [""]
from mongo_connector.connector import (get_mininum_mongodb_version,
update_mininum_mongodb_version)
from mongo_connector.doc_managers.doc_manager_base import (DocManagerBase,
UpdateDoesNotApply)
from mongo_connector.test_utils import TESTARGS
from mongo_connector.version import Version
from tests import unittest
UPDATE_SUCCESS_TEST_CASES = [
{
"description": "Update whole document.",
"doc": {"a": 1},
"update_spec": {"a": 2, "b": 3},
"result": {"a": 2, "b": 3}
},
{
"description": "Update by un-setting an existing field.",
"doc": {"a": 1, "b": 2},
"update_spec": {"$unset": {"a": True}},
"result": {"b": 2}
},
{
"description": "Update by un-setting an existing nested field.",
"doc": {"a": {"b": 2, "c": 3}},
"update_spec": {"$unset": {"a.b": True}},
"result": {"a": {"c": 3}}
},
{
"description": "Update by un-setting an array element.",
"doc": {"a": 1, "b": [0, 1, 2, 3]},
"update_spec": {"$unset": {"b.1": True}},
"result": {"a": 1, "b": [0, None, 2, 3]}
},
{
"description": "Update by adding a field.",
"doc": {"a": 1},
"update_spec": {"$set": {"b": [{"c": 1}, {"d": 2}]}},
"result": {"a": 1, "b": [{"c": 1}, {"d": 2}]}
},
{
"description": "Update by adding a nested field.",
"doc": {"a": 1},
"update_spec": {"$set": {"b.c.d": 2}},
"result": {"a": 1, "b": {"c": {"d": 2}}}
},
{
"description": "Update by adding and removing a fields.",
"doc": {"a": 1, "c": 3},
"update_spec": {"$unset": {"a": True, "c": True},
"$set": {"b": 2, "d": 4}},
"result": {"b": 2, "d": 4}
},
{
"description": "Update by setting an element far beyond the end of"
"an array.",
"doc": {"a": 1, "b": [{"c": 1}]},
"update_spec": {"$set": {"b.4": {"c": 2}}},
"result": {"a": 1, "b": [{"c": 1}, None, None, None, {"c": 2}]}
},
{
"description": "Update by setting an element right beyond the end "
"of an array.",
"doc": {"a": 1, "b": [{"c": 1}]},
"update_spec": {"$set": {"b.1": {"c": 2}}},
"result": {"a": 1, "b": [{"c": 1}, {"c": 2}]}
},
{
"description": "Update by setting an attribute of a sub-document far "
"beyond the end of an array.",
"doc": {"a": 1, "b": [{"c": 1}]},
"update_spec": {"$set": {"b.4.c": 2}},
"result": {"a": 1, "b": [{"c": 1}, None, None, None, {"c": 2}]}
},
{
"description": "Update by setting an attribute of a sub-document "
"right beyond the end of an array.",
"doc": {"a": 1, "b": [{"c": 1}]},
"update_spec": {"$set": {"b.1.c": 2}},
"result": {"a": 1, "b": [{"c": 1}, {"c": 2}]}
},
{
"description": "Update by changing a field within an array element.",
"doc": {"a": 1, "b": [{"c": 1}]},
"update_spec": {"$set": {"b.0.c": 2}},
"result": {"a": 1, "b": [{"c": 2}]}
},
{
"description": "Update by adding a field within an array element.",
"doc": {"a": 1, "b": [{"c": 1}, {"d": 2}]},
"update_spec": {"$set": {"b.1.c": 3}},
"result": {"a": 1, "b": [{"c": 1}, {"c": 3, "d": 2}]}
},
{
"description": "Update by replacing an array element.",
"doc": {"a": 1, "b": [0, 1, 2, 3]},
"update_spec": {"$set": {"b.2": {"new": 2}}},
"result": {"a": 1, "b": [0, 1, {"new": 2}, 3]}
},
{
"description": "Update by replacing an array with a document with "
"int field.",
"doc": {"a": 1, "b": [{"c": 1}, {"d": 2}]},
"update_spec": {"$set": {"b": {"0": {"e": 100}}}},
"result": {"a": 1, "b": {"0": {"e": 100}}}
},
]
UNSET_FAILURE_TEST_CASES = [
{
"description": "Update by un-setting a non-existent field.",
"doc": {"a": 1, "b": 2},
"update_spec": {"$unset": {"not-present": True}},
"result": {"a": 1, "b": 2}
},
{
"description": "Update by un-setting a non-existent nested field.",
"doc": {"a": 1, "b": {"c": {"d": 1}}},
"update_spec": {"$unset": {"b.not-present.foo": True}},
"result": {"a": 1, "b": {"c": {"d": 1}}}
},
{
"description": "Update by un-setting invalid array index.",
"doc": {"a": 1, "b": [0, 1, 2, 3]},
"update_spec": {"$unset": {"b.not-an-index": True}},
"result": {"a": 1, "b": [0, 1, 2, 3]}
},
{
"description": "Update by un-setting invalid nested array index.",
"doc": {"a": 1, "b": [0, 1, 2, 3]},
"update_spec": {"$unset": {"b.not-an-index.not-present": True}},
"result": {"a": 1, "b": [0, 1, 2, 3]}
},
{
"description": "Update by un-setting a non-existent array element.",
"doc": {"a": 1, "b": [0, 1, 2]},
"update_spec": {"$unset": {"b.4": True}},
"result": {"a": 1, "b": [0, 1, 2]}
},
{
"description": "Update by un-setting a non-existent field in an array"
"element.",
"doc": {"a": 1, "b": [0, {"c": 1}, 2]},
"update_spec": {"$unset": {"b.1.not-present": True}},
"result": {"a": 1, "b": [0, {"c": 1}, 2]}
},
{
"description": "Update by adding and removing a non-existent field.",
"doc": {"a": 1},
"update_spec": {"$unset": {"a": True, "not-present": True},
"$set": {"b": 2}},
"result": {"b": 2}
}
]
UPDATE_FAILURE_TEST_CASES = [
{
"description": "Using array notation on non-array field.",
"doc": {"a": 1},
"update_spec": {"$set": {"a.0": 2}}
},
{
"description": "Using array notation on non-array field.",
"doc": {"a": 1},
"update_spec": {"$set": {"a.0.1": 2}}
},
{
"description": "Using nested field notation on non-object.",
"doc": {"a": 1},
"update_spec": {"$set": {"a.b": 2}}
},
{
"description": "Using deeply nested field notation on non-object.",
"doc": {"a": 1},
"update_spec": {"$set": {"a.b.c.b": 2}}
},
{
"description": "Setting a field on an array field.",
"doc": {"a": [{"c": 1}, {"c": 2}]},
"update_spec": {"$set": {"a.c": 2}}
},
{
"description": "Setting a field on a null array element.",
"doc": {"a": [None, None]},
"update_spec": {"$set": {"a.0.c": 2}}
},
]
class TestDocManagerBase(unittest.TestCase):
"""Unit tests for DocManagerBase"""
def setUp(self):
self.base = DocManagerBase()
def assertUpdateTestSucceeds(self, test):
self.assertEqual(
self.base.apply_update(test["doc"], test["update_spec"]),
test["result"], msg=test["description"])
def assertUpdateTestFails(self, test):
try:
doc = self.base.apply_update(test["doc"], test["update_spec"])
self.fail(
"UpdateDoesNotApply on MongoDB verison %s not raised for "
"test: %s, applied %r to %r and got %r" % (
get_mininum_mongodb_version(), test["description"],
test["update_spec"], test["doc"], doc))
except UpdateDoesNotApply:
pass
def test_apply_update(self):
for test in UPDATE_SUCCESS_TEST_CASES:
self.assertUpdateTestSucceeds(test)
def test_apply_update_fails(self):
for test in UPDATE_FAILURE_TEST_CASES:
self.assertUpdateTestFails(test)
def test_apply_update_unset_failures(self):
# Reset the minimum MongoDB version at the start and end.
update_mininum_mongodb_version(None)
for mock_mongodb_version in [(3, 4), (3, 2), (3, 0), (2, 6), (2, 4),
None]:
if mock_mongodb_version is None:
update_mininum_mongodb_version(None)
else:
update_mininum_mongodb_version(Version(*mock_mongodb_version))
for test in UNSET_FAILURE_TEST_CASES:
if mock_mongodb_version == (2, 4):
self.assertUpdateTestSucceeds(test)
else:
self.assertUpdateTestFails(test)
def test_bulk_upsert(self):
with self.assertRaises(NotImplementedError):
self.base.bulk_upsert([{}], *TESTARGS)
def test_update(self):
with self.assertRaises(NotImplementedError):
self.base.update({}, {}, *TESTARGS)
def test_upsert(self):
with self.assertRaises(NotImplementedError):
self.base.upsert({}, *TESTARGS)
def test_remove(self):
with self.assertRaises(NotImplementedError):
self.base.remove(1, *TESTARGS)
def test_insert_file(self):
with self.assertRaises(NotImplementedError):
self.base.insert_file(None, *TESTARGS)
def test_handle_command(self):
with self.assertRaises(NotImplementedError):
self.base.handle_command({}, *TESTARGS)
def test_search(self):
with self.assertRaises(NotImplementedError):
self.base.search(0, 1)
def test_commit(self):
with self.assertRaises(NotImplementedError):
self.base.commit()
def test_get_last_doc(self):
with self.assertRaises(NotImplementedError):
self.base.get_last_doc()
def test_stop(self):
with self.assertRaises(NotImplementedError):
self.base.stop()
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 688,907,162,444,742,700 | 34.389831 | 78 | 0.48908 | false |
maschwanden/boxsimu | boxsimu/visualize.py | 1 | 24184 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 13 15:57:03 2017
@author: Mathias Aschwanden ([email protected])
"""
import os
import re
import copy
import importlib
import svgwrite
from svgwrite import cm, mm
import numpy as np
from . import utils as bs_utils
class BoxModelSystemSvgHelper:
"""Helper Class to visualize/plot a BoxModelSystem."""
def __init__(self):
self.box_rect_width = 300
self.box_rect_height = 300
self.system_boxes_arrangement_type = 'circle'
self.system_boxes_arrangement_radius = None
self.system_boxes_arrangement_factor = 1.7
self.system_boxes_arrangement_angle_offset = 0
self.flow_stroke_width = 4
self.flow_color = 'darkblue'
self.flow_arrow_triangle_size = 4
self.flux_stroke_width = 4
self.flux_color = 'darkblue'
self.flux_arrow_triangle_size = 10
self.box_svg_helpers = None
self.dwg = None
def save_system_as_svg(self, system, filename):
"""Save the visualization of system as a SVG file."""
if system.N_boxes == 2:
self.system_boxes_arrangement_factor = 1.1
elif system.N_boxes == 3:
self.system_boxes_arrangement_factor = 1.2
elif system.N_boxes == 4:
self.system_boxes_arrangement_factor = 1.4
elif system.N_boxes == 5:
self.system_boxes_arrangement_factor = 1.6
elif system.N_boxes == 6:
self.system_boxes_arrangement_factor = 1.8
# self.dwg = svgwrite.Drawing(size=self._get_system_svg_size())
self.dwg = svgwrite.Drawing(size=('32cm', '10cm'), debug=True)
self.dwg.viewbox(-100, 0, 600, 400)
if not self.box_svg_helpers:
self.box_svg_helpers = self._get_system_box_svg_helpers(system)
system_svg_group = self.get_system_svg_group(system)
self._save_group_as_svg(system_svg_group, filename)
def get_system_svg_group(self, system):
"""Return a SVG representation of the BoxModelSystem instance."""
if not self.box_svg_helpers:
self.box_svg_helpers = self._get_system_box_svg_helpers(system)
group_id = bs_utils.get_valid_svg_id_from_string(system.name)
group = self.dwg.g(id=group_id)
for box_svg_helper in self.box_svg_helpers:
group.add(box_svg_helper.as_svg_group())
for flow in system.flows:
group.add(self._get_flow_arrow(flow))
return group
def save_box_as_svg(self, box, filename=None):
"""Return a SVG representation of the Box instance."""
self.dwg = svgwrite.Drawing(size=self._get_box_svg_size())
self._save_group_as_svg(self.get_box_svg_group(box), filename)
def get_box_svg_group(self, box):
"""Return the SVG representation of the Box instance."""
group_id = bs_utils.get_valid_svg_id_from_string(box.name)
group = self.dwg.g(id=group_id)
box_svg_helper = self._get_box_svg_helper(box)
group.add(box_svg_helper.as_svg_group())
return group
# HELPER functions
def _save_group_as_svg(self, group, filename):
"""Save a svgwrite group instance as a SVG file."""
# dwg = svgwrite.Drawing(filename=filename)
dwg = copy.deepcopy(self.dwg)
dwg.filename = filename
dwg.add(group)
dwg.save()
def _get_system_box_svg_helpers(self, system):
"""Return a list of BoxSvgHelper for all boxes of the system."""
box_positions = self._get_box_positions(system.N_boxes)
box_svg_helpers = [None] * system.N_boxes
for box_name, box in system.boxes.items():
x, y = box_positions[box.id]
tmp_box_svg_helper = self._get_box_svg_helper(box, x, y)
box_svg_helpers[box.id] = tmp_box_svg_helper
box_svg_helpers = self._adjust_box_svg_helper_widths(box_svg_helpers)
return box_svg_helpers
def _get_box_svg_helper(self, box, x=0, y=0):
box_group_id = '{}_box'.format(box.name)
box_svg_helper = BoxSvgHelper(
group_id=box_group_id,
x=x, y=y,
width=self.box_rect_width,
height=self.box_rect_height,
text_lines=[
'Fluid: {}'.format(box.fluid.name),
'Mass: {:.3e}'.format(box.mass),
],
title=box.description,
)
        processes_group_id = '{}_processes'.format(box.name)
box_process_names = [p.name for p in box.processes]
while len(box_process_names) < 3:
box_process_names.append('')
processes = box_svg_helper.add_child(
            group_id=processes_group_id,
text_lines=box_process_names,
title='Processes',
)
reaction_group_id = '{}_reactions'.format(box.name)
box_reaction_names = [p.name for p in box.reactions]
while len(box_reaction_names) < 3:
box_reaction_names.append('')
reactions = box_svg_helper.add_child(
group_id=reaction_group_id,
text_lines=box_reaction_names,
title='Reactions',
)
return box_svg_helper
def _get_box_positions(self, N_nodes):
positions = []
angle_offset = self.system_boxes_arrangement_angle_offset
radius = self.system_boxes_arrangement_radius
if not radius:
radius_factor = self.system_boxes_arrangement_factor
radius = radius_factor * max(self.box_rect_width,
self.box_rect_height)
for i in range(N_nodes):
if self.system_boxes_arrangement_type == 'half_circle':
angle = (i * np.pi / (N_nodes-1)) + angle_offset
else: # if self.system_boxes_arrangement_type == 'circle':
angle = (i * 2 * np.pi / (N_nodes)) + angle_offset
x = radius * np.cos(angle)
y = radius * np.sin(angle)
positions.append((x,y))
return positions
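    # Illustrative example (numbers are assumptions): with 4 boxes, the
    # default box_rect_width/height of 300 and an arrangement factor of 1.4,
    # the radius is 1.4 * 300 = 420 and the 'circle' arrangement places the
    # boxes at 0, 90, 180 and 270 degrees, i.e. roughly (420, 0), (0, 420),
    # (-420, 0) and (0, -420).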
def _adjust_box_svg_helper_widths(self, helpers):
"""Adjust all box_svg_helpers to the same width."""
max_width = 0
for helper in helpers:
if helper.width > max_width:
max_width = helper.width
for helper in helpers:
helper._width = max_width
return helpers
def _distance_sort_corners(self, helper, reference_point):
"""Return corners sorted on the distance to a point."""
reference_point = np.array(reference_point)
corners = helper.get_box_rect_corner_coordinates()
np_corners = [np.array(c) for c in corners]
print('corners', corners)
distances = [np.linalg.norm(c-reference_point) for c in np_corners]
sorted_corners = [c for (distance,c) in sorted(zip(distances,corners))]
return sorted_corners
def _get_center_between_points(self, p1, p2):
p1 = np.array(p1)
p2 = np.array(p2)
return (p1 + p2)/2
def _get_conncection_point_relative_to_reference_point(self, helper,
reference_point):
"""Return connection point for flow lines relative to ref point."""
sorted_corners = self._distance_sort_corners(helper, reference_point)
p1, p2 = sorted_corners[:2]
connection_point = self._get_center_between_points(p1, p2)
return connection_point
def _get_flow_arrow(self, flow):
src_point = None
trg_point = None
if not flow.source_box:
helper = self.box_svg_helpers[flow.target_box.id]
box_center = np.array(
helper.get_box_rect_center_cooridnates())
box_connection_point_to_origin = np.array(
self._get_conncection_point_relative_to_reference_point(
helper, (0,0)))
v1 = box_center - box_connection_point_to_origin
trg_point = box_center + v1
src_point = trg_point + 0.5 * v1
elif not flow.target_box:
helper = self.box_svg_helpers[flow.source_box.id]
box_center = np.array(
helper.get_box_rect_center_cooridnates())
box_connection_point_to_origin = np.array(
self._get_conncection_point_relative_to_reference_point(
helper, (0,0)))
v1 = box_center - box_connection_point_to_origin
src_point = box_center + v1
trg_point = src_point + 0.5 * v1
else:
src_helper = self.box_svg_helpers[flow.source_box.id]
trg_helper = self.box_svg_helpers[flow.target_box.id]
src_point = self._get_conncection_point_relative_to_reference_point(
src_helper, (0,0))
trg_point = self._get_conncection_point_relative_to_reference_point(
trg_helper, (0,0))
arrow = self._get_arrow(start=src_point, end=trg_point,
stroke_color=self.flow_color,
stroke_width=self.flow_stroke_width,
triangle_size=self.flow_arrow_triangle_size)
return arrow
def _get_arrow(self, start, end, stroke_color, stroke_width,
triangle_size):
arrow_vector = end - start
arrow_unit_vector = arrow_vector / np.linalg.norm(arrow_vector)
rot90_matrix = self._get_rot90_matrix()
arrow_unit_normal_vector = np.dot(rot90_matrix, arrow_unit_vector)
triangle_point1 = triangle_size * arrow_unit_vector
triangle_point2 = 0.5 * triangle_size * arrow_unit_normal_vector
triangle_point3 = -0.5 * triangle_size * arrow_unit_normal_vector
end[0] += triangle_size
arrow = self.dwg.line(start=start, end=end, stroke=stroke_color,
stroke_width=stroke_width)
marker = self.dwg.marker(insert=0.75*arrow_unit_vector*triangle_size,
size=(triangle_size, triangle_size))
marker.add(self.dwg.polygon([triangle_point1, triangle_point2,
triangle_point3], fill=stroke_color))
self.dwg.defs.add(marker)
arrow.set_markers((None, None, marker))
return arrow
def _get_rot90_matrix(self):
angle = np.deg2rad(90)
return np.array([[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]])
def _get_system_svg_size(self):
return (100, 100)
def _get_box_svg_size(self):
return (100, 100)
class BoxSvgHelper:
def __init__(self, group_id, x, y, width, height=None,
text_lines=None, title=None):
text_lines = text_lines or []
if not height and len(text_lines) == 0:
raise ValueError('Either height or text_lines must be given.')
self.group_id = bs_utils.get_valid_svg_id_from_string(group_id)
self._x = x
self._y = y
self._height = height
self._width = width
self.text_lines = text_lines
self.text_font_size = 12
self.text_font_color = 'black'
self.text_alignement = 'left'
self.title = title
self.title_font_size = 24
self.title_font_color = 'black'
self.title_alignement = 'middle'
self.child_title_font_size = 15
self.child_title_font_color = 'black'
self.child_title_alignement = 'left'
self.title_extern = True
self.child_title_extern = True
self.color = 'lightgrey'
self.opacity = 0.7
self.stroke_color = 'black'
self.stroke_width = 5
self.stroke_opacity = 1
self.child_relative_width = 0.925
self.child_color = 'darkgrey'
self.child_opacity = 0.5
self.child_stroke_color = 'white'
self.child_stroke_width = 3
self.child_stroke_opacity = 1
self._content_absolute_margin = 10
# Maximal widht of the character 'W' in the title and text
self.title_max_W_width = self.title_font_size
self.text_max_W_width = self.text_font_size
self.title_avg_char_width = 0.8 * self.title_max_W_width
self.text_avg_char_width = 0.8 * self.text_max_W_width
self.children = []
self.dwg = svgwrite.Drawing()
self.group = self.dwg.g(id=group_id)
@property
def width(self):
"""Return width of the instance."""
width = self._width
max_title_width = self.get_max_title_width()
max_text_width = self.get_max_text_width()
max_children_width = self.get_max_children_width()
if max_title_width > width:
width = max_title_width
if max_text_width > width:
width = max_text_width
if max_children_width > width:
width = max_children_width
self._width = width
self._adjust_children_width()
return self._width
@property
def height(self):
"""Return height of the instance."""
height = 0
if self._height:
height = self._height
element_height = (self.get_text_height() + self.get_title_height() +
self.get_children_height())
if element_height > height:
height = element_height
return height
@property
def x(self):
"""Return left edge of the instance."""
return self._x
@property
def y(self):
"""Return top edge of the instance."""
return self._y
@property
def content_absolute_margin(self):
if not self._content_absolute_margin:
width_relative = self.child_relative_width
self._content_absolute_margin = ((1-width_relative)/2 * self._width)
return self._content_absolute_margin
# PUBLIC functions
def as_svg_group(self):
"""Return the SVG representation of the instance."""
self._adjust_children_width()
self.group.add(self._get_svg_rect_element())
title = self._get_svg_title_element()
if title:
self.group.add(title)
text = self._get_svg_text_element()
if text:
self.group.add(text)
children = self._get_svg_children_element()
if children:
self.group.add(children)
return self.group
def add_child(self, group_id, text_lines=None, height=None,
width_relative=None, title=None):
"""Add a child instance."""
text_lines = text_lines or []
if not height and len(text_lines) == 0:
raise ValueError('Either height or text_lines must be given.')
width_relative = self.child_relative_width
x = self.x + self.content_absolute_margin
y = self.get_children_bottom_y()
width = width_relative * self.width
child = self.__class__(group_id, x, y, width,
height=height, text_lines=text_lines, title=title)
child.title_extern = self.child_title_extern
child.color = self.child_color
child.opacity = self.child_opacity
child.stroke_color = self.child_stroke_color
child.stroke_width = self.child_stroke_width
child.stroke_opacity = self.child_stroke_opacity
child.title_font_size = self.child_title_font_size
child.title_font_color = self.child_title_font_color
child.title_alignement = self.child_title_alignement
self.children.append(child)
return child
# RECT info functions
def get_rect_height(self):
"""Return height of the rect element."""
height = self.height
if self.title_extern:
height = self.height - self.get_title_height()
return height
def get_rect_top_y(self):
"""Return upper edge of the rect element."""
y = self.y
if self.title_extern:
y = self.get_title_bottom_y()
return y
def get_rect_bottom_y(self):
"""Return bottom edge of the rect element."""
y = self.get_rect_top_y() + self.get_rect_height()
return y
# TITLE info functions
def get_max_title_width(self):
"""Return approx. maximal width (px) of title text."""
max_width = 0
if self.title:
max_width = len(self.title.strip()) * self.title_avg_char_width
return max_width
def get_title_height(self):
"""Return total height (with margins) of the title element."""
height = 0
if self.title:
height = 1.5 * self.title_font_size
return height
def get_title_top_y(self):
"""Return upper edge of title."""
y = self.y
return y
def get_title_bottom_y(self):
"""Return bottom edge of title."""
y = self.get_title_top_y() + self.get_title_height()
return y
# TEXT info functions
def get_max_text_width(self):
"""Return approx. maximal width (px) of all text lines."""
max_width = 0
if self.text_lines:
for text in self.text_lines:
tmp_width = len(text.strip()) * self.text_avg_char_width
if tmp_width > max_width:
max_width = tmp_width
return max_width
def get_text_height(self):
"""Return total height (with margins) of the text lines."""
height = 0
if self.text_lines:
height = ((len(self.text_lines) * 1.5 + 0.5) *
self.text_font_size)
return height
def get_text_top_y(self):
"""Return upper edge of text lines."""
y = self.get_title_bottom_y()
return y
def get_text_bottom_y(self):
"""Return bottom edge of text lines."""
y = self.get_text_top_y() + self.get_text_height()
return y
# CHILD info functions
def get_max_children_width(self):
"""Return approx. maximal width (px) of the all children."""
max_width = 0
if self.children:
for boxrect in self.children:
boxrect_width = boxrect.width
needed_width = boxrect_width + 2 * self.content_absolute_margin
if needed_width > max_width:
max_width = needed_width
return max_width
def get_children_height(self):
"""Return total height (with margins) of all children."""
height = 0
if self.children:
for rect in self.children:
# increase children height by the height of the child_rect plus
# a margin equal to the text_font_size
height += rect.height + self.text_font_size
return height
def get_children_top_y(self):
"""Return upper edge of children."""
y = self.get_text_bottom_y()
return y
def get_children_bottom_y(self):
"""Return bottom edge of children."""
y = self.get_children_top_y() + self.get_children_height()
return y
def get_box_rect_corner_coordinates(self):
"""Return coordinates of corners of the rect-element of the instance.
        Return coordinates as a list of tuples beginning with the top left,
followed by the top right, bottom right, and bottom left corner.
Return:
corner_coords (list of tuple of floats):
[(tl_x, tl_y), (tr_x, tr_y), (br_x, br_y), (bl_x, bl_y)]
"""
# top left corner
tl = (self.x, self.y)
tr = (self.x + self.width, self.y)
br = (self.x + self.width, self.y + self.height)
bl = (self.x, self.y + self.height)
return [tl, tr, br, bl]
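        # Worked example (illustrative numbers): a box with x=10, y=20,
        # width=100 and height=50 yields [(10, 20), (110, 20), (110, 70), (10, 70)].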
def get_box_rect_center_cooridnates(self):
"""Return coordinates of the center of the rect-element."""
return (self.x + 0.5 * self.width, self.y + 0.5 * self.height)
# HELPER functions
def _get_svg_rect_element(self):
"""Return a rect svg element of the instance."""
rect_id = '{}_rect'.format(self.group_id)
y = self.get_rect_top_y()
height = self.get_rect_height()
return self._rect(self.x, y, self.width, height, rect_id)
def _get_svg_title_element(self):
"""Return a text svg element of the instance's title."""
if self.title:
if self.title_alignement == 'middle':
x = self.x + 0.5 * self.width
elif self.title_alignement == 'left':
if self.title_extern:
x = self.x
else:
x = self.x + self.content_absolute_margin
else:
raise ValueError('title_alignement must be "middle" or "left".')
y = self.get_title_top_y() + 0.5 * self.get_title_height()
title_id = '{}_title'.format(self.group_id)
return self._title(self.title.strip(), x, y, title_id)
def _get_svg_text_element(self):
"""Return a svg group with all text lines as text svg elements."""
if self.text_lines:
if self.text_alignement == 'middle':
x = self.x + 0.5 * self.width
elif self.text_alignement == 'left':
x = self.x + self.content_absolute_margin
else:
raise ValueError('text_alignement must be "middle" or "left".')
text_group = self.dwg.g(id='{}_text'.format(self.group_id))
rel_text_y = 1.0 / (len(self.text_lines) + 1)
for i, text in enumerate(self.text_lines):
rel_pos = (i + 1) * rel_text_y
y = self.get_text_top_y() + rel_pos * self.get_text_height()
text_group.add(self._text(text.strip(), x, y))
return text_group
def _get_svg_children_element(self):
"""Return the complete svg-representation of all children."""
children_group = self.dwg.g(id='{}_children'.format(self.group_id))
for child_rect in self.children:
children_group.add(child_rect.as_svg_group())
return children_group
def _rect(self, x, y, width, height, rect_id=None):
return self.dwg.rect(
insert=(x, y),
size=(width, height),
fill=self.color,
opacity=self.opacity,
stroke=self.stroke_color,
stroke_width=self.stroke_width,
stroke_opacity=self.stroke_opacity,
id=rect_id
)
def _title(self, string, x, y, title_id=None):
style_template = 'text-anchor:{}; dominant-baseline:mathematical'
style = style_template.format(self.title_alignement)
return self.dwg.text(
string,
insert=(x,y),
fill=self.title_font_color,
font_size=self.title_font_size,
style=style,
id=title_id,
)
def _text(self, string, x, y, text_id=None):
style_template = 'text-anchor:{}; dominant-baseline:mathematical'
style = style_template.format(self.text_alignement)
return self.dwg.text(
string,
insert=(x,y),
fill=self.text_font_color,
font_size=self.text_font_size,
style=style,
id=text_id,
)
def _adjust_children_width(self):
"""Correct/Adjust the width and x-pos of all child boxrects.
Due to the dynamic width/height of the master-box, child boxes that
are generated at different times can differ in their width. That's
        why, before the final svg element is generated, the widths and x-positions
        of all child boxrects are corrected first.
"""
width = copy.copy(self._width)
child_width = width - 2 * self.content_absolute_margin
child_x = self.x + self.content_absolute_margin
for boxrect in self.children:
boxrect._width = child_width
boxrect._x = child_x
boxrect._content_absolute_margin = self.content_absolute_margin
boxrect._adjust_children_width()
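# Usage sketch for BoxSvgHelper (illustrative only; the identifiers and values
# below are not part of this module):
#
#     box = BoxSvgHelper('example_box', x=0, y=0, width=300,
#                        text_lines=['first line', 'second line'], title='Example')
#     box.add_child('example_child', text_lines=['child line'], title='Child')
#     group = box.as_svg_group()  # svgwrite group, ready to add to a drawing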
| mit | -172,437,947,065,888,160 | 35.476621 | 80 | 0.571204 | false |
OCM-Lab-PUC/switch-chile | python_utility_scripts/existing_projects_plant_grouping.py | 1 | 5767 | # -*- coding: utf-8 -*-
# Copyright 2016 The Switch-Chile Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
# Operations, Control and Markets laboratory at Pontificia Universidad
# Católica de Chile.
"""
Groups generation units by plant to reduce number of variables. This is
not adequate when performing UC or OPF, but is acceptable when considering
long term planning.
"""
from csv import reader, writer
from getpass import getpass
import sys
import psycopg2
############
# Parameters
# Name of the ungrouped file
csv_infile = 'centrales.csv'
# Type of grouping. More than one may be chosen, to get multiple outputs
group_by_plant_name = True
plants = [
'abanico','angostura','antilhue','arauco','atacama',
'bocamina','callao','candelaria','casblanca','chuyaca',
'cmpc_pacifico','cochrane','colihues','arica','enaex',
'iquique','diesel_zofri','emelda','escuadron','esperanza_u',
'estandartes_zofri','florida','guacolda','hidrobonito',
'huasco_tg','isla','laguna_verde','laja_u','lalackama','lautaro',
'loma_los_colorados','los_corrales','los_morros','los_quilos',
'lousiana_pacific','maitenes','multiexport','munilque',
'pilmaiquen','pozo_almonte_solar','puntilla','quilleco',
'quintero','salmofood','san_lorenzo_de_d_de_almagro','santa_marta',
'skretting','solar_jama','taltal_','angamos','mejillones',
'norgener','tocopilla','tomaval','ujina','ventanas_','watt',
'yungay'
]
#atacama has two different consumption entries; keep the second one
#tocopilla has several unit groups; choose with care
def plant_name(name):
for p_name in plants:
if p_name in name:
return p_name
return ''
ok_to_group = False
def group_plant(units):
max_power = 0
n_units = 0
spec_cons = 0
for u in units:
n_units += int(u[2])
max_power += float(u[5]) * int(u[2])
if u[12] != '':
spec_cons += float(u[12])
else:
spec_cons = None
# Average specific consumption rate of fuel
if spec_cons:
spec_cons = spec_cons/n_units
units[-1][0] = 'central_'+plant_name(units[-1][0])
units[-1][2] = n_units
units[-1][5] = max_power
units[-1][12] = spec_cons
return units[-1]
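    # Worked illustration (hypothetical numbers): two rows of one plant with
    # (units, max_net_power, specific_consumption) = (1, 50.0, 0.30) and
    # (2, 25.0, 0.40) merge into a single row with units = 3,
    # max_power = 1*50.0 + 2*25.0 = 100.0 and
    # spec_cons = (0.30 + 0.40) / 3 ~ 0.23 (sum of row values over total units).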
grouped_units = []
with open(csv_infile, 'r') as f:
all_units = []
read = reader(f)
for row in read:
all_units.append(row)
all_units.sort()
aux_plant = []
for index, unit in enumerate(all_units):
name = unit[0]
if plant_name(name) == 'tocopilla' or plant_name(name) == 'mejillones':
grouped_units.append(unit)
continue
if plant_name(name) != '' and plant_name(name) != plant_name(all_units[index-1][0]):
# If its the first plant to be grouped, skip grouping
if ok_to_group == True:
# Group previous plant
grouped_units.append(group_plant(aux_plant))
# And start storing the new one
aux_plant = [unit]
else:
ok_to_group = True
elif plant_name(name) != '':
aux_plant.append(unit)
else:
grouped_units.append(unit)
# Group the last plant
grouped_units.append(group_plant(aux_plant))
with open('grouped_plants.csv', 'w') as out:
csv_writer = writer(out, delimiter = ',')
for plant in grouped_units:
csv_writer.writerow(plant)
##############################
####### UPLOAD TO DB #########
projects_for_db = []
with open('grouped_plants.csv', 'r') as f:
read = reader(f)
for row in read:
for i in range(11, 20):
# Enter null values if fuel info not present
if not row[i]:
row[i] = None
projects_for_db.append(row)
##############
# DB Conection
username = 'bmaluenda'
passw = getpass('Enter database password for user %s' % username)
try:
    # Remember to enter and/or modify connection parameters according to your
    # setup
con = psycopg2.connect(database='switch_chile', user=username,
host='localhost', port='5915',
password=passw)
print ("Connection to database established...")
except:
sys.exit("Error connecting to the switch_chile database...")
cur = con.cursor()
# Clean database
try:
cleaning = "DELETE FROM chile_new.geo_existing_projects"
cur.execute(cleaning)
print("Table erased")
except psycopg2.DatabaseError as e:
if con:
con.rollback()
print(e)
# Load new data
try:
values_str = ','.join(cur.mogrify("(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
project) for project in projects_for_db)
query_str = "INSERT INTO chile_new.geo_existing_projects (db_name, system, units, main_energy_source, start_date, max_net_power, min_net_power, connection_point, voltage_connection, easting, northing, fuel_1, specific_consumption_1, units_specific_consumption_1, fuel_2, specific_consumption_2, units_specific_consumption_2, fuel_3, specific_consumption_3, units_specific_consumption_3) VALUES "+values_str+";"
cur.execute(query_str)
con.commit()
print ("New existing project data has been uploaded to the DB.")
except psycopg2.DatabaseError as e:
if con:
con.rollback()
print(e)
# Update geometry column with new coordinates
try:
query_str = "UPDATE chile_new.geo_existing_projects SET geom = ST_SetSrid(ST_MakePoint(easting, northing), 32718)"
cur.execute(query_str)
con.commit()
print ("Updated geometry column with new data.")
except psycopg2.DatabaseError as e:
if con:
con.rollback()
print(e)
if cur:
cur.close()
if con:
con.close()
| apache-2.0 | -6,822,856,848,889,920,000 | 31.761364 | 414 | 0.624176 | false |
marinho/PyNFe | pynfe/processamento/serializacao.py | 1 | 17276 | # -*- coding: utf-8 -*-
try:
set
except:
from sets import Set as set
from pynfe.entidades import Emitente, Cliente, Produto, Transportadora, NotaFiscal
from pynfe.excecoes import NenhumObjetoEncontrado, MuitosObjetosEncontrados
from pynfe.utils import etree, so_numeros, obter_municipio_por_codigo, obter_pais_por_codigo
from pynfe.utils.flags import CODIGOS_ESTADOS, VERSAO_PADRAO
class Serializacao(object):
"""Classe abstrata responsavel por fornecer as funcionalidades basicas para
exportacao e importacao de Notas Fiscais eletronicas para formatos serializados
de arquivos. Como XML, JSON, binario, etc.
Nao deve ser instanciada diretamente!"""
_fonte_dados = None
    _ambiente = 1  # 1 = Producao (production), 2 = Homologacao (test environment)
_nome_aplicacao = 'PyNFe'
def __new__(cls, *args, **kwargs):
if cls == Serializacao:
raise Exception('Esta classe nao pode ser instanciada diretamente!')
else:
return super(Serializacao, cls).__new__(cls, *args, **kwargs)
def __init__(self, fonte_dados, homologacao=False):
self._fonte_dados = fonte_dados
self._ambiente = homologacao and 2 or 1
def exportar(self, destino, **kwargs):
"""Gera o(s) arquivo(s) de exportacao a partir da Nofa Fiscal eletronica
ou lista delas."""
raise Exception('Metodo nao implementado')
def importar(self, origem):
"""Fabrica que recebe o caminho ou objeto de origem e instancia os objetos
da PyNFe"""
raise Exception('Metodo nao implementado')
class SerializacaoXML(Serializacao):
_versao = VERSAO_PADRAO
def exportar(self, destino=None, retorna_string=False, **kwargs):
"""Gera o(s) arquivo(s) de Nofa Fiscal eletronica no padrao oficial da SEFAZ
e Receita Federal, para ser(em) enviado(s) para o webservice ou para ser(em)
armazenado(s) em cache local."""
        # Root node of the output XML
raiz = etree.Element('NFe', xmlns="http://www.portalfiscal.inf.br/nfe")
        # Load the list of invoices (Notas Fiscais)
notas_fiscais = self._fonte_dados.obter_lista(_classe=NotaFiscal, **kwargs)
for nf in notas_fiscais:
raiz.append(self._serializar_notas_fiscal(nf, retorna_string=False))
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def importar(self, origem):
"""Cria as instancias do PyNFe a partir de arquivos XML no formato padrao da
SEFAZ e Receita Federal."""
raise Exception('Metodo nao implementado')
def _serializar_emitente(self, emitente, tag_raiz='emit', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Issuer data
etree.SubElement(raiz, 'CNPJ').text = so_numeros(emitente.cnpj)
etree.SubElement(raiz, 'xNome').text = emitente.razao_social
etree.SubElement(raiz, 'xFant').text = emitente.nome_fantasia
etree.SubElement(raiz, 'IE').text = emitente.inscricao_estadual
# Endereço
endereco = etree.SubElement(raiz, 'enderEmit')
etree.SubElement(endereco, 'xLgr').text = emitente.endereco_logradouro
etree.SubElement(endereco, 'nro').text = emitente.endereco_numero
etree.SubElement(endereco, 'xCpl').text = emitente.endereco_complemento
etree.SubElement(endereco, 'xBairro').text = emitente.endereco_bairro
etree.SubElement(endereco, 'cMun').text = emitente.endereco_municipio
etree.SubElement(endereco, 'xMun').text = obter_municipio_por_codigo(
emitente.endereco_municipio, emitente.endereco_uf,
)
etree.SubElement(endereco, 'UF').text = emitente.endereco_uf
etree.SubElement(endereco, 'CEP').text = so_numeros(emitente.endereco_cep)
etree.SubElement(endereco, 'cPais').text = emitente.endereco_pais
etree.SubElement(endereco, 'xPais').text = obter_pais_por_codigo(emitente.endereco_pais)
etree.SubElement(endereco, 'fone').text = emitente.endereco_telefone
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_cliente(self, cliente, tag_raiz='dest', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Customer data
etree.SubElement(raiz, cliente.tipo_documento).text = so_numeros(cliente.numero_documento)
etree.SubElement(raiz, 'xNome').text = cliente.razao_social
etree.SubElement(raiz, 'IE').text = cliente.inscricao_estadual
# Endereço
endereco = etree.SubElement(raiz, 'enderDest')
etree.SubElement(endereco, 'xLgr').text = cliente.endereco_logradouro
etree.SubElement(endereco, 'nro').text = cliente.endereco_numero
etree.SubElement(endereco, 'xCpl').text = cliente.endereco_complemento
etree.SubElement(endereco, 'xBairro').text = cliente.endereco_bairro
etree.SubElement(endereco, 'cMun').text = cliente.endereco_municipio
etree.SubElement(endereco, 'xMun').text = obter_municipio_por_codigo(
cliente.endereco_municipio, cliente.endereco_uf,
)
etree.SubElement(endereco, 'UF').text = cliente.endereco_uf
etree.SubElement(endereco, 'CEP').text = so_numeros(cliente.endereco_cep)
etree.SubElement(endereco, 'cPais').text = cliente.endereco_pais
etree.SubElement(endereco, 'xPais').text = obter_pais_por_codigo(cliente.endereco_pais)
etree.SubElement(endereco, 'fone').text = cliente.endereco_telefone
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_transportadora(self, transportadora, tag_raiz='transporta', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Carrier data
etree.SubElement(raiz, transportadora.tipo_documento).text = so_numeros(transportadora.numero_documento)
etree.SubElement(raiz, 'xNome').text = transportadora.razao_social
etree.SubElement(raiz, 'IE').text = transportadora.inscricao_estadual
# Endereço
etree.SubElement(raiz, 'xEnder').text = transportadora.endereco_logradouro
etree.SubElement(raiz, 'cMun').text = transportadora.endereco_municipio
etree.SubElement(raiz, 'xMun').text = obter_municipio_por_codigo(
transportadora.endereco_municipio, transportadora.endereco_uf,
)
etree.SubElement(raiz, 'UF').text = transportadora.endereco_uf
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_entrega_retirada(self, entrega_retirada, tag_raiz='entrega', retorna_string=True):
raiz = etree.Element(tag_raiz)
        # Delivery/pickup data
etree.SubElement(raiz, entrega_retirada.tipo_documento).text = so_numeros(entrega_retirada.numero_documento)
# Endereço
etree.SubElement(raiz, 'xLgr').text = entrega_retirada.endereco_logradouro
etree.SubElement(raiz, 'nro').text = entrega_retirada.endereco_numero
etree.SubElement(raiz, 'xCpl').text = entrega_retirada.endereco_complemento
etree.SubElement(raiz, 'xBairro').text = entrega_retirada.endereco_bairro
etree.SubElement(raiz, 'cMun').text = entrega_retirada.endereco_municipio
etree.SubElement(raiz, 'xMun').text = obter_municipio_por_codigo(
entrega_retirada.endereco_municipio, entrega_retirada.endereco_uf,
)
etree.SubElement(raiz, 'UF').text = entrega_retirada.endereco_uf
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_produto_servico(self, produto_servico, tag_raiz='det', retorna_string=True):
raiz = etree.Element(tag_raiz)
# Produto
prod = etree.SubElement(raiz, 'prod')
etree.SubElement(prod, 'cProd').text = str(produto_servico.codigo)
etree.SubElement(prod, 'cEAN').text = produto_servico.ean
etree.SubElement(prod, 'xProd').text = produto_servico.descricao
etree.SubElement(prod, 'CFOP').text = produto_servico.cfop
etree.SubElement(prod, 'uCom').text = produto_servico.unidade_comercial
etree.SubElement(prod, 'qCom').text = str(produto_servico.quantidade_comercial or 0)
etree.SubElement(prod, 'vUnCom').text = str(produto_servico.valor_unitario_comercial or 0)
etree.SubElement(prod, 'vProd').text = str(produto_servico.valor_total_bruto or 0)
etree.SubElement(prod, 'cEANTrib').text = produto_servico.ean_tributavel
etree.SubElement(prod, 'uTrib').text = produto_servico.unidade_tributavel
etree.SubElement(prod, 'qTrib').text = str(produto_servico.quantidade_tributavel)
etree.SubElement(prod, 'vUnTrib').text = str(produto_servico.valor_unitario_tributavel)
# Imposto
imposto = etree.SubElement(raiz, 'imposto')
icms = etree.SubElement(imposto, 'ICMS')
icms_item = etree.SubElement(icms, 'ICMS'+produto_servico.icms_situacao_tributaria)
etree.SubElement(icms_item, 'orig').text = str(produto_servico.icms_origem)
etree.SubElement(icms_item, 'CST').text = produto_servico.icms_situacao_tributaria
etree.SubElement(icms_item, 'modBC').text = str(produto_servico.icms_modalidade_determinacao_bc)
etree.SubElement(icms_item, 'vBC').text = str(produto_servico.icms_valor_base_calculo)
etree.SubElement(icms_item, 'pICMS').text = str(produto_servico.icms_aliquota)
etree.SubElement(icms_item, 'vICMS').text = str(produto_servico.icms_valor)
pis = etree.SubElement(imposto, 'PIS')
pis_item = etree.SubElement(pis, 'PISAliq')
etree.SubElement(pis_item, 'CST').text = str(produto_servico.pis_situacao_tributaria)
etree.SubElement(pis_item, 'vBC').text = str(produto_servico.pis_valor_base_calculo)
etree.SubElement(pis_item, 'pPIS').text = str(produto_servico.pis_aliquota_percentual)
etree.SubElement(pis_item, 'vPIS').text = str(produto_servico.pis_valor)
cofins = etree.SubElement(imposto, 'COFINS')
cofins_item = etree.SubElement(cofins, 'COFINSAliq')
etree.SubElement(cofins_item, 'CST').text = str(produto_servico.cofins_situacao_tributaria)
etree.SubElement(cofins_item, 'vBC').text = str(produto_servico.cofins_valor_base_calculo)
etree.SubElement(cofins_item, 'pCOFINS').text = str(produto_servico.cofins_aliquota_percentual)
etree.SubElement(cofins_item, 'vCOFINS').text = str(produto_servico.cofins_valor)
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
def _serializar_notas_fiscal(self, nota_fiscal, tag_raiz='infNFe', retorna_string=True):
raiz = etree.Element(tag_raiz, versao=self._versao)
        # Invoice (Nota Fiscal) data
ide = etree.SubElement(raiz, 'ide')
etree.SubElement(ide, 'cUF').text = CODIGOS_ESTADOS[nota_fiscal.uf]
etree.SubElement(ide, 'cNF').text = nota_fiscal.codigo_numerico_aleatorio
etree.SubElement(ide, 'natOp').text = nota_fiscal.natureza_operacao
etree.SubElement(ide, 'indPag').text = str(nota_fiscal.forma_pagamento)
etree.SubElement(ide, 'mod').text = str(nota_fiscal.modelo)
etree.SubElement(ide, 'serie').text = nota_fiscal.serie
etree.SubElement(ide, 'nNF').text = str(nota_fiscal.numero_nf)
etree.SubElement(ide, 'dEmi').text = nota_fiscal.data_emissao.strftime('%Y-%m-%d')
etree.SubElement(ide, 'dSaiEnt').text = nota_fiscal.data_saida_entrada.strftime('%Y-%m-%d')
etree.SubElement(ide, 'tpNF').text = str(nota_fiscal.tipo_documento)
etree.SubElement(ide, 'cMunFG').text = nota_fiscal.municipio
etree.SubElement(ide, 'tpImp').text = str(nota_fiscal.tipo_impressao_danfe)
etree.SubElement(ide, 'tpEmis').text = str(nota_fiscal.forma_emissao)
etree.SubElement(ide, 'cDV').text = nota_fiscal.dv_codigo_numerico_aleatorio
etree.SubElement(ide, 'tpAmb').text = str(self._ambiente)
etree.SubElement(ide, 'finNFe').text = str(nota_fiscal.finalidade_emissao)
etree.SubElement(ide, 'procEmi').text = str(nota_fiscal.processo_emissao)
etree.SubElement(ide, 'verProc').text = '%s %s'%(self._nome_aplicacao,
nota_fiscal.versao_processo_emissao)
# Emitente
raiz.append(self._serializar_emitente(nota_fiscal.emitente, retorna_string=False))
# Destinatário
raiz.append(self._serializar_cliente(nota_fiscal.cliente, retorna_string=False))
# Retirada
if nota_fiscal.retirada:
raiz.append(self._serializar_entrega_retirada(
nota_fiscal.retirada,
retorna_string=False,
tag_raiz='retirada',
))
# Entrega
if nota_fiscal.entrega:
raiz.append(self._serializar_entrega_retirada(
nota_fiscal.entrega,
retorna_string=False,
tag_raiz='entrega',
))
# Itens
for num, item in enumerate(nota_fiscal.produtos_e_servicos):
det = self._serializar_produto_servico(item, retorna_string=False)
det.attrib['nItem'] = str(num+1)
raiz.append(det)
# Totais
total = etree.SubElement(raiz, 'total')
icms_total = etree.SubElement(total, 'ICMSTot')
etree.SubElement(icms_total, 'vBC').text = str(nota_fiscal.totais_icms_base_calculo)
etree.SubElement(icms_total, 'vICMS').text = str(nota_fiscal.totais_icms_total)
etree.SubElement(icms_total, 'vBCST').text = str(nota_fiscal.totais_icms_st_base_calculo)
etree.SubElement(icms_total, 'vST').text = str(nota_fiscal.totais_icms_st_total)
etree.SubElement(icms_total, 'vProd').text = str(nota_fiscal.totais_icms_total_produtos_e_servicos)
etree.SubElement(icms_total, 'vFrete').text = str(nota_fiscal.totais_icms_total_frete)
etree.SubElement(icms_total, 'vSeg').text = str(nota_fiscal.totais_icms_total_seguro)
etree.SubElement(icms_total, 'vDesc').text = str(nota_fiscal.totais_icms_total_desconto)
etree.SubElement(icms_total, 'vII').text = str(nota_fiscal.totais_icms_total_ii)
etree.SubElement(icms_total, 'vIPI').text = str(nota_fiscal.totais_icms_total_ipi)
etree.SubElement(icms_total, 'vPIS').text = str(nota_fiscal.totais_icms_pis)
etree.SubElement(icms_total, 'vCOFINS').text = str(nota_fiscal.totais_icms_cofins)
etree.SubElement(icms_total, 'vOutro').text = str(nota_fiscal.totais_icms_outras_despesas_acessorias)
etree.SubElement(icms_total, 'vNF').text = str(nota_fiscal.totais_icms_total_nota)
# Transporte
transp = etree.SubElement(raiz, 'transp')
etree.SubElement(transp, 'modFrete').text = str(nota_fiscal.transporte_modalidade_frete)
# Transportadora
transp.append(self._serializar_transportadora(
nota_fiscal.transporte_transportadora,
retorna_string=False,
))
# Veículo
veiculo = etree.SubElement(transp, 'veicTransp')
etree.SubElement(veiculo, 'placa').text = nota_fiscal.transporte_veiculo_placa
etree.SubElement(veiculo, 'UF').text = nota_fiscal.transporte_veiculo_uf
etree.SubElement(veiculo, 'RNTC').text = nota_fiscal.transporte_veiculo_rntc
# Reboque
reboque = etree.SubElement(transp, 'reboque')
etree.SubElement(reboque, 'placa').text = nota_fiscal.transporte_reboque_placa
etree.SubElement(reboque, 'UF').text = nota_fiscal.transporte_reboque_uf
etree.SubElement(reboque, 'RNTC').text = nota_fiscal.transporte_reboque_rntc
# Volumes
for volume in nota_fiscal.transporte_volumes:
vol = etree.SubElement(transp, 'vol')
etree.SubElement(vol, 'qVol').text = str(volume.quantidade)
etree.SubElement(vol, 'esp').text = volume.especie
etree.SubElement(vol, 'marca').text = volume.marca
etree.SubElement(vol, 'nVol').text = volume.numeracao
etree.SubElement(vol, 'pesoL').text = str(volume.peso_liquido)
etree.SubElement(vol, 'pesoB').text = str(volume.peso_bruto)
# Lacres
lacres = etree.SubElement(vol, 'lacres')
for lacre in volume.lacres:
etree.SubElement(lacres, 'nLacre').text = lacre.numero_lacre
        # Additional information
info_ad = etree.SubElement(raiz, 'infAdic')
etree.SubElement(info_ad, 'infAdFisco').text = nota_fiscal.informacoes_adicionais_interesse_fisco
etree.SubElement(info_ad, 'infCpl').text = nota_fiscal.informacoes_complementares_interesse_contribuinte
        # 'Id' attribute of the root tag
        # E.g.: NFe35080599999090910270550010000000011518005123
raiz.attrib['Id'] = nota_fiscal.identificador_unico
if retorna_string:
return etree.tostring(raiz, pretty_print=True)
else:
return raiz
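# Usage sketch (illustrative only; 'fonte' stands for any data source exposing
# the obter_lista() interface used above):
#
#     serializador = SerializacaoXML(fonte, homologacao=True)
#     xml_string = serializador.exportar(retorna_string=True)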
| lgpl-3.0 | -4,061,621,420,979,028,000 | 48.895954 | 116 | 0.663925 | false |
t3dev/odoo | addons/website/controllers/backend.py | 5 | 2937 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http
from odoo.http import request
from odoo.tools.translate import _
class WebsiteBackend(http.Controller):
@http.route('/website/fetch_dashboard_data', type="json", auth='user')
def fetch_dashboard_data(self, website_id, date_from, date_to):
Website = request.env['website']
has_group_system = request.env.user.has_group('base.group_system')
has_group_designer = request.env.user.has_group('website.group_website_designer')
dashboard_data = {
'groups': {
'system': has_group_system,
'website_designer': has_group_designer
},
'currency': request.env.user.company_id.currency_id.id,
'dashboards': {
'visits': {},
}
}
current_website = website_id and Website.browse(website_id) or Website.get_current_website()
multi_website = request.env.user.has_group('website.group_multi_website')
dashboard_data['websites'] = (multi_website and request.env['website'].search([]) or current_website).read(['id', 'name'])
for website in dashboard_data['websites']:
if website['id'] == current_website.id:
website['selected'] = True
if has_group_designer:
if current_website.google_management_client_id and current_website.google_analytics_key:
dashboard_data['dashboards']['visits'] = dict(
ga_client_id=current_website.google_management_client_id or '',
ga_analytics_key=current_website.google_analytics_key or '',
)
return dashboard_data
@http.route('/website/dashboard/set_ga_data', type='json', auth='user')
def website_set_ga_data(self, website_id, ga_client_id, ga_analytics_key):
if not request.env.user.has_group('base.group_system'):
return {
'error': {
'title': _('Access Error'),
'message': _('You do not have sufficient rights to perform that action.'),
}
}
if not ga_analytics_key or not ga_client_id.endswith('.apps.googleusercontent.com'):
return {
'error': {
'title': _('Incorrect Client ID / Key'),
'message': _('The Google Analytics Client ID or Key you entered seems incorrect.'),
}
}
Website = request.env['website']
current_website = website_id and Website.browse(website_id) or Website.get_current_website()
request.env['res.config.settings'].create({
'google_management_client_id': ga_client_id,
'google_analytics_key': ga_analytics_key,
'website_id': current_website.id,
}).execute()
return True
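        # Usage note (illustrative only): both routes in this controller are JSON
        # endpoints, so an authenticated client would post to them roughly as:
        #   /website/fetch_dashboard_data   {"website_id": 1, "date_from": "...", "date_to": "..."}
        #   /website/dashboard/set_ga_data  {"website_id": 1,
        #       "ga_client_id": "xxx.apps.googleusercontent.com", "ga_analytics_key": "..."}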
| gpl-3.0 | -7,889,350,832,139,283,000 | 43.5 | 130 | 0.582567 | false |
ctools/ctools | modules/comscripts/comlixfit.py | 1 | 10461 | #! /usr/bin/env python
# ==========================================================================
# Perform SRCLIX model fitting of COMPTEL observations
#
# Copyright (C) 2021 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import os
import sys
import gammalib
import ctools
# =============== #
# comlixfit class #
# =============== #
class comlixfit(ctools.cslikelihood):
"""
Perform SRCLIX model fitting of COMPTEL observations
"""
# Constructor
def __init__(self, *argv):
"""
Constructor
"""
# Initialise application by calling the base class constructor
self._init_cslikelihood(self.__class__.__name__, ctools.__version__, argv)
# Return
return
# Private methods
def _get_parameters(self):
"""
Get parameters from parfile
"""
# Set observation if not done before
if self.obs().is_empty():
self.obs().load(self['inobs'].filename())
# Set models if we have none
if self.obs().models().is_empty():
self.obs().models(self['inmodel'].filename())
# Query parameters
self['max_iter'].integer()
self['like_accuracy'].real()
self['fix_spat_for_ts'].boolean()
# Get parameters
bkgmethod = self['bkgmethod'].string()
nrunav = self['nrunav'].integer()
navgr = self['navgr'].integer()
nincl = self['nincl'].integer()
nexcl = self['nexcl'].integer()
# Check for incorrect parameters
if nexcl < 0 or nexcl >= nincl:
msg = 'Incorrect value %d for nexcl (bins to exclude).' % nexcl
raise RuntimeError(msg)
if nexcl != 0 and 2*int(nexcl/2) == nexcl :
msg = 'nexcl=%d (bins to exclude) should be zero or odd number.' % nexcl
raise RuntimeError(msg)
if nincl < 3 or 2*int(nincl/2) == nincl:
msg = 'nincl=%d (bins to include) should be odd and >= 3.' % nincl
raise RuntimeError(msg)
if navgr < 1 or 2*int(navgr/2) == navgr :
msg = 'navgr=%d should be odd and >= 1.' % navgr
raise RuntimeError(msg)
# Query ahead output model filename
if self._read_ahead():
self['suffix'].string()
self['outfolder'].string()
self['outobs'].filename()
self['outmodel'].filename()
# Write input parameters into logger
self._log_parameters(gammalib.TERSE)
# Return
return
def _update_obs(self):
"""
Update background model in observation container
        The method updates the background model in the observation container
by taking into account the current source models in the BGDLIXA
model generation algorithm.
"""
# Get task parameters
bkgmethod = self['bkgmethod'].string()
nrunav = self['nrunav'].integer()
navgr = self['navgr'].integer()
nincl = self['nincl'].integer()
nexcl = self['nexcl'].integer()
# Extract source models from observation model container
models = gammalib.GModels()
for model in self.obs().models():
if model.classname() == 'GModelSky':
models.append(model)
# Loop over all observations
for obs in self.obs():
# Skip non-COMPTEL observations
if obs.classname() != 'GCOMObservation':
continue
# Compute DRM
drm = obs.drm(models)
# Compute background model
obs.compute_drb(bkgmethod, drm, nrunav, navgr, nincl, nexcl)
# Signal that DRB file was not yet saved
obs.drbname('')
# Return
return
def _final_model_fit(self):
"""
Perform final model fit using ctlike
"""
# Create instance of model fitting tool
like = ctools.ctlike(self.obs())
like['fix_spat_for_ts'] = self['fix_spat_for_ts'].boolean()
# Run ctlike
like.run()
# Recover results
self.opt(like.opt())
self.obs(like.obs())
# Return
return
def _get_obs_header(self, obs):
"""
Get observation header
"""
# Set header
header = obs.instrument() + ' observation'
# If observation name is not empty then add name
        if obs.name() != '':
header += ' \"' + obs.name() + '\"'
# If observation ID is not empty then add ID
        if obs.id() != '':
header += ' (id=' + obs.id() + ')'
# Return header
return header
# Public methods
def run(self):
"""
Run the script
"""
# Switch screen logging on in debug mode
if self._logDebug():
self._log.cout(True)
# Get parameters
self._get_parameters()
# Log header
self._log_header1(gammalib.NORMAL, 'Input observations')
# Log input observations
self._log_string(gammalib.NORMAL, str(self.obs()))
# Get parameters and initialise some variables
niter = self['max_iter'].integer()
eps = self['like_accuracy'].real()
delta = 0.0
# Write header
self._log_header1(gammalib.NORMAL,
'Iterative maximum likelihood model fitting')
# Loop over iterations
for iter in range(niter):
# Update observations
self._update_obs()
# Fit model
self.obs().optimize(self.opt())
# Compute logL difference after first iteration
if iter > 0:
delta = logL - self.opt().value()
# Store maximum likelihood value
logL = self.opt().value()
# Log maximum likelihood
if iter == 0:
result = '%.5f' % (logL)
else:
result = '%.5f (%.5f)' % (logL, delta)
self._log_value(gammalib.NORMAL, 'logL after iteration %d' % (iter+1),
result)
# Check for convergence
if iter > 0:
if delta < eps:
break
# Do final model fit
self._final_model_fit()
# Compute logL difference and store maximum likelihood value
delta = logL - self.opt().value()
logL = self.opt().value()
# Log final maximum likelihood
result = '%.5f (%.5f)' % (logL, delta)
self._log_value(gammalib.NORMAL, 'logL after final iteration',
result)
# Log header
self._log_header1(gammalib.NORMAL,
'Maximum likelihood optimisation results')
self._log_string(gammalib.NORMAL, str(self.opt()))
self._log_string(gammalib.NORMAL, str(self.obs().models()))
# Return
return
def save(self):
"""
Save observation definition file
"""
# Write header
self._log_header1(gammalib.TERSE, 'Save observations')
# Get output filenames
outobs = self['outobs'].filename()
outmodel = self['outmodel'].filename()
# If file exists and clobber flag is false then raise an exception
if outobs.exists() and not self['clobber'].boolean():
msg = ('Cannot save "'+outobs.url()+'": File already exists. '
'Use parameter clobber=yes to allow overwriting of files.')
raise RuntimeError(msg)
elif outmodel.exists() and not self['clobber'].boolean():
msg = ('Cannot save "'+outmodel.url()+'": File already exists. '
'Use parameter clobber=yes to allow overwriting of files.')
raise RuntimeError(msg)
# Otherwise log filename and save file
else:
# Get DRB file suffix and set outfolder
suffix = self['suffix'].string()
outfolder = self['outfolder'].string()
# Create outfolder directory
try:
os.makedirs(gammalib.expand_env(outfolder))
except OSError:
pass
# Loop over all observations
for obs in self.obs():
# Skip non-COMPTEL observations
if obs.classname() != 'GCOMObservation':
continue
# Store background filename
drename = '%s/%s' % (outfolder, os.path.basename(obs.drename().url()))
if suffix == '':
drbname = drename.replace('dre', 'drb')
else:
drbname = drename.replace('dre', 'drb-%s' % suffix)
obs.drbname(drbname)
# Save DRB file
obs.drb().save(drbname, self['clobber'].boolean())
# Log saving
self._log_value(gammalib.NORMAL, 'DRB file', drbname)
# Log observation definition filename
self._log_value(gammalib.NORMAL, 'Obs. definition XML file',
outobs.url())
# Save observations
self.obs().save(outobs)
# Log model definition filename
self._log_value(gammalib.NORMAL, 'Model definition XML file',
outmodel.url())
# Save models
self.obs().models().save(outmodel)
# Return
return
# ======================== #
# Main routine entry point #
# ======================== #
if __name__ == '__main__':
# Create instance of application
app = comlixfit(sys.argv)
# Execute application
app.execute()
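# Usage sketch (file names are illustrative): instead of the command line, the
# script can also be driven from Python, e.g.
#
#     lixfit = comlixfit()
#     lixfit['inobs']    = 'obs.xml'      # observation definition XML
#     lixfit['inmodel']  = 'models.xml'   # model definition XML
#     lixfit['max_iter'] = 10
#     lixfit.run()
#     lixfit.save()                       # writes DRB files, outobs and outmodel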
| gpl-3.0 | 556,582,991,032,804,600 | 30.414414 | 86 | 0.531402 | false |
wackerl91/luna | tests/testgame.py | 1 | 1774 | import unittest
from resources.lib.model.game import Game
class TestGame(unittest.TestCase):
def testGameMergeSelfNone(self):
game1 = Game('Name')
game2 = Game('Name')
game2.genre = ['Action', 'Adventure']
game2.posters = ['/path/to/poster']
game2.fanarts = ['path/to/art/1', 'path/to/art/2', 'path/to/art/3']
game1.merge(game2)
self.assertEqual(game1.genre, ['Action', 'Adventure'])
self.assertEqual(game1.posters, ['/path/to/poster'])
self.assertEqual(game1.fanarts, ['path/to/art/1', 'path/to/art/2', 'path/to/art/3'])
self.assertEqual(game1.get_selected_fanart(), 'path/to/art/1')
self.assertEqual(game1.get_selected_poster(), '/path/to/poster')
def testGameMergeSelfFilled(self):
game1 = Game('Name')
game1.genre = ['Shooter', 'Adventure']
game1.posters = ['/path/to/poster/original']
game1.fanarts = ['path/to/art/1-1', 'path/to/art/1-2', 'path/to/art/1-3']
game2 = Game('Name')
game2.genre = ['Action', 'Adventure']
game2.posters = ['/path/to/poster']
game2.fanarts = ['path/to/art/1', 'path/to/art/2', 'path/to/art/1-3']
game1.merge(game2)
self.assertEqual(game1.genre, ['Action', 'Adventure', 'Shooter'])
self.assertEqual('/path/to/poster/original' in game1.posters, True)
self.assertEqual('/path/to/poster' in game1.posters, True)
self.assertEqual('path/to/art/1' in game1.fanarts, True)
self.assertEqual('path/to/art/2' in game1.fanarts, True)
self.assertEqual('path/to/art/1-1' in game1.fanarts, True)
self.assertEqual('path/to/art/1-2' in game1.fanarts, True)
self.assertEqual('path/to/art/1-3' in game1.fanarts, True)
| gpl-3.0 | 8,846,565,560,580,218,000 | 40.255814 | 92 | 0.619504 | false |
yudingding6197/fin_script | static_present.py | 1 | 5506 | #!/usr/bin/env python
# -*- coding:gbk -*-
import sys
import re
import os
import time
import string
import datetime
import tushare as ts
from internal.ts_common import *
from decimal import Decimal
today = datetime.date.today()
#Meaning of show_flag
#0: do not fetch each stock's float shares, so the turnover rate is not calculated
#1: fetch each stock's float shares and calculate the turnover rate
#2: show the latest news of each stock; all of today's news is shown, or only one item if there is none today
pindex = len(sys.argv)
LOOP_COUNT=0
st_today_base = None
while LOOP_COUNT<3:
try:
st_today_base = ts.get_today_all()
except:
LOOP_COUNT += 1
time.sleep(0.5)
else:
break;
if st_today_base is None:
print "Timeout to get stock basic info"
exit(0)
st_today = st_today_base.sort_values(['changepercent'], 0, False)
#new_st_list = list(st_today[st_today.changepercent>11]['code'])
new_st_list = []
for index,row in st_today.iterrows():
code = row[0].encode('gbk')
if row['changepercent']>11:
new_st_list.append(code)
print ''
#print new_st_list
LOOP_COUNT=0
st_bas = None
while LOOP_COUNT<3:
try:
st_bas = ts.get_stock_basics()
except:
LOOP_COUNT += 1
time.sleep(0.5)
else:
break;
if st_bas is None:
print "Timeout to get stock basic info"
exit(0)
st_pb_base = st_bas[st_bas.pb!=0]
st_pb_base = st_pb_base.sort_values(['timeToMarket'], 0, False)
st_index = st_pb_base.index
st_bas_list=list(st_index)
#st_bas.to_excel("a_stock_base.xlsx")
#st_pb_base.to_excel("a_stock_pb_base.xlsx")
#print st_pb_base.head(10)
st_list = []
for i in range(0, len(new_st_list)):
if new_st_list[i] in st_bas_list[0:10]:
pass
else:
st_list.append(new_st_list[i])
st_list.extend(st_bas_list)
'''
st_list = st_list[0:60]
st_list.append('300175')
st_list.append('603558')
#st_list=['600828','002819','300611']
#print st_list
'''
number = len(st_list)
if number<=0:
exit(0)
today_open = []
stcsItem=statisticsItem()
b_get_data = 1
#Fetch `base` codes per request
#Slice the list; the starting position can be configured
base = 23
loop_ct = number/base
if number%base!=0:
loop_ct += 1
pd_list = []
for i in range(0, loop_ct):
end_idx = min(base*(i+1), number)
cur_list = st_list[i*base:end_idx]
if len(cur_list)==0:
break
#print cur_list
excecount = 0
stdf = None
while excecount<5:
try:
stdf = ts.get_realtime_quotes(cur_list)
except:
print "Get except:"
time.sleep(0.5)
excecount += 1
if excecount<5:
continue
stdf = None
break
else:
break
if stdf is None:
print "Get list fail at:", cur_list
continue
#print stdf
yzcx_flag = 0
for index,row in stdf.iterrows():
stockInfo = []
code = cur_list[index]
index += 1
name = row[0]
pre_close = float(row['pre_close'])
price = float(row['price'])
		#Use the daily K-line data to judge whether this is a one-word limit-up (YZZT) new stock
if b_get_data == 1:
			#Get the daily trading data of each stock
day_info_df = ts.get_k_data(code)
#print day_info_df
trade_days = len(day_info_df)
b_open=0
yzzt_day = 0
if trade_days==1:
stcsItem.s_new += 1
yzcx_flag = 1
for tdidx,tdrow in day_info_df.iterrows():
open = tdrow[1]
close = tdrow[2]
high = tdrow['high']
low = tdrow['low']
if high!=low:
if yzzt_day!=0:
if (yzzt_day+1)==trade_days:
chg_perc = round((price-pre_close)*100/pre_close,2)
open_list = [code, name, chg_perc, price, yzzt_day]
today_open.append(open_list)
b_open = 1
break
					#Once the limit-up opens (is broken), break out of the for loop
yzzt_day += 1
pre_close = close
if b_open==0:
dt_str=day_info_df.iloc[trade_days-1,0]
last_date = datetime.datetime.strptime(dt_str, '%Y-%m-%d').date()
cmp_delta = today-last_date
if cmp_delta.days==0:
stcsItem.s_cx_yzzt += 1
yzcx_flag = 1
			#Assume a one-word limit-up (YZZT) streak will not exceed 33 trading days
if trade_days>33:
b_get_data = 0
stk_type = analyze_status(code, name, row, stcsItem, yzcx_flag, pd_list)
#if i>2:
# break
#if len(pd_list)>0:
# df_tdy = pd.DataFrame(pd_list)
# df_tdy1 = df_tdy.sort_values([0], 0, False)
str_opn = "[%d %d %d %d]" % (stcsItem.s_open_zt,stcsItem.s_close_zt,stcsItem.s_open_T_zt,stcsItem.s_dk_zt)
print "%4d-ZT %4d-DT %d-X %d--%s" % (stcsItem.s_zt,stcsItem.s_dt,stcsItem.s_new,stcsItem.s_yzzt, str_opn)
print "%4d-CG %4d-FT KD:[%s] %2d-YIN" %(stcsItem.s_zthl,stcsItem.s_dtft,','.join(stcsItem.lst_kd),stcsItem.s_zt_o_gt_c)
print "%4d(%4d) ZERO:%4d %4d(%4d)" %(stcsItem.s_open_sz, stcsItem.s_open_dz, stcsItem.s_open_pp, stcsItem.s_open_xd, stcsItem.s_open_dd)
print "%4d(%4d) ZERO:%4d %4d(%4d)" %(stcsItem.s_close_sz, stcsItem.s_close_dz, stcsItem.s_close_pp, stcsItem.s_close_xd, stcsItem.s_close_dd)
print "4%%:%4d %4d" %(stcsItem.s_high_zf,stcsItem.s_low_df)
#print today_open
str = ''
list = today_open
if len(list)>0:
print "CXKB:"
for i in range(0, len(list)):
itm_lst = list[i]
if itm_lst[2]>9.9:
str1 = "%s(%d, ZT), " % (itm_lst[1], itm_lst[4])
elif itm_lst[2]<-9.9:
str1 = "%s(%d, DT), " % (itm_lst[1], itm_lst[4])
else:
str1 = "%s(%d, %.2f%%), " % (itm_lst[1], itm_lst[4],itm_lst[2])
str += str1
print str
else:
print "CXKB:====="
print ''
str = ''
list = stcsItem.lst_nb
if len(list)>0:
print "NB:"
for i in range(0, len(list)):
itm_lst = list[i]
str1 = "%s(%.2f%%, %.2f%%), " % (itm_lst[1], itm_lst[2], itm_lst[4])
str += str1
print str
else:
print "NB:====="
print ''
str = ''
list = stcsItem.lst_jc
if len(list)>0:
print "JC:"
for i in range(0, len(list)):
itm_lst = list[i]
str1 = "%s(%.2f%%, %.2f%%), " % (itm_lst[1], itm_lst[2], itm_lst[4])
str += str1
print str
else:
print "JC:====="
#print '\n'.join(['%s:%s' % item for item in stcsItem.__dict__.items()])
| gpl-2.0 | 7,300,797,230,191,250,000 | 22.732759 | 141 | 0.622594 | false |
jhaux/tensorflow | tensorflow/contrib/learn/python/learn/estimators/run_config.py | 1 | 16013 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run Config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import run_config as core_run_config
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
# A list of the property names in RunConfig that the user is allowed to change.
# They will not affect the execution framework, so when the execution framework
# checks the `uid` of the RunConfig, they should be ignored.
_DEFAULT_UID_WHITE_LIST = [
'tf_random_seed',
'save_summary_steps',
'save_checkpoints_steps',
'save_checkpoints_secs',
'session_config',
'keep_checkpoint_max',
'keep_checkpoint_every_n_hours',
]
class Environment(object):
# For running general distributed training.
CLOUD = 'cloud'
# For running Google-internal distributed training.
GOOGLE = 'google'
# For running on local desktop.
LOCAL = 'local'
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class ClusterConfig(object):
"""This class specifies the configurations for a distributed run.
If you're using `tf.learn` `Estimators`, you should probably use the subclass
RunConfig instead.
"""
def __init__(self, master=None, evaluation_master=None):
"""Constructor.
Sets the properties `cluster_spec`, `is_chief`, `master` (if `None` in the
args), `num_ps_replicas`, `task_id`, and `task_type` based on the
`TF_CONFIG` environment variable, if the pertinent information is
present. The `TF_CONFIG` environment variable is a JSON object with
attributes: `cluster`, `environment`, and `task`.
`cluster` is a JSON serialized version of `ClusterSpec`'s Python dict from
`server_lib.py`, mapping task types (usually one of the TaskType enums) to a
list of task addresses.
`environment` specifies the runtime environment for the job (usually one of
the `Environment` enums). Defaults to `LOCAL`.
`task` has two attributes: `type` and `index`, where `type` can be any of
the task types in `cluster`. When `TF_CONFIG` contains said information, the
following properties are set on this class:
* `task_type` is set to `TF_CONFIG['task']['type']`. Defaults to `None`.
* `task_id` is set to `TF_CONFIG['task']['index']`. Defaults to 0.
* `cluster_spec` is parsed from `TF_CONFIG['cluster']`. Defaults to {}.
* `master` is determined by looking up `task_type` and `task_id` in the
`cluster_spec`. Defaults to ''.
* `num_ps_replicas` is set by counting the number of nodes listed
in the `ps` attribute of `cluster_spec`. Defaults to 0.
* `num_worker_replicas` is set by counting the number of nodes listed
in the `worker` attribute of `cluster_spec`. Defaults to 0.
  * `is_chief` is determined based on `task_type`, `task_id`, and
`environment`.
Example:
```
cluster = {'ps': ['host1:2222', 'host2:2222'],
'worker': ['host3:2222', 'host4:2222', 'host5:2222']}
os.environ['TF_CONFIG'] = json.dumps(
{'cluster': cluster,
'task': {'type': 'worker', 'index': 1}})
config = ClusterConfig()
assert config.master == 'host4:2222'
assert config.task_id == 1
assert config.num_ps_replicas == 2
assert config.num_worker_replicas == 3
assert config.cluster_spec == server_lib.ClusterSpec(cluster)
assert config.task_type == 'worker'
assert not config.is_chief
```
Args:
master: TensorFlow master. Defaults to empty string for local.
evaluation_master: The master on which to perform evaluation.
"""
# If not explicitly specified in the constructor and the TF_CONFIG
# environment variable is present, load cluster_spec from TF_CONFIG.
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
# Set task_type and task_id if the TF_CONFIG environment variable is
# present. Otherwise, use the respective default (None / 0).
task_env = config.get('task', {})
self._task_type = task_env.get('type', None)
self._task_id = self.get_task_id()
self._cluster_spec = server_lib.ClusterSpec(config.get('cluster', {}))
self._master = (master if master is not None else
_get_master(self._cluster_spec, self._task_type,
self._task_id) or '')
self._num_ps_replicas = _count_ps(self._cluster_spec) or 0
self._num_worker_replicas = _count_worker(self._cluster_spec) or 0
# Set is_chief.
self._environment = config.get('environment', Environment.LOCAL)
self._is_chief = None
if self._task_type is None:
self._is_chief = (self._task_id == 0)
elif self._environment == Environment.CLOUD:
# When the TF_CONFIG environment variable is set, we can set the
# default of is_chief to 0 when task_type is "master" and task_id is 0.
self._is_chief = (self._task_type == TaskType.MASTER and
self._task_id == 0)
else:
# Legacy behavior is that is_chief is None if task_id == 0.
self._is_chief = (self._task_type == TaskType.WORKER and
self._task_id == 0)
self._evaluation_master = (evaluation_master if evaluation_master is not None else
_get_master(self._cluster_spec, self._task_type,
self._task_id) or '')
@property
def cluster_spec(self):
return self._cluster_spec
@property
def environment(self):
return self._environment
@property
def evaluation_master(self):
return self._evaluation_master
@property
def is_chief(self):
return self._is_chief
@property
def master(self):
return self._master
@property
def num_ps_replicas(self):
return self._num_ps_replicas
@property
def num_worker_replicas(self):
return self._num_worker_replicas
@property
def task_id(self):
return self._task_id
@property
def task_type(self):
return self._task_type
@staticmethod
def get_task_id():
"""Returns task index from `TF_CONFIG` environmental variable.
If you have a ClusterConfig instance, you can just access its task_id
property instead of calling this function and re-parsing the environmental
variable.
Returns:
`TF_CONFIG['task']['index']`. Defaults to 0.
"""
config = json.loads(os.environ.get('TF_CONFIG') or '{}')
task_env = config.get('task', {})
task_index = task_env.get('index')
return int(task_index) if task_index else 0
class RunConfig(ClusterConfig, core_run_config.RunConfig):
"""This class specifies the configurations for an `Estimator` run.
This class is the implementation of ${tf.estimator.RunConfig} interface.
If you're a Google-internal user using command line flags with
`learn_runner.py` (for instance, to do distributed training or to use
parameter servers), you probably want to use `learn_runner.EstimatorConfig`
instead.
"""
_USE_DEFAULT = 0
def __init__(self,
master=None,
num_cores=0,
log_device_placement=False,
gpu_memory_fraction=1,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_secs=_USE_DEFAULT,
save_checkpoints_steps=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
evaluation_master=None,
model_dir=None,
session_config=None):
"""Constructor.
Note that the superclass `ClusterConfig` may set properties like
`cluster_spec`, `is_chief`, `master` (if `None` in the args),
`num_ps_replicas`, `task_id`, and `task_type` based on the `TF_CONFIG`
environment variable. See `ClusterConfig` for more details.
Args:
master: TensorFlow master. Defaults to empty string for local.
num_cores: Number of cores to be used. If 0, the system picks an
appropriate number (default: 0).
log_device_placement: Log the op placement to devices (default: False).
gpu_memory_fraction: Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value allows consistency between reruns.
save_summary_steps: Save summaries every this many steps.
save_checkpoints_secs: Save checkpoints every this many seconds. Can not
be specified with `save_checkpoints_steps`.
save_checkpoints_steps: Save checkpoints every this many steps. Can not be
specified with `save_checkpoints_secs`.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables
the feature.
evaluation_master: the master on which to perform evaluation.
model_dir: directory where model parameters, graph etc are saved. If
`None`, will use `model_dir` property in `TF_CONFIG` environment
variable. If both are set, must have same value. If both are `None`, see
`Estimator` about where the model will be saved.
session_config: a ConfigProto used to set session parameters, or None.
Note - using this argument, it is easy to provide settings which break
otherwise perfectly good models. Use with care.
"""
super(RunConfig, self).__init__(
master=master, evaluation_master=evaluation_master)
gpu_options = config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction)
self._tf_config = config_pb2.ConfigProto(
log_device_placement=log_device_placement,
inter_op_parallelism_threads=num_cores,
intra_op_parallelism_threads=num_cores,
gpu_options=gpu_options)
self._tf_random_seed = tf_random_seed
self._save_summary_steps = save_summary_steps
self._save_checkpoints_secs = save_checkpoints_secs
self._session_config = session_config
if save_checkpoints_secs == RunConfig._USE_DEFAULT:
if save_checkpoints_steps is None:
self._save_checkpoints_secs = 600
else:
self._save_checkpoints_secs = None
self._save_checkpoints_steps = save_checkpoints_steps
# TODO(weiho): Remove these after ModelFn refactoring, when users can
# create Scaffold and Saver in their model_fn to set these.
self._keep_checkpoint_max = keep_checkpoint_max
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._model_dir = _get_model_dir(model_dir)
@experimental
def uid(self, whitelist=None):
"""Generates a 'Unique Identifier' based on all internal fields.
Caller should use the uid string to check `RunConfig` instance integrity
    within one session, but should not rely on the implementation details, which
    are subject to change.
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
        includes most properties users are allowed to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = _DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
# For class instance without __repr__, some special cares are required.
# Otherwise, the object address will be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = ordered_state['_cluster_spec'].as_dict()
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
@property
def model_dir(self):
return self._model_dir
@property
def tf_config(self):
return self._tf_config
@property
def tf_random_seed(self):
return self._tf_random_seed
@property
def save_summary_steps(self):
return self._save_summary_steps
@property
def save_checkpoints_secs(self):
return self._save_checkpoints_secs
@property
def save_checkpoints_steps(self):
return self._save_checkpoints_steps
@property
def session_config(self):
return self._session_config
@property
def keep_checkpoint_max(self):
return self._keep_checkpoint_max
@property
def keep_checkpoint_every_n_hours(self):
return self._keep_checkpoint_every_n_hours
def _count_ps(cluster_spec):
"""Counts the number of parameter servers in cluster_spec."""
return len(cluster_spec.as_dict().get('ps', [])) if cluster_spec else 0
def _count_worker(cluster_spec):
"""Counts the number of workers in cluster_spec."""
return len(cluster_spec.as_dict().get('worker', [])) if cluster_spec else 0
def _get_master(cluster_spec, task_type, task_id):
"""Returns the appropriate string for the TensorFlow master."""
if not cluster_spec:
return ''
# If there is only one node in the cluster, do things locally.
jobs = cluster_spec.jobs
if len(jobs) == 1 and len(cluster_spec.job_tasks(jobs[0])) == 1:
return ''
# Lookup the master in cluster_spec using task_type and task_id,
# if possible.
if task_type:
if task_type not in jobs:
raise ValueError(
'%s is not a valid task_type in the cluster_spec:\n'
'%s\n\n'
'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_type, cluster_spec))
addresses = cluster_spec.job_tasks(task_type)
if task_id >= len(addresses) or task_id < 0:
raise ValueError(
'%d is not a valid task_id for task_type %s in the '
'cluster_spec:\n'
'%s\n\n'
          'Note that these values may be coming from the TF_CONFIG environment '
'variable.' % (task_id, task_type, cluster_spec))
return 'grpc://' + addresses[task_id]
# For backwards compatibility, we return empty string if task_type was
# not set (task_type did not previously exist).
return ''
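# Illustrative behaviour of _get_master (comments only; the cluster below is a
# hypothetical two-job example and assumes `tf` is the imported tensorflow module):
#
#   cluster = tf.train.ClusterSpec({'ps': ['ps0:2222'],
#                                   'worker': ['worker0:2222', 'worker1:2222']})
#   _get_master(cluster, 'worker', 1)  # -> 'grpc://worker1:2222'
#   _get_master(None, 'worker', 0)     # -> '' (no cluster: run locally)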
def _get_model_dir(model_dir):
"""Returns `model_dir` based user provided `model_dir` or `TF_CONFIG`."""
model_dir_in_tf_config = json.loads(
os.environ.get('TF_CONFIG') or '{}').get('model_dir', None)
if model_dir_in_tf_config is not None:
if model_dir is not None and model_dir_in_tf_config != model_dir:
raise ValueError(
'`model_dir` provided in RunConfig construct, if set, '
'must have the same value as the model_dir in TF_CONFIG. '
'model_dir: {}\nTF_CONFIG["model_dir"]: {}.\n'.format(
model_dir, model_dir_in_tf_config))
logging.info('Using model_dir in TF_CONFIG: %s', model_dir_in_tf_config)
return model_dir or model_dir_in_tf_config
| apache-2.0 | -3,153,779,885,378,558,000 | 36.06713 | 86 | 0.663711 | false |
Angakkuit/asiaq-aws | test/unit/test_disco_elb.py | 1 | 8576 | """Tests of disco_elb"""
from unittest import TestCase
from mock import MagicMock
from moto import mock_elb
from disco_aws_automation import DiscoELB
TEST_ENV_NAME = 'unittestenv'
TEST_HOSTCLASS = 'mhcunit'
TEST_VPC_ID = 'vpc-56e10e3d' # the hard coded VPC Id that moto will always return
TEST_DOMAIN_NAME = 'test.example.com'
def _get_vpc_mock():
vpc_mock = MagicMock()
vpc_mock.environment_name = TEST_ENV_NAME
vpc_mock.vpc = MagicMock()
vpc_mock.vpc.id = TEST_VPC_ID
return vpc_mock
class DiscoELBTests(TestCase):
"""Test DiscoELB"""
def setUp(self):
self.disco_elb = DiscoELB(_get_vpc_mock(), route53=MagicMock(), acm=MagicMock(), iam=MagicMock())
self.disco_elb.acm.get_certificate_arn = MagicMock(return_value="arn:aws:acm::123:blah")
self.disco_elb.iam.get_certificate_arn = MagicMock(return_value="arn:aws:iam::123:blah")
def _create_elb(self, hostclass=None, public=False, tls=False,
idle_timeout=None, connection_draining_timeout=None,
sticky_app_cookie=None):
return self.disco_elb.get_or_create_elb(
hostclass=hostclass or TEST_HOSTCLASS,
security_groups=['sec-1'],
subnets=['sub-1'],
hosted_zone_name=TEST_DOMAIN_NAME,
health_check_url="/",
instance_protocol="HTTP",
instance_port=80,
elb_protocol="HTTPS" if tls else "HTTP",
elb_port=443 if tls else 80,
elb_public=public,
sticky_app_cookie=sticky_app_cookie,
idle_timeout=idle_timeout,
connection_draining_timeout=connection_draining_timeout)
@mock_elb
def test_get_certificate_arn_prefers_acm(self):
'''get_certificate_arn() prefers an ACM provided certificate'''
self.assertEqual(self.disco_elb.get_certificate_arn("dummy"), "arn:aws:acm::123:blah")
@mock_elb
def test_get_certificate_arn_fallback_to_iam(self):
'''get_certificate_arn() uses an IAM certificate if no ACM cert available'''
self.disco_elb.acm.get_certificate_arn = MagicMock(return_value=None)
self.assertEqual(self.disco_elb.get_certificate_arn("dummy"), "arn:aws:iam::123:blah")
@mock_elb
def test_get_cname(self):
'''Make sure get_cname returns what we expect'''
self.assertEqual(self.disco_elb.get_cname(TEST_HOSTCLASS, TEST_DOMAIN_NAME),
"mhcunit-unittestenv.test.example.com")
@mock_elb
    def test_get_elb_with_create(self):
        """Test creating an ELB"""
self._create_elb()
self.assertEquals(
len(self.disco_elb.elb_client.describe_load_balancers()['LoadBalancerDescriptions']), 1)
@mock_elb
    def test_get_elb_with_update(self):
        """Updating an ELB doesn't create a new ELB"""
self._create_elb()
self._create_elb()
self.assertEquals(
len(self.disco_elb.elb_client.describe_load_balancers()['LoadBalancerDescriptions']), 1)
@mock_elb
    def test_get_elb_internal(self):
        """Test creating an internal private ELB"""
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb()
self.disco_elb.elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTP',
'LoadBalancerPort': 80,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': 'arn:aws:acm::123:blah'
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'],
Scheme='internal')
@mock_elb
    def test_get_elb_internal_no_tls(self):
        """Test creating an internal private ELB without TLS"""
self.disco_elb.acm.get_certificate_arn = MagicMock(return_value=None)
self.disco_elb.iam.get_certificate_arn = MagicMock(return_value=None)
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb()
elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTP',
'LoadBalancerPort': 80,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': ''
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'],
Scheme='internal')
@mock_elb
    def test_get_elb_external(self):
        """Test creating a publicly accessible ELB"""
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb(public=True)
elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTP',
'LoadBalancerPort': 80,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': 'arn:aws:acm::123:blah'
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'])
@mock_elb
    def test_get_elb_with_tls(self):
        """Test creating an ELB with TLS"""
elb_client = self.disco_elb.elb_client
elb_client.create_load_balancer = MagicMock(wraps=elb_client.create_load_balancer)
self._create_elb(tls=True)
elb_client.create_load_balancer.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
Listeners=[{
'Protocol': 'HTTPS',
'LoadBalancerPort': 443,
'InstanceProtocol': 'HTTP',
'InstancePort': 80,
'SSLCertificateId': 'arn:aws:acm::123:blah'
}],
Subnets=['sub-1'],
SecurityGroups=['sec-1'],
Scheme='internal')
@mock_elb
def test_get_elb_with_idle_timeout(self):
"""Test creating an ELB with an idle timeout"""
client = self.disco_elb.elb_client
client.modify_load_balancer_attributes = MagicMock(wraps=client.modify_load_balancer_attributes)
self._create_elb(idle_timeout=100)
client.modify_load_balancer_attributes.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
LoadBalancerAttributes={'ConnectionDraining': {'Enabled': False, 'Timeout': 0},
'ConnectionSettings': {'IdleTimeout': 100}}
)
@mock_elb
    def test_get_elb_with_connection_draining(self):
        """Test creating an ELB with connection draining"""
client = self.disco_elb.elb_client
client.modify_load_balancer_attributes = MagicMock(wraps=client.modify_load_balancer_attributes)
self._create_elb(connection_draining_timeout=100)
client.modify_load_balancer_attributes.assert_called_once_with(
LoadBalancerName='unittestenv-mhcunit',
LoadBalancerAttributes={'ConnectionDraining': {'Enabled': True, 'Timeout': 100}}
)
@mock_elb
def test_delete_elb(self):
"""Test deleting an ELB"""
self._create_elb()
self.disco_elb.delete_elb(TEST_HOSTCLASS)
load_balancers = self.disco_elb.elb_client.describe_load_balancers()['LoadBalancerDescriptions']
self.assertEquals(len(load_balancers), 0)
@mock_elb
def test_get_existing_elb(self):
"""Test get_elb for a hostclass"""
self._create_elb()
self.assertIsNotNone(self.disco_elb.get_elb(TEST_HOSTCLASS))
@mock_elb
def test_list(self):
"""Test getting the list of ELBs"""
self._create_elb(hostclass='mhcbar')
self._create_elb(hostclass='mhcfoo')
self.assertEquals(len(self.disco_elb.list()), 2)
@mock_elb
def test_elb_delete(self):
"""Test deletion of ELBs"""
self._create_elb(hostclass='mhcbar')
self.disco_elb.delete_elb(hostclass='mhcbar')
self.assertEquals(len(self.disco_elb.list()), 0)
@mock_elb
def test_destroy_all_elbs(self):
"""Test deletion of all ELBs"""
self._create_elb(hostclass='mhcbar')
self._create_elb(hostclass='mhcfoo')
self.disco_elb.destroy_all_elbs()
self.assertEquals(len(self.disco_elb.list()), 0)
| bsd-2-clause | -2,801,640,775,529,815,000 | 38.520737 | 105 | 0.608092 | false |
IfengAutomation/uitester | uitester/test_manager/rpc_server.py | 1 | 6380 | from socketserver import ThreadingTCPServer, StreamRequestHandler
import json
from threading import Thread
import logging
import queue
import traceback
logger = logging.getLogger('Tester')
Timeout = 120
Port = 11800
class RPCServer(ThreadingTCPServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
self.allow_reuse_address = True
super().__init__(server_address, RequestHandlerClass, bind_and_activate)
self._agents = {}
def add_agent(self, agent):
self._agents[agent.device_id] = agent
def rm_agent(self, device_id):
self._agents.pop(device_id)
def get_agent(self, device_id):
return self._agents.get(device_id)
class RPCHandler(StreamRequestHandler):
def __init__(self, request, client_address, server):
self.has_register = False
self.agent_proxy = None
super().__init__(request, client_address, server)
def handle(self):
logger.debug('RPCHandler: client handler start')
while True:
line = self.rfile.readline().decode().strip()
if len(line) == 0:
logger.debug('RPCHandler: client disconnected.')
break
try:
msg = RPCMessage.from_json(line)
if not self.has_register:
self.handle_register(msg)
elif msg.msg_type == RPCMessage.RPC_KILL_SIGNAL:
self.handle_unregister()
else:
self.handle_message(msg)
except Exception:
logger.debug('RPCHandler: catch exception\n%s' % traceback.format_exc())
continue
logger.debug('RPCHandler: client handler stop')
def handle_register(self, msg):
if msg.msg_type == RPCMessage.RPC_CALL and msg.name == 'register':
            if len(msg.args) < 1:
                res = self._make_error_msg()
                self.wfile.write(res.to_bytes())
                return
self.agent_proxy = RPCAgent()
self.agent_proxy.device_id = msg.args[0]
self.agent_proxy.wfile = self.wfile
self.agent_proxy.connection = self.connection
self.server.add_agent(self.agent_proxy)
self.has_register = True
self.wfile.write(self._make_ok_msg().to_bytes())
else:
self.wfile.write(self._make_error_msg().to_bytes())
def _make_ok_msg(self):
ok_msg = RPCMessage()
ok_msg.msg_type = RPCMessage.RPC_RESULT
ok_msg.args = [True]
ok_msg.name = 'ok'
return ok_msg
def _make_error_msg(self):
err_msg = RPCMessage()
err_msg.msg_type = RPCMessage.RPC_RESULT
err_msg.args = [False]
err_msg.name = 'error'
return err_msg
def handle_unregister(self):
self.server.rm_agent(self.agent_proxy.device_id)
self.connection.close()
self.agent_proxy.is_closed = True
def handle_message(self, msg):
self.agent_proxy.responses.put(msg)
class RPCAgent:
def __init__(self):
self.device_id = ''
self.is_closed = False
self.msg_id = 0
self.wfile = None
self.connection = None
self.responses = queue.Queue()
def call(self, name, *args, timeout=Timeout, **kwargs):
"""
kwargs:
1) version
version=1 use normal rpc call
version=2 use reflection rpc call
default is version=1
reflection rpc call:
name('call', 'call_static', 'new', 'delete') : reflection method name
*args:
                'call' needs at least 2 arguments: 1) instance 2) method name, plus any method arguments
                'call_static' needs at least 2 arguments: 1) class name 2) method name, plus any method arguments
                'new' needs at least 1 argument: 1) class name, plus any constructor arguments
                'delete' needs at least 1 argument: 1) instance
        :return RemoteObject: remote object with two attrs: hash and class. If the remote object is an Android View,
        it also has 'res-id' and 'content-des' attrs. If it is a TextView, it also has a 'text' attr.
        Timeout:
            RPC calls time out after `timeout` seconds (the module default `Timeout` is 120); a TimeoutError is raised on timeout.
"""
self.msg_id += 1
msg = RPCMessage()
msg.msg_id = self.msg_id
msg.msg_type = RPCMessage.RPC_CALL
msg.name = name
msg.args = args
if 'version' in kwargs:
msg.version = kwargs['version']
self.wfile.write(msg.to_bytes())
try:
res = self.responses.get(timeout=timeout)
return res
except queue.Empty:
raise TimeoutError("RPC Call timeout")
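    # Hypothetical usage sketch (not part of the original module; the device id
    # and Java class/method names below are placeholders):
    #
    #   agent = server.get_agent('emulator-5554')
    #   obj = agent.call('new', 'com.example.Foo', version=2)        # reflection: construct
    #   agent.call('call', obj, 'doSomething', 'arg1', version=2)    # reflection: invoke
    #   agent.call('delete', obj, version=2)                         # reflection: release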
@property
def closed(self):
return self.wfile.closed
def close(self):
msg = RPCMessage.get_kill_signal()
self.wfile.write(msg.to_bytes())
class RPCMessage:
RPC_CALL = 1
RPC_RESULT = 2
RPC_KILL_SIGNAL = 99
def __init__(self):
self.msg_type = None
self.msg_id = None
self.version = 1
self.name = None
self.args = []
@classmethod
def get_kill_signal(cls):
msg = cls()
msg.name = 'kill'
msg.msg_type = RPCMessage.RPC_KILL_SIGNAL
return msg
@classmethod
def from_json(cls, json_str):
msg_dict = json.loads(decode(json_str))
if type(msg_dict) is not dict:
raise TypeError('Json is not a dict, can\'t create rpc message')
instance = cls()
instance.__dict__ = msg_dict
return instance
def to_json(self):
return encode(json.dumps(self.__dict__))
def to_bytes(self):
return (self.to_json() + '\n').encode()
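# Wire-format sketch (comments only): each message is one %-escaped JSON object per
# line. For example, a register call with msg_id 1 and args ['device-1'] serializes
# via to_bytes() to roughly
#   b'{"msg_type": 1, "msg_id": 1, "version": 1, "name": "register", "args": ["device-1"]}\n'
# (key order may differ; '%' and newline characters inside the JSON are escaped as
# '%e' and '%n' by encode() below).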
def get_server(port):
return RPCServer(('0.0.0.0', port), RPCHandler)
def start(port):
server = get_server(port)
t = Thread(target=server.serve_forever)
t.setDaemon(True)
t.start()
logger.debug('RPC Server started')
return server
def encode(msg):
    """Escape a message. Rules:
% -> %e
\n -> %n
"""
msg = msg.replace("%", "%e")
msg = msg.replace("\n", "%n")
return msg
def decode(msg):
    """Unescape a message. Rules:
%n -> \n
%e -> %
"""
msg = msg.replace("%n", "\n")
msg = msg.replace("%e", "%")
return msg | apache-2.0 | 5,114,537,863,154,361,000 | 28.133028 | 111 | 0.574331 | false |
MasterScrat/PostMonitor | monitor.py | 1 | 3168 | #!/usr/bin/python
import sys
import json
import urllib
import urllib2
import time
import logging
from tinydb import TinyDB, where
from apscheduler.schedulers.blocking import BlockingScheduler
# Multiple Projects, each with multiple Events (release, blog post...), each with multiple Links (Reddit, HN, FB, Twitter...)
# A Record is a set of numbers related to a Link at a point in time.
# TODO
# - add FB support (requires auth!)
# - add Twitter support (https://github.com/bear/python-twitter)
# - add WP support
# - keep conf in DB
# - add schedueling info per event? plus default value per project?
# - generalise score/num_comments to array of metrics?
# - generalise target/section to dict of meta?
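# Illustrative conf.json structure assumed by load_config() below (all values are
# hypothetical):
# {
#   "monitor_name": "My monitor",
#   "projects": [
#     {"project_name": "Demo",
#      "events": [
#        {"event_name": "v1.0 release",
#         "urls": ["https://www.reddit.com/r/programming/comments/abc123/",
#                  "https://news.ycombinator.com/item?id=123456"]}]}]}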
logging.basicConfig()
db = TinyDB('data/records.json')
sched = BlockingScheduler()
def main():
sched.add_job(get_records, 'interval', id='monitor', seconds=60, max_instances=1)
sched.start()
#get_records()
def get_records():
print sched.print_jobs()
conf = load_config()
timestamp = time.time()
print
print '===', conf['monitor_name'], '==='
for project in conf['projects']:
print
print '=', project['project_name'], '='
for event in project['events']:
print '[', event['event_name'], ']'
for url in event['urls']:
record = get_record(url)
record.timestamp = timestamp
record.project = project['project_name']
record.event = event['event_name']
record.url = url
db.insert(record.to_json())
print record
class Record:
def __init__(self, score=0, num_comments=0):
self.score = score
self.num_comments = num_comments
self.timestamp = 0
self.site = ''
self.project = ''
self.event = ''
self.url = ''
self.target = '' # TODO
self.section = '' # TODO
def __str__(self):
return self.site + ': ' + str(self.score) + ' points, ' + str(self.num_comments) + ' comments'
def to_json(self):
return json.loads(json.dumps(self, default=lambda o: o.__dict__))
def get_record(url):
if "reddit.com" in url:
if ".json" not in url:
url = url + '.json'
record = reddit_stats(url)
record.site = "Reddit"
elif "hacker-news.firebaseio.com" in url:
record = hn_stats(url)
record.site = "HackerNews"
elif "news.ycombinator.com" in url:
record = hn_stats('https://hacker-news.firebaseio.com/v0/item/' + url.split("=")[1] + '.json')
record.site = "HackerNews"
elif "api.github.com" in url:
record = gh_stats(url)
record.site = "GitHub"
else:
raise NameError('Unkown site URL ' + url)
return record
def reddit_stats(url):
data = json.loads(read_url(url))
data = data[0]['data']['children'][0]['data']
return Record(data['score'], data['num_comments'])
def hn_stats(url):
data = json.loads(read_url(url))
return Record(data['score'], data['descendants'])
def gh_stats(url):
data = json.loads(read_url(url))
return Record(data['watchers_count'], data['subscribers_count'])
def read_url(url):
hdr = { 'User-Agent' : 'PostMonitor' }
req = urllib2.Request(url, headers=hdr)
return urllib2.urlopen(req).read()
def load_config():
with open('conf.json', 'r') as f:
conf = json.loads(f.read())
return conf
if __name__ == "__main__":
main() | mit | -8,944,656,295,161,008,000 | 22.649254 | 125 | 0.665088 | false |
xuru/pyvisdk | pyvisdk/do/and_alarm_expression.py | 1 | 1027 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def AndAlarmExpression(vim, *args, **kwargs):
'''A data object type that links multiple alarm expressions with AND operators.'''
obj = vim.client.factory.create('ns0:AndAlarmExpression')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument got: %d' % len(args))
required = [ 'expression' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
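# Hypothetical usage sketch (the sub-expression names are placeholders):
#   expr = AndAlarmExpression(vim, [sub_expression_1, sub_expression_2])
#   # or, equivalently: AndAlarmExpression(vim, expression=[sub_expression_1, sub_expression_2])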
| mit | 5,071,025,375,882,246,000 | 30.151515 | 124 | 0.599805 | false |
olafhauk/mne-python | mne/viz/backends/renderer.py | 1 | 9968 | """Core visualization operations."""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Joan Massich <[email protected]>
# Guillaume Favelier <[email protected]>
#
# License: Simplified BSD
from contextlib import contextmanager
import importlib
from ._utils import VALID_3D_BACKENDS
from ...utils import logger, verbose, get_config, _check_option
MNE_3D_BACKEND = None
MNE_3D_BACKEND_TESTING = False
MNE_3D_BACKEND_INTERACTIVE = False
_backend_name_map = dict(
mayavi='._pysurfer_mayavi',
pyvista='._pyvista',
notebook='._notebook',
)
backend = None
def _reload_backend(backend_name):
global backend
backend = importlib.import_module(name=_backend_name_map[backend_name],
package='mne.viz.backends')
logger.info('Using %s 3d backend.\n' % backend_name)
def _get_renderer(*args, **kwargs):
set_3d_backend(_get_3d_backend(), verbose=False)
return backend._Renderer(*args, **kwargs)
@verbose
def set_3d_backend(backend_name, verbose=None):
"""Set the backend for MNE.
The backend will be set as specified and operations will use
that backend.
Parameters
----------
backend_name : str
The 3d backend to select. See Notes for the capabilities of each
backend.
%(verbose)s
Notes
-----
This table shows the capabilities of each backend ("✓" for full support,
and "-" for partial support):
.. table::
:widths: auto
+--------------------------------------+--------+---------+
| 3D function: | mayavi | pyvista |
+======================================+========+=========+
| :func:`plot_vector_source_estimates` | ✓ | ✓ |
+--------------------------------------+--------+---------+
| :func:`plot_source_estimates` | ✓ | ✓ |
+--------------------------------------+--------+---------+
| :func:`plot_alignment` | ✓ | ✓ |
+--------------------------------------+--------+---------+
| :func:`plot_sparse_source_estimates` | ✓ | ✓ |
+--------------------------------------+--------+---------+
| :func:`plot_evoked_field` | ✓ | ✓ |
+--------------------------------------+--------+---------+
| :func:`plot_sensors_connectivity` | ✓ | ✓ |
+--------------------------------------+--------+---------+
| :func:`snapshot_brain_montage` | ✓ | ✓ |
+--------------------------------------+--------+---------+
| :func:`link_brains` | | ✓ |
+--------------------------------------+--------+---------+
+--------------------------------------+--------+---------+
| **3D feature:** |
+--------------------------------------+--------+---------+
| Large data | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Opacity/transparency | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Support geometric glyph | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Jupyter notebook | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Interactivity in Jupyter notebook | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Smooth shading | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Subplotting | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Save offline movie | ✓ | ✓ |
+--------------------------------------+--------+---------+
| Point picking | | ✓ |
+--------------------------------------+--------+---------+
.. note::
        In the case of `plot_vector_source_estimates` with PyVista, the glyph
        size is not consistent with Mayavi; it is also possible that a dark
        filter is visible on the mesh when depth peeling is not available.
"""
global MNE_3D_BACKEND
try:
MNE_3D_BACKEND
except NameError:
MNE_3D_BACKEND = backend_name
_check_option('backend_name', backend_name, VALID_3D_BACKENDS)
if MNE_3D_BACKEND != backend_name:
_reload_backend(backend_name)
MNE_3D_BACKEND = backend_name
def get_3d_backend():
"""Return the backend currently used.
Returns
-------
backend_used : str | None
The 3d backend currently in use. If no backend is found,
returns ``None``.
"""
try:
backend = _get_3d_backend()
except RuntimeError:
return None
return backend
def _get_3d_backend():
"""Load and return the current 3d backend."""
global MNE_3D_BACKEND
if MNE_3D_BACKEND is None:
MNE_3D_BACKEND = get_config(key='MNE_3D_BACKEND', default=None)
if MNE_3D_BACKEND is None: # try them in order
for name in VALID_3D_BACKENDS:
try:
_reload_backend(name)
except ImportError:
continue
else:
MNE_3D_BACKEND = name
break
else:
raise RuntimeError(f'Could not load any valid 3D backend: '
f'{", ".join(VALID_3D_BACKENDS)}')
else:
_check_option('MNE_3D_BACKEND', MNE_3D_BACKEND, VALID_3D_BACKENDS)
_reload_backend(MNE_3D_BACKEND)
else:
_check_option('MNE_3D_BACKEND', MNE_3D_BACKEND, VALID_3D_BACKENDS)
return MNE_3D_BACKEND
@contextmanager
def use_3d_backend(backend_name):
"""Create a viz context.
Parameters
----------
backend_name : str
The 3d backend to use in the context.
"""
old_backend = _get_3d_backend()
set_3d_backend(backend_name)
try:
yield
finally:
try:
set_3d_backend(old_backend)
except Exception:
pass
@contextmanager
def _use_test_3d_backend(backend_name, interactive=False):
"""Create a testing viz context.
Parameters
----------
backend_name : str
The 3d backend to use in the context.
interactive : bool
If True, ensure interactive elements are accessible.
"""
global MNE_3D_BACKEND_TESTING
orig_testing = MNE_3D_BACKEND_TESTING
MNE_3D_BACKEND_TESTING = True
try:
with use_3d_backend(backend_name):
with backend._testing_context(interactive):
yield
finally:
MNE_3D_BACKEND_TESTING = orig_testing
def set_3d_view(figure, azimuth=None, elevation=None,
focalpoint=None, distance=None, roll=None,
reset_camera=True):
"""Configure the view of the given scene.
Parameters
----------
figure : object
The scene which is modified.
azimuth : float
The azimuthal angle of the view.
elevation : float
The zenith angle of the view.
focalpoint : tuple, shape (3,)
The focal point of the view: (x, y, z).
distance : float
The distance to the focal point.
roll : float
The view roll.
reset_camera : bool
If True, reset the camera properties beforehand.
"""
backend._set_3d_view(figure=figure, azimuth=azimuth,
elevation=elevation, focalpoint=focalpoint,
distance=distance, roll=roll,
reset_camera=reset_camera)
def set_3d_title(figure, title, size=40):
"""Configure the title of the given scene.
Parameters
----------
figure : object
The scene which is modified.
title : str
The title of the scene.
size : int
The size of the title.
"""
backend._set_3d_title(figure=figure, title=title, size=size)
def create_3d_figure(size, bgcolor=(0, 0, 0), smooth_shading=True,
handle=None, scene=True):
"""Return an empty figure based on the current 3d backend.
.. warning:: Proceed with caution when the renderer object is
returned (with ``scene=False``) because the _Renderer
API is not necessarily stable enough for production,
it's still actively in development.
Parameters
----------
size : tuple
The dimensions of the 3d figure (width, height).
bgcolor : tuple
The color of the background.
smooth_shading : bool
If True, smooth shading is enabled. Defaults to True.
handle : int | None
The figure identifier.
scene : bool
Specify if the returned object is the scene. If False,
the renderer object is returned. Defaults to True.
Returns
-------
figure : object
The requested empty scene or the renderer object if
``scene=False``.
"""
renderer = _get_renderer(
fig=handle,
size=size,
bgcolor=bgcolor,
smooth_shading=smooth_shading,
)
if scene:
return renderer.scene()
else:
return renderer
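# Usage sketch (comments only; sizes, angles and the title are illustrative):
#
#   fig = create_3d_figure(size=(600, 400), bgcolor=(1., 1., 1.))
#   # ... add actors to ``fig`` with the plotting functions listed above ...
#   set_3d_view(fig, azimuth=40, elevation=60, distance=0.4)
#   set_3d_title(fig, 'My scene', size=20)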
def get_brain_class():
"""Return the proper Brain class based on the current 3d backend.
Returns
-------
brain : object
The Brain class corresponding to the current 3d backend.
"""
if get_3d_backend() == "mayavi":
from surfer import Brain
else: # PyVista
from ...viz._brain import Brain
return Brain
| bsd-3-clause | 8,546,429,251,597,297,000 | 32.117057 | 78 | 0.474248 | false |
all-of-us/raw-data-repository | rdr_service/genomic/genomic_job_components.py | 1 | 134110 | """
Component Classes for Genomic Jobs
Components are assembled by the JobController for a particular Genomic Job
"""
import csv
import logging
import re
import pytz
from collections import deque, namedtuple
from copy import deepcopy
from dateutil.parser import parse
import sqlalchemy
from rdr_service import clock
from rdr_service.dao.bq_genomics_dao import bq_genomic_set_member_update, bq_genomic_gc_validation_metrics_update, \
bq_genomic_set_update, bq_genomic_file_processed_update, \
bq_genomic_manifest_file_update, bq_genomic_set_member_batch_update
from rdr_service.dao.code_dao import CodeDao
from rdr_service.genomic.genomic_data import GenomicQueryClass
from rdr_service.genomic.genomic_state_handler import GenomicStateHandler
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.code import Code
from rdr_service.model.participant_summary import ParticipantRaceAnswers, ParticipantSummary
from rdr_service.model.config_utils import get_biobank_id_prefix
from rdr_service.resource.generators.genomics import genomic_set_member_update, genomic_gc_validation_metrics_update, \
genomic_set_update, genomic_file_processed_update, genomic_manifest_file_update, genomic_set_member_batch_update
from rdr_service.services.jira_utils import JiraTicketHandler
from rdr_service.api_util import (
open_cloud_file,
copy_cloud_file,
delete_cloud_file,
list_blobs,
get_blob)
from rdr_service.model.genomics import (
GenomicSet,
GenomicSetMember,
GenomicGCValidationMetrics,
GenomicSampleContamination,
GenomicAW1Raw,
GenomicAW2Raw)
from rdr_service.participant_enums import (
WithdrawalStatus,
QuestionnaireStatus,
SampleStatus,
Race,
SuspensionStatus,
ParticipantCohort)
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicValidationFlag, GenomicJob, \
GenomicWorkflowState, GenomicSubProcessStatus, GenomicSubProcessResult, GenomicManifestTypes, \
GenomicContaminationCategory, GenomicQcStatus, GenomicIncidentCode
from rdr_service.dao.genomics_dao import (
GenomicGCValidationMetricsDao,
GenomicSetMemberDao,
GenomicFileProcessedDao,
GenomicSetDao,
GenomicJobRunDao,
GenomicManifestFeedbackDao,
GenomicManifestFileDao,
GenomicAW1RawDao,
GenomicAW2RawDao)
from rdr_service.dao.biobank_stored_sample_dao import BiobankStoredSampleDao
from rdr_service.dao.site_dao import SiteDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.genomic.genomic_biobank_manifest_handler import (
create_and_upload_genomic_biobank_manifest_file,
)
from rdr_service.genomic.validation import (
GENOMIC_VALID_AGE,
)
from rdr_service.offline.sql_exporter import SqlExporter
from rdr_service.config import (
getSetting,
GENOMIC_CVL_RECONCILIATION_REPORT_SUBFOLDER,
CVL_W1_MANIFEST_SUBFOLDER,
CVL_W3_MANIFEST_SUBFOLDER,
GENOMIC_GEM_A1_MANIFEST_SUBFOLDER,
GENOMIC_GEM_A3_MANIFEST_SUBFOLDER,
GENOME_TYPE_ARRAY,
GENOME_TYPE_WGS,
GAE_PROJECT,
GENOMIC_AW3_ARRAY_SUBFOLDER,
GENOMIC_AW3_WGS_SUBFOLDER,
BIOBANK_AW2F_SUBFOLDER,
)
from rdr_service.code_constants import COHORT_1_REVIEW_CONSENT_YES_CODE
from sqlalchemy.orm import aliased
class GenomicFileIngester:
"""
This class ingests a file from a source GC bucket into the destination table
"""
def __init__(self, job_id=None,
job_run_id=None,
bucket=None,
archive_folder=None,
sub_folder=None,
_controller=None,
target_file=None):
self.controller = _controller
self.job_id = job_id
self.job_run_id = job_run_id
self.file_obj = None
self.file_queue = deque()
self.target_file = target_file
self.bucket_name = bucket
self.archive_folder_name = archive_folder
self.sub_folder_name = sub_folder
# Sub Components
self.file_validator = GenomicFileValidator(
job_id=self.job_id,
controller=self.controller
)
self.file_mover = GenomicFileMover(archive_folder=self.archive_folder_name)
self.metrics_dao = GenomicGCValidationMetricsDao()
self.file_processed_dao = GenomicFileProcessedDao()
self.member_dao = GenomicSetMemberDao()
self.job_run_dao = GenomicJobRunDao()
self.sample_dao = BiobankStoredSampleDao()
self.feedback_dao = GenomicManifestFeedbackDao()
self.manifest_dao = GenomicManifestFileDao()
def generate_file_processing_queue(self):
"""
Creates the list of files to be ingested in this run.
        Ordering is currently arbitrary.
"""
# Check Target file is set.
# It will not be set in cron job, but will be set by tool when run manually
_manifest_file_id = None
try:
_manifest_file_id = self.controller.task_data.manifest_file.id
except AttributeError:
pass
if self.target_file is not None:
if self.controller.storage_provider is not None:
_blob = self.controller.storage_provider.get_blob(self.bucket_name, self.target_file)
else:
_blob = get_blob(self.bucket_name, self.target_file)
files = [(self.target_file, _blob.updated)]
else:
files = self._get_new_file_names_and_upload_dates_from_bucket()
if files == GenomicSubProcessResult.NO_FILES:
return files
else:
for file_data in files:
new_file_record = self.file_processed_dao.insert_file_record(
self.job_run_id,
f'{self.bucket_name}/{file_data[0]}',
self.bucket_name,
file_data[0].split('/')[-1],
upload_date=file_data[1],
manifest_file_id=_manifest_file_id)
# For BQ/PDR
bq_genomic_file_processed_update(new_file_record.id, project_id=self.controller.bq_project_id)
genomic_file_processed_update(new_file_record.id)
self.file_queue.append(new_file_record)
def _get_new_file_names_and_upload_dates_from_bucket(self):
"""
Searches the bucket for un-processed files.
:return: list of (filenames, upload_date) or NO_FILES result code
"""
# Setup date
timezone = pytz.timezone('Etc/Greenwich')
date_limit_obj = timezone.localize(self.controller.last_run_time)
# Look for new files with valid filenames
bucket = '/' + self.bucket_name
files = list_blobs(bucket, prefix=self.sub_folder_name)
files = [(s.name, s.updated) for s in files
if s.updated > date_limit_obj
and self.file_validator.validate_filename(s.name)]
if not files:
logging.info('No files in cloud bucket {}'.format(self.bucket_name))
return GenomicSubProcessResult.NO_FILES
return files
def generate_file_queue_and_do_ingestion(self):
"""
Main method of the ingestor component,
generates a queue and processes each file
:return: result code
"""
file_queue_result = self.generate_file_processing_queue()
if file_queue_result == GenomicSubProcessResult.NO_FILES:
logging.info('No files to process.')
return file_queue_result
else:
logging.info('Processing files in queue.')
results = []
while len(self.file_queue):
try:
ingestion_result = self._ingest_genomic_file(
self.file_queue[0])
file_ingested = self.file_queue.popleft()
results.append(ingestion_result == GenomicSubProcessResult.SUCCESS)
if ingestion_result:
ingestion_message = f'Ingestion attempt for {file_ingested.fileName}: {ingestion_result}'
if 'invalid' in ingestion_result.name.lower():
logging.warning(ingestion_message)
else:
logging.info(ingestion_message)
self.file_processed_dao.update_file_record(
file_ingested.id,
GenomicSubProcessStatus.COMPLETED,
ingestion_result
)
# For BQ/PDR
bq_genomic_file_processed_update(file_ingested.id, self.controller.bq_project_id)
genomic_file_processed_update(file_ingested.id)
except IndexError:
logging.info('No files left in file queue.')
return GenomicSubProcessResult.SUCCESS if all(results) \
else GenomicSubProcessResult.ERROR
def _ingest_genomic_file(self, file_obj):
"""
Reads a file object from bucket and inserts into DB
:param: file_obj: A genomic file object
:return: A GenomicSubProcessResultCode
"""
self.file_obj = file_obj
data_to_ingest = self._retrieve_data_from_path(self.file_obj.filePath)
if data_to_ingest == GenomicSubProcessResult.ERROR:
return GenomicSubProcessResult.ERROR
elif data_to_ingest:
logging.info(f'Ingesting data from {self.file_obj.fileName}')
logging.info("Validating file.")
self.file_validator.valid_schema = None
validation_result = self.file_validator.validate_ingestion_file(
filename=self.file_obj.fileName,
data_to_validate=data_to_ingest
)
if validation_result != GenomicSubProcessResult.SUCCESS:
return validation_result
ingestion_config = {
GenomicJob.AW1_MANIFEST: {
'method': self._ingest_aw1_manifest
},
GenomicJob.AW1F_MANIFEST: {
'method': self._ingest_aw1_manifest
},
GenomicJob.METRICS_INGESTION: {
'method': self._process_gc_metrics_data_for_insert
},
GenomicJob.GEM_A2_MANIFEST: {
'method': self._ingest_gem_a2_manifest
},
GenomicJob.GEM_METRICS_INGEST: {
'method': self._ingest_gem_metrics_manifest
},
GenomicJob.W2_INGEST: {
'method': self._ingest_cvl_w2_manifest
},
GenomicJob.AW4_ARRAY_WORKFLOW: {
'method': self._ingest_aw4_manifest
},
GenomicJob.AW4_WGS_WORKFLOW: {
'method': self._ingest_aw4_manifest
},
GenomicJob.AW1C_INGEST: {
'method': self._ingest_aw1c_manifest
},
GenomicJob.AW1CF_INGEST: {
'method': self._ingest_aw1c_manifest
},
GenomicJob.AW5_ARRAY_MANIFEST: {
'method': self._ingest_aw5_manifest
},
GenomicJob.AW5_WGS_MANIFEST: {
'method': self._ingest_aw5_manifest
},
}
ingestion_type = ingestion_config[self.job_id]['method']
return ingestion_type(data_to_ingest)
else:
logging.info("No data to ingest.")
return GenomicSubProcessResult.NO_FILES
@staticmethod
def get_aw1_manifest_column_mappings():
return {
'packageId': 'packageid',
'sampleId': 'sampleid',
'gcManifestBoxStorageUnitId': 'boxstorageunitid',
'gcManifestBoxPlateId': 'boxid/plateid',
'gcManifestWellPosition': 'wellposition',
'gcManifestParentSampleId': 'parentsampleid',
'collectionTubeId': 'collectiontubeid',
'gcManifestMatrixId': 'matrixid',
'gcManifestTreatments': 'treatments',
'gcManifestQuantity_ul': 'quantity(ul)',
'gcManifestTotalConcentration_ng_per_ul': 'totalconcentration(ng/ul)',
'gcManifestTotalDNA_ng': 'totaldna(ng)',
'gcManifestVisitDescription': 'visitdescription',
'gcManifestSampleSource': 'samplesource',
'gcManifestStudy': 'study',
'gcManifestTrackingNumber': 'trackingnumber',
'gcManifestContact': 'contact',
'gcManifestEmail': 'email',
'gcManifestStudyPI': 'studypi',
'gcManifestTestName': 'testname',
'gcManifestFailureMode': 'failuremode',
'gcManifestFailureDescription': 'failuremodedesc',
}
@staticmethod
def get_aw1_raw_column_mappings():
return {
"package_id": "packageid",
"biobankid_sample_id": "biobankidsampleid",
"box_storageunit_id": "boxstorageunitid",
"box_id_plate_id": "boxid/plateid",
"well_position": "wellposition",
"sample_id": "sampleid",
"parent_sample_id": "parentsampleid",
"collection_tube_id": "collectiontubeid",
"matrix_id": "matrixid",
"collection_date": "collectiondate",
"biobank_id": "biobankid",
"sex_at_birth": "sexatbirth",
"age": "age",
"ny_state": "nystate(y/n)",
"sample_type": "sampletype",
"treatments": "treatments",
"quantity": "quantity(ul)",
"total_concentration": "totalconcentration(ng/ul)",
"total_dna": "totaldna(ng)",
"visit_description": "visitdescription",
"sample_source": "samplesource",
"study": "study",
"tracking_number": "trackingnumber",
"contact": "contact",
"email": "email",
"study_pi": "studypi",
"test_name": "testname",
"failure_mode": "failuremode",
"failure_mode_desc": "failuremodedesc",
}
@staticmethod
def get_aw2_raw_column_mappings():
return {
"biobank_id": "biobankid",
"sample_id": "sampleid",
"biobankidsampleid": "biobankidsampleid",
"lims_id": "limsid",
"mean_coverage": "meancoverage",
"genome_coverage": "genomecoverage",
"aouhdr_coverage": "aouhdrcoverage",
"contamination": "contamination",
"sex_concordance": "sexconcordance",
"sex_ploidy": "sexploidy",
"aligned_q30_bases": "alignedq30bases",
"array_concordance": "arrayconcordance",
"processing_status": "processingstatus",
"notes": "notes",
"chipwellbarcode": "chipwellbarcode",
"call_rate": "callrate",
}
def _ingest_aw1_manifest(self, data):
"""
AW1 ingestion method: Updates the GenomicSetMember with AW1 data
If the row is determined to be a control sample,
insert a new GenomicSetMember with AW1 data
        :param data: AW1 manifest data as a dict with a 'rows' list
:return: result code
"""
_state = GenomicWorkflowState.AW0
_site = self._get_site_from_aw1()
for row in data['rows']:
row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
row_copy['site_id'] = _site
# TODO: Disabling this fix but leaving in
            # until verified that this issue has been fixed in the manifest
# Fix for invalid parent sample values
# try:
# parent_sample_id = int(row_copy['parentsampleid'])
# except ValueError:
# parent_sample_id = 0
# Skip rows if biobank_id is an empty string (row is empty well)
if row_copy['biobankid'] == "":
continue
# Check if this sample has a control sample parent tube
control_sample_parent = self.member_dao.get_control_sample_parent(
row_copy['testname'],
int(row_copy['parentsampleid'])
)
if control_sample_parent:
logging.warning(f"Control sample found: {row_copy['parentsampleid']}")
# Check if the control sample member exists for this GC, BID, collection tube, and sample ID
# Since the Biobank is reusing the sample and collection tube IDs (which are supposed to be unique)
cntrl_sample_member = self.member_dao.get_control_sample_for_gc_and_genome_type(
_site,
row_copy['testname'],
row_copy['biobankid'],
row_copy['collectiontubeid'],
row_copy['sampleid']
)
if not cntrl_sample_member:
# Insert new GenomicSetMember record if none exists
# for this control sample, genome type, and gc site
member = self.create_new_member_from_aw1_control_sample(row_copy)
# Update member for PDR
bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
genomic_set_member_update(member.id)
# Skip rest of iteration and go to next row
continue
# Find the existing GenomicSetMember
# Set the member based on collection tube ID
# row_copy['testname'] is the genome type (i.e. aou_array, aou_wgs)
member = self.member_dao.get_member_from_collection_tube(row_copy['collectiontubeid'],
row_copy['testname'])
# Since member not found, and not a control sample,
# check if collection tube id was swapped by Biobank
if member is None:
bid = row_copy['biobankid']
# Strip biobank prefix if it's there
if bid[0] in [get_biobank_id_prefix(), 'T']:
bid = bid[1:]
member = self.member_dao.get_member_from_biobank_id_in_state(bid,
row_copy['testname'],
_state)
# If member found, validate new collection tube ID, set collection tube ID
if member:
if self._validate_collection_tube_id(row_copy['collectiontubeid'], bid):
with self.member_dao.session() as session:
self._record_sample_as_contaminated(session, member.collectionTubeId)
member.collectionTubeId = row_copy['collectiontubeid']
else:
# Couldn't find genomic set member based on either biobank ID or collection tube
_message = f"{self.job_id.name}: Cannot find genomic set member: " \
f"collection_tube_id: {row_copy['collectiontubeid']}, "\
f"biobank id: {bid}, "\
f"genome type: {row_copy['testname']}"
self.controller.create_incident(source_job_run_id=self.job_run_id,
source_file_processed_id=self.file_obj.id,
code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
message=_message,
biobank_id=bid,
collection_tube_id=row_copy['collectiontubeid'],
sample_id=row_copy['sampleid'],
)
# Skip rest of iteration and continue processing file
continue
# Process the attribute data
member_changed, member = self._process_aw1_attribute_data(row_copy, member)
if member_changed:
self.member_dao.update(member)
# Update member for PDR
bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
genomic_set_member_update(member.id)
return GenomicSubProcessResult.SUCCESS
def load_raw_awn_file(self):
"""
Loads genomic_aw1_raw/genomic_aw2_raw
with raw data from aw1/aw2 file
:return:
"""
# Set manifest-specific variables
if self.controller.job_id == GenomicJob.LOAD_AW1_TO_RAW_TABLE:
dao = GenomicAW1RawDao()
awn_model = GenomicAW1Raw
columns = self.get_aw1_raw_column_mappings()
elif self.controller.job_id == GenomicJob.LOAD_AW2_TO_RAW_TABLE:
dao = GenomicAW2RawDao()
awn_model = GenomicAW2Raw
columns = self.get_aw2_raw_column_mappings()
else:
logging.error("Job ID not LOAD_AW1_TO_RAW_TABLE or LOAD_AW2_TO_RAW_TABLE")
return GenomicSubProcessResult.ERROR
# look up if any rows exist already for the file
records = dao.get_from_filepath(self.target_file)
if records:
logging.warning(f'File already exists in raw table: {self.target_file}')
return GenomicSubProcessResult.SUCCESS
file_data = self._retrieve_data_from_path(self.target_file)
# Return the error status if there is an error in file_data
if not isinstance(file_data, dict):
return file_data
# Processing raw data in batches
batch_size = 100
item_count = 0
batch = list()
for row in file_data['rows']:
# Standardize fields to lower, no underscores or spaces
row = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
row_obj = self._set_raw_awn_attributes(row, awn_model(), columns)
batch.append(row_obj)
item_count += 1
if item_count == batch_size:
# Insert batch into DB
with dao.session() as session:
session.bulk_save_objects(batch)
# Reset batch
item_count = 0
batch = list()
if item_count:
# insert last batch if needed
with dao.session() as session:
session.bulk_save_objects(batch)
return GenomicSubProcessResult.SUCCESS
def ingest_single_aw1_row_for_member(self, member):
# Open file and pull row based on member.biobankId
with self.controller.storage_provider.open(self.target_file, 'r') as aw1_file:
reader = csv.DictReader(aw1_file, delimiter=',')
row = [r for r in reader if r['BIOBANK_ID'][1:] == str(member.biobankId)][0]
# Alter field names to remove spaces and change to lower case
row = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
ingested_before = member.reconcileGCManifestJobRunId is not None
# Write AW1 data to genomic_set_member table
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
# Set attributes from file
for key in gc_manifest_column_mappings.keys():
try:
member.__setattr__(key, row[gc_manifest_column_mappings[key]])
except KeyError:
member.__setattr__(key, None)
# Set other fields not in AW1 file
member.reconcileGCManifestJobRunId = self.job_run_id
member.aw1FileProcessedId = self.file_obj.id
member.gcSite = self._get_site_from_aw1()
# Only update the member's genomicWorkflowState if it was AW0
if member.genomicWorkflowState == GenomicWorkflowState.AW0:
member.genomicWorkflowState = GenomicWorkflowState.AW1
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
# Update member in DB
self.member_dao.update(member)
# Update AW1 manifest record count
if not ingested_before and not self.controller.bypass_record_count:
self.increment_manifest_file_record_count_from_id()
return GenomicSubProcessResult.SUCCESS
def ingest_single_aw2_row_for_member(self, member: GenomicSetMember) -> GenomicSubProcessResult:
# Open file and pull row based on member.biobankId
        with self.controller.storage_provider.open(self.target_file, 'r') as aw2_file:
            reader = csv.DictReader(aw2_file, delimiter=',')
row = [r for r in reader if r['Biobank ID'] == str(member.biobankId)][0]
# Alter field names to remove spaces and change to lower case
row = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
        # Begin prepping the AW2 row
row = self.prep_aw2_row_attributes(row, member)
if row == GenomicSubProcessResult.ERROR:
return GenomicSubProcessResult.ERROR
# check whether metrics object exists for that member
existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
if existing_metrics_obj is not None:
metric_id = existing_metrics_obj.id
else:
metric_id = None
upserted_obj = self.metrics_dao.upsert_gc_validation_metrics_from_dict(row, metric_id)
# Update GC Metrics for PDR
if upserted_obj:
bq_genomic_gc_validation_metrics_update(upserted_obj.id, project_id=self.controller.bq_project_id)
genomic_gc_validation_metrics_update(upserted_obj.id)
self.update_member_for_aw2(member)
# Update member in DB
self.member_dao.update(member)
# Update AW1 manifest feedback record count
if existing_metrics_obj is None and not self.controller.bypass_record_count:
# For feedback manifest loop
# Get the genomic_manifest_file
manifest_file = self.file_processed_dao.get(member.aw1FileProcessedId)
if manifest_file is not None:
self.feedback_dao.increment_feedback_count(manifest_file.genomicManifestFileId,
_project_id=self.controller.bq_project_id)
return GenomicSubProcessResult.SUCCESS
def increment_manifest_file_record_count_from_id(self):
"""
Increments the manifest record count by 1
"""
manifest_file = self.manifest_dao.get(self.file_obj.genomicManifestFileId)
manifest_file.recordCount += 1
with self.manifest_dao.session() as s:
s.merge(manifest_file)
bq_genomic_manifest_file_update(manifest_file.id, project_id=self.controller.bq_project_id)
genomic_manifest_file_update(manifest_file.id)
def prep_aw2_row_attributes(self, row: dict, member: GenomicSetMember):
"""
Set contamination, contamination category,
call rate, member_id, and file_id on AW2 row dictionary
:param member:
:param row:
:return: row dictionary or ERROR code
"""
row['member_id'] = member.id
row['file_id'] = self.file_obj.id
# Truncate call rate
try:
row['callrate'] = row['callrate'][:10]
except KeyError:
pass
# Convert blank alignedq30bases to none
try:
if row['alignedq30bases'] == '':
row['alignedq30bases'] = None
except KeyError:
pass
# Validate and clean contamination data
try:
row['contamination'] = float(row['contamination'])
# Percentages shouldn't be less than 0
if row['contamination'] < 0:
row['contamination'] = 0
except ValueError:
if row['processingstatus'].lower() != 'pass':
return row
_message = f'{self.job_id.name}: Contamination must be a number for sample_id: {row["sampleid"]}'
self.controller.create_incident(source_job_run_id=self.job_run_id,
source_file_processed_id=self.file_obj.id,
code=GenomicIncidentCode.DATA_VALIDATION_FAILED.name,
message=_message,
biobank_id=member.biobankId,
sample_id=row['sampleid'],
)
return GenomicSubProcessResult.ERROR
# Calculate contamination_category
contamination_value = float(row['contamination'])
category = self.calculate_contamination_category(member.collectionTubeId,
contamination_value, member)
row['contamination_category'] = category
return row
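    # Illustrative effect of prep_aw2_row_attributes (comments only; sample values
    # are hypothetical):
    #   in:  {'sampleid': '1001', 'contamination': '-0.002', 'callrate': '0.99876543210', ...}
    #   out: member_id/file_id set, contamination clamped to 0.0, callrate truncated
    #        to 10 characters, and contamination_category set from
    #        calculate_contamination_category().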
def update_member_for_aw2(self, member: GenomicSetMember):
"""
Updates the aw2FileProcessedId and possibly the genomicWorkflowState
of a GenomicSetMember after AW2 data has been ingested
:param member:
"""
member.aw2FileProcessedId = self.file_obj.id
# Only update the state if it was AW1
if member.genomicWorkflowState == GenomicWorkflowState.AW1:
member.genomicWorkflowState = GenomicWorkflowState.AW2
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
self.member_dao.update(member)
def _ingest_gem_a2_manifest(self, file_data):
"""
Processes the GEM A2 manifest file data
Updates GenomicSetMember object with gem_pass field.
:return: Result Code
"""
try:
for row in file_data['rows']:
sample_id = row['sample_id']
member = self.member_dao.get_member_from_sample_id_with_state(sample_id,
GENOME_TYPE_ARRAY,
GenomicWorkflowState.A1)
if member is None:
logging.warning(f'Invalid sample ID: {sample_id}')
continue
member.gemPass = row['success']
member.gemA2ManifestJobRunId = self.job_run_id
member.gemDateOfImport = parse(row['date_of_import'])
_signal = 'a2-gem-pass' if member.gemPass.lower() == 'y' else 'a2-gem-fail'
# update state and state modifed time only if changed
if member.genomicWorkflowState != GenomicStateHandler.get_new_state(
member.genomicWorkflowState, signal=_signal):
member.genomicWorkflowState = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=_signal)
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
self.member_dao.update(member)
# Update member for PDR
bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
genomic_set_member_update(member.id)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_gem_metrics_manifest(self, file_data):
"""
Processes the GEM Metrics manifest file data
Updates GenomicSetMember object with metrics fields.
:return: Result Code
"""
try:
for row in file_data['rows']:
sample_id = row['sample_id']
member = self.member_dao.get_member_from_sample_id_with_state(sample_id,
GENOME_TYPE_ARRAY,
GenomicWorkflowState.GEM_RPT_READY)
if member is None:
logging.warning(f'Invalid sample ID: {sample_id}')
continue
member.gemMetricsAncestryLoopResponse = row['ancestry_loop_response']
member.gemMetricsAvailableResults = row['available_results']
member.gemMetricsResultsReleasedAt = row['results_released_at']
member.colorMetricsJobRunID = self.job_run_id
self.member_dao.update(member)
# Update member for PDR
bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
genomic_set_member_update(member.id)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_aw4_manifest(self, file_data):
"""
Processes the AW4 manifest file data
:param file_data:
:return:
"""
try:
for row in file_data['rows']:
row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
sample_id = row_copy['sampleid']
genome_type = GENOME_TYPE_ARRAY \
if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW else GENOME_TYPE_WGS
member = self.member_dao.get_member_from_aw3_sample(sample_id,
genome_type)
if member is None:
logging.warning(f'Invalid sample ID: {sample_id}')
continue
member.aw4ManifestJobRunID = self.job_run_id
member.qcStatus = self._get_qc_status_from_value(row_copy['qcstatus'])
metrics = self.metrics_dao.get_metrics_by_member_id(member.id)
if metrics:
metrics.drcSexConcordance = row_copy['drcsexconcordance']
if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW:
metrics.drcCallRate = row_copy['drccallrate']
elif self.job_id == GenomicJob.AW4_WGS_WORKFLOW:
metrics.drcContamination = row_copy['drccontamination']
metrics.drcMeanCoverage = row_copy['drcmeancoverage']
metrics.drcFpConcordance = row_copy['drcfpconcordance']
metrics_obj = self.metrics_dao.upsert(metrics)
bq_genomic_gc_validation_metrics_update(metrics_obj.id, project_id=self.controller.bq_project_id)
genomic_gc_validation_metrics_update(metrics_obj.id)
self.member_dao.update(member)
# Update member for PDR
bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
genomic_set_member_update(member.id)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _retrieve_data_from_path(self, path):
"""
Retrieves the last genomic data file from a bucket
:param path: The source file to ingest
:return: CSV data as a dictionary
"""
try:
filename = path.split('/')[1]
logging.info(
'Opening CSV file from queue {}: {}.'
.format(path.split('/')[1], filename)
)
if self.controller.storage_provider:
with self.controller.storage_provider.open(path, 'r') as csv_file:
return self._read_data_to_ingest(csv_file)
else:
with open_cloud_file(path) as csv_file:
return self._read_data_to_ingest(csv_file)
except FileNotFoundError:
logging.error(f"File path '{path}' not found")
return GenomicSubProcessResult.ERROR
@staticmethod
def _read_data_to_ingest(csv_file):
data_to_ingest = {'rows': []}
csv_reader = csv.DictReader(csv_file, delimiter=",")
data_to_ingest['fieldnames'] = csv_reader.fieldnames
for row in csv_reader:
for key in row:
if not key:
del row[key]
data_to_ingest['rows'].append(row)
return data_to_ingest
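    # Shape of the dict returned by _read_data_to_ingest (column names are
    # illustrative):
    #   {'fieldnames': ['BIOBANK_ID', 'SAMPLE_ID', ...],
    #    'rows': [{'BIOBANK_ID': 'T1000001', 'SAMPLE_ID': '22001', ...}, ...]}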
def _process_aw1_attribute_data(self, aw1_data, member):
"""
Checks a GenomicSetMember object for changes provided by AW1 data
And mutates the GenomicSetMember object if necessary
:param aw1_data: dict
:param member: GenomicSetMember
:return: (boolean, GenomicSetMember)
"""
# Check if the member needs updating
if self._test_aw1_data_for_member_updates(aw1_data, member):
member = self._set_member_attributes_from_aw1(aw1_data, member)
member = self._set_rdr_member_attributes_for_aw1(aw1_data, member)
return True, member
return False, member
def _test_aw1_data_for_member_updates(self, aw1_data, member):
"""
Checks each attribute provided by Biobank
for changes to GenomicSetMember Object
:param aw1_data: dict
:param member: GenomicSetMember
:return: boolean (true if member requires updating)
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
member_needs_updating = False
# Iterate each value and test whether the strings for each field correspond
for key in gc_manifest_column_mappings.keys():
if str(member.__getattribute__(key)) != str(aw1_data.get(gc_manifest_column_mappings[key])):
member_needs_updating = True
return member_needs_updating
def _set_member_attributes_from_aw1(self, aw1_data, member):
"""
Mutates the GenomicSetMember attributes provided by the Biobank
:param aw1_data: dict
:param member: GenomicSetMember
:return: GenomicSetMember
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
for key in gc_manifest_column_mappings.keys():
member.__setattr__(key, aw1_data.get(gc_manifest_column_mappings[key]))
return member
def _set_rdr_member_attributes_for_aw1(self, aw1_data, member):
"""
Mutates the GenomicSetMember RDR attributes not provided by the Biobank
:param aw1_data: dict
:param member: GenomicSetMember
:return: GenomicSetMember
"""
# Set job run and file processed IDs
member.reconcileGCManifestJobRunId = self.job_run_id
# Don't overwrite aw1_file_processed_id when ingesting an AW1F
if self.job_id == GenomicJob.AW1_MANIFEST:
member.aw1FileProcessedId = self.file_obj.id
# Set the GC site ID (sourced from file-name)
member.gcSiteId = aw1_data['site_id']
# Only update the state if it was AW0 or AW1 (if in failure manifest workflow)
# We do not want to regress a state for reingested data
state_to_update = GenomicWorkflowState.AW0
if self.controller.job_id == GenomicJob.AW1F_MANIFEST:
state_to_update = GenomicWorkflowState.AW1
if member.genomicWorkflowState == state_to_update:
_signal = "aw1-reconciled"
# Set the signal for a failed sample
if aw1_data['failuremode'] is not None and aw1_data['failuremode'] != '':
_signal = 'aw1-failed'
member.genomicWorkflowState = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=_signal)
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
return member
def _set_raw_awn_attributes(self, awn_data, awn_row_obj, columns):
"""
Loads GenomicAW1Raw and GenomicAW2Raw attributes from awn_data
:param awn_data: dict
:param awn_row_obj: GenomicAW1Raw/GenomicAW2Raw object
:param columns: dict of column mappings (attribute name -> source column name)
:return: GenomicAW1Raw or GenomicAW2Raw
"""
awn_row_obj.file_path = self.target_file
awn_row_obj.created = clock.CLOCK.now()
awn_row_obj.modified = clock.CLOCK.now()
for key in columns.keys():
awn_row_obj.__setattr__(key, awn_data.get(columns[key]))
return awn_row_obj
def _process_gc_metrics_data_for_insert(self, data_to_ingest):
""" Since input files vary in column names,
this standardizes the field-names before passing to the bulk inserter
:param data_to_ingest: stream of data in dict format
:return result code
"""
# iterate over each row from CSV and insert into gc metrics table
for row in data_to_ingest['rows']:
# change all key names to lower
row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row],
row.values()))
genome_type = self.file_validator.genome_type
member = self.member_dao.get_member_from_sample_id(
int(row_copy['sampleid']),
genome_type
)
if member is not None:
row_copy = self.prep_aw2_row_attributes(row_copy, member)
if row_copy == GenomicSubProcessResult.ERROR:
continue
# check whether metrics object exists for that member
existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
if existing_metrics_obj is not None:
if self.controller.skip_updates:
# when running tool, updates can be skipped
continue
else:
metric_id = existing_metrics_obj.id
else:
metric_id = None
upserted_obj = self.metrics_dao.upsert_gc_validation_metrics_from_dict(row_copy, metric_id)
# Update GC Metrics for PDR
if upserted_obj:
bq_genomic_gc_validation_metrics_update(upserted_obj.id, project_id=self.controller.bq_project_id)
genomic_gc_validation_metrics_update(upserted_obj.id)
self.update_member_for_aw2(member)
# For feedback manifest loop
# Get the genomic_manifest_file
manifest_file = self.file_processed_dao.get(member.aw1FileProcessedId)
if manifest_file is not None and existing_metrics_obj is None:
self.feedback_dao.increment_feedback_count(manifest_file.genomicManifestFileId,
_project_id=self.controller.bq_project_id)
else:
bid = row_copy['biobankid']
if bid[0] in [get_biobank_id_prefix(), 'T']:
bid = bid[1:]
# Couldn't find genomic set member based on either biobank ID or sample ID
_message = f"{self.job_id.name}: Cannot find genomic set member for bid, sample_id: "\
f"{row_copy['biobankid']}, {row_copy['sampleid']}"
self.controller.create_incident(source_job_run_id=self.job_run_id,
source_file_processed_id=self.file_obj.id,
code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
message=_message,
biobank_id=bid,
sample_id=row_copy['sampleid'],
)
return GenomicSubProcessResult.SUCCESS
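# A minimal sketch (comments only) of the header normalization used throughout
# ingestion: keys are lower-cased and stripped of spaces and underscores before
# any column lookup. The sample row is hypothetical.
#   row = {'Biobank ID': 'T100', 'Sample_ID': '200'}
#   row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '') for key in row],
#                       row.values()))
#   # row_copy == {'biobankid': 'T100', 'sampleid': '200'}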
def _ingest_cvl_w2_manifest(self, file_data):
"""
Processes the CVL W2 manifest file data
:return: Result Code
"""
try:
for row in file_data['rows']:
# change all key names to lower
row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row],
row.values()))
biobank_id = row_copy['biobankid']
member = self.member_dao.get_member_from_biobank_id(biobank_id, GENOME_TYPE_WGS)
if member is None:
logging.warning(f'Invalid Biobank ID: {biobank_id}')
continue
member.genomeType = row_copy['testname']
member.cvlW2ManifestJobRunID = self.job_run_id
# update state and state modified time only if changed
if member.genomicWorkflowState != GenomicStateHandler.get_new_state(
member.genomicWorkflowState, signal='w2-ingestion-success'):
member.genomicWorkflowState = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal='w2-ingestion-success')
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
self.member_dao.update(member)
# Update member for PDR
bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
genomic_set_member_update(member.id)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_aw5_manifest(self, file_data):
try:
for row in file_data['rows']:
row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
biobank_id = row_copy['biobankid']
biobank_id = biobank_id[1:] if biobank_id[0].isalpha() else biobank_id
sample_id = row_copy['sampleid']
member = self.member_dao.get_member_from_biobank_id_and_sample_id(biobank_id, sample_id,
self.file_validator.genome_type)
if not member:
logging.warning(f'Cannot find genomic member record for biobank_id: '
f'{biobank_id} and sample_id: {sample_id}, skipping...')
continue
existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
if existing_metrics_obj is not None:
metric_id = existing_metrics_obj.id
else:
logging.warning(f'Cannot find metrics record for member id: '
f'{member.id}, skipping...')
continue
updated_obj = self.metrics_dao.update_gc_validation_metrics_deleted_flags_from_dict(row_copy,
metric_id)
# Update GC Metrics for PDR
if updated_obj:
bq_genomic_gc_validation_metrics_update(updated_obj.id, project_id=self.controller.bq_project_id)
genomic_gc_validation_metrics_update(updated_obj.id)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_aw1c_manifest(self, file_data):
"""
Processes the CVL AW1C manifest file data
:return: Result Code
"""
try:
for row in file_data['rows']:
row_copy = dict(zip([key.lower().replace(' ', '').replace('_', '')
for key in row], row.values()))
collection_tube_id = row_copy['collectiontubeid']
member = self.member_dao.get_member_from_collection_tube(collection_tube_id, GENOME_TYPE_WGS)
if member is None:
# Currently ignoring invalid cases
logging.warning(f'Invalid collection tube ID: {collection_tube_id}')
continue
# Update the AW1C job run ID and genome_type
member.cvlAW1CManifestJobRunID = self.job_run_id
member.genomeType = row_copy['testname']
# Handle genomic state
_signal = "aw1c-reconciled"
if row_copy['failuremode'] not in (None, ''):
member.gcManifestFailureMode = row_copy['failuremode']
member.gcManifestFailureDescription = row_copy['failuremodedesc']
_signal = 'aw1c-failed'
# update state and state modified time only if changed
if member.genomicWorkflowState != GenomicStateHandler.get_new_state(
member.genomicWorkflowState, signal=_signal):
member.genomicWorkflowState = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=_signal)
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
self.member_dao.update(member)
# Update member for PDR
bq_genomic_set_member_update(member.id, project_id=self.controller.bq_project_id)
genomic_set_member_update(member.id)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _get_site_from_aw1(self):
"""
Returns the Genomic Center's site ID from the AW1 filename
:return: GC site ID string
"""
return self.file_obj.fileName.split('/')[-1].split("_")[0].lower()
def _validate_collection_tube_id(self, collection_tube_id, bid):
"""
Returns True if the biobank_id is associated with the given
biobank_stored_sample_id (collection_tube_id)
:param collection_tube_id:
:param bid:
:return: boolean
"""
sample = self.sample_dao.get(collection_tube_id)
if sample:
return int(sample.biobankId) == int(bid)
return False
@staticmethod
def _get_qc_status_from_value(aw4_value):
"""
Returns the GenomicQcStatus enum value for the given AW4 value
:param aw4_value: string from AW4 file (PASS/FAIL)
:return: GenomicQcStatus
"""
if aw4_value.strip().lower() == 'pass':
return GenomicQcStatus.PASS
elif aw4_value.strip().lower() == 'fail':
return GenomicQcStatus.FAIL
else:
logging.warning(f'Value from AW4 "{aw4_value}" is not PASS/FAIL.')
return GenomicQcStatus.UNSET
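# Illustrative behaviour of the mapping above (whitespace and case are ignored;
# anything other than PASS/FAIL logs a warning and falls back to UNSET):
#   _get_qc_status_from_value(' Pass ')  -> GenomicQcStatus.PASS
#   _get_qc_status_from_value('FAIL')    -> GenomicQcStatus.FAIL
#   _get_qc_status_from_value('n/a')     -> GenomicQcStatus.UNSET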
def create_new_member_from_aw1_control_sample(self, aw1_data: dict) -> GenomicSetMember:
"""
Creates a new control sample GenomicSetMember in RDR based on AW1 data
These will look like regular GenomicSetMember samples
:param aw1_data: dict from aw1 row
:return: GenomicSetMember
"""
# Writing new genomic_set_member based on AW1 data
max_set_id = self.member_dao.get_collection_tube_max_set_id()[0]
# Insert new member with biobank_id and collection tube ID from AW1
new_member_obj = GenomicSetMember(
genomicSetId=max_set_id,
participantId=0,
biobankId=aw1_data['biobankid'],
collectionTubeId=aw1_data['collectiontubeid'],
validationStatus=GenomicSetMemberStatus.VALID,
genomeType=aw1_data['testname'],
genomicWorkflowState=GenomicWorkflowState.AW1
)
# Set member attributes from AW1
new_member_obj = self._set_member_attributes_from_aw1(aw1_data, new_member_obj)
new_member_obj = self._set_rdr_member_attributes_for_aw1(aw1_data, new_member_obj)
return self.member_dao.insert(new_member_obj)
@staticmethod
def _participant_has_potentially_clean_samples(session, biobank_id):
"""Check for any stored sample for the participant that is not contaminated
and is a 1ED04, 1ED10, or 1SAL2 test"""
query = session.query(BiobankStoredSample).filter(
BiobankStoredSample.biobankId == biobank_id,
BiobankStoredSample.status < SampleStatus.SAMPLE_NOT_RECEIVED
).outerjoin(GenomicSampleContamination).filter(
GenomicSampleContamination.id.is_(None),
BiobankStoredSample.test.in_(['1ED04', '1ED10', '1SAL2'])
)
exists_query = session.query(query.exists())
return exists_query.scalar()
def _record_sample_as_contaminated(self, session, sample_id):
session.add(GenomicSampleContamination(
sampleId=sample_id,
failedInJob=self.job_id
))
def calculate_contamination_category(self, sample_id, raw_contamination, member: GenomicSetMember):
"""
Takes contamination value from AW2 and calculates GenomicContaminationCategory
:param sample_id:
:param raw_contamination:
:param member:
:return: GenomicContaminationCategory
"""
ps_dao = ParticipantSummaryDao()
ps = ps_dao.get(member.participantId)
contamination_category = GenomicContaminationCategory.UNSET
# No Extract if contamination <1%
if raw_contamination < 0.01:
contamination_category = GenomicContaminationCategory.NO_EXTRACT
# Only extract WGS if contamination between 1 and 3 % inclusive AND ROR
elif (0.01 <= raw_contamination <= 0.03) and ps.consentForGenomicsROR == QuestionnaireStatus.SUBMITTED:
contamination_category = GenomicContaminationCategory.EXTRACT_WGS
# No Extract if contamination between 1 and 3 % inclusive and GROR is not Yes
elif (0.01 <= raw_contamination <= 0.03) and ps.consentForGenomicsROR != QuestionnaireStatus.SUBMITTED:
contamination_category = GenomicContaminationCategory.NO_EXTRACT
# Extract Both if contamination > 3%
elif raw_contamination > 0.03:
contamination_category = GenomicContaminationCategory.EXTRACT_BOTH
with ps_dao.session() as session:
if raw_contamination >= 0.01:
# Record in the contamination table, regardless of GROR consent
self._record_sample_as_contaminated(session, sample_id)
if contamination_category != GenomicContaminationCategory.NO_EXTRACT and \
not self._participant_has_potentially_clean_samples(session, member.biobankId):
contamination_category = GenomicContaminationCategory.TERMINAL_NO_EXTRACT
return contamination_category
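# Summary of the thresholds implemented above (raw_contamination is a fraction,
# not a percentage); the sample id in the sketch is hypothetical:
#   raw_contamination < 0.01                      -> NO_EXTRACT
#   0.01 <= raw_contamination <= 0.03, GROR yes   -> EXTRACT_WGS
#   0.01 <= raw_contamination <= 0.03, GROR other -> NO_EXTRACT
#   raw_contamination > 0.03                      -> EXTRACT_BOTH
# Any category other than NO_EXTRACT is downgraded to TERMINAL_NO_EXTRACT when the
# participant has no remaining potentially clean 1ED04/1ED10/1SAL2 sample, e.g.:
#   category = ingester.calculate_contamination_category(12345, 0.05, member)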
class GenomicFileValidator:
"""
This class validates the Genomic Centers files
"""
GENOME_TYPE_MAPPINGS = {
'gen': GENOME_TYPE_ARRAY,
'seq': GENOME_TYPE_WGS,
}
def __init__(self, filename=None, data=None, schema=None, job_id=None, controller=None):
self.filename = filename
self.data_to_validate = data
self.valid_schema = schema
self.job_id = job_id
self.genome_type = None
self.controller = controller
self.GC_METRICS_SCHEMAS = {
'seq': (
"biobankid",
"sampleid",
"biobankidsampleid",
"limsid",
"meancoverage",
"genomecoverage",
"aouhdrcoverage",
"contamination",
"sexconcordance",
"sexploidy",
"alignedq30bases",
"arrayconcordance",
"processingstatus",
"notes",
),
'gen': (
"biobankid",
"sampleid",
"biobankidsampleid",
"limsid",
"chipwellbarcode",
"callrate",
"sexconcordance",
"contamination",
"processingstatus",
"notes",
),
}
self.VALID_GENOME_CENTERS = ('uw', 'bam', 'bcm', 'bi', 'jh', 'rdr')
self.VALID_CVL_FACILITIES = ('rdr', 'color', 'uw', 'baylor')
self.GC_MANIFEST_SCHEMA = (
"packageid",
"biobankidsampleid",
"boxstorageunitid",
"boxid/plateid",
"wellposition",
"sampleid",
"parentsampleid",
"collectiontubeid",
"matrixid",
"collectiondate",
"biobankid",
"sexatbirth",
"age",
"nystate(y/n)",
"sampletype",
"treatments",
"quantity(ul)",
"totalconcentration(ng/ul)",
"totaldna(ng)",
"visitdescription",
"samplesource",
"study",
"trackingnumber",
"contact",
"email",
"studypi",
"testname",
"failuremode",
"failuremodedesc"
)
self.GEM_A2_SCHEMA = (
"biobankid",
"sampleid",
"success",
"dateofimport",
)
self.GEM_METRICS_SCHEMA = (
"biobankid",
"sampleid",
"ancestryloopresponse",
"availableresults",
"resultsreleasedat",
)
self.CVL_W2_SCHEMA = (
"genomicsetname",
"biobankid",
"sexatbirth",
"nyflag",
"siteid",
"secondaryvalidation",
"datesubmitted",
"testname",
)
self.AW4_ARRAY_SCHEMA = (
"biobankid",
"sampleid",
"sexatbirth",
"siteid",
"redidatpath",
"redidatmd5path",
"greenidatpath",
"greenidatmd5path",
"vcfpath",
"vcfindexpath",
"researchid",
"qcstatus",
"drcsexconcordance",
"drccallrate",
)
self.AW4_WGS_SCHEMA = (
"biobankid",
"sampleid",
"sexatbirth",
"siteid",
"vcfhfpath",
"vcfhfmd5path",
"vcfhfindexpath",
"vcfrawpath",
"vcfrawmd5path",
"vcfrawindexpath",
"crampath",
"crammd5path",
"craipath",
"researchid",
"qcstatus",
"drcsexconcordance",
"drccontamination",
"drcmeancoverage",
"drcfpconcordance",
)
self.AW5_WGS_SCHEMA = {
"biobankid",
"sampleid",
"biobankidsampleid",
"sexatbirth",
"siteid",
"vcfhf",
"vcfhfindex",
"vcfhfmd5",
"vcfraw",
"vcfrawindex",
"vcfrawmd5",
"cram",
"crammd5",
"crai",
"gvcf",
"gvcfmd5",
}
self.AW5_ARRAY_SCHEMA = {
"biobankid",
"sampleid",
"biobankidsampleid",
"sexatbirth",
"siteid",
"redidat",
"redidatmd5",
"greenidat",
"greenidatmd5",
"vcf",
"vcfindex",
"vcfmd5",
}
def validate_ingestion_file(self, *, filename, data_to_validate):
"""
Procedure to validate an ingestion file
:param filename:
:param data_to_validate:
:return: result code
"""
self.filename = filename
file_processed = self.controller.\
file_processed_dao.get_record_from_filename(filename)
if not self.validate_filename(filename):
return GenomicSubProcessResult.INVALID_FILE_NAME
struct_valid_result, missing_fields, expected = self._check_file_structure_valid(
data_to_validate['fieldnames'])
if struct_valid_result == GenomicSubProcessResult.INVALID_FILE_NAME:
return GenomicSubProcessResult.INVALID_FILE_NAME
if not struct_valid_result:
slack = True
invalid_message = f"{self.job_id.name}: File structure of {filename} is not valid."
if missing_fields:
invalid_message += f' Missing fields: {missing_fields}'
if len(missing_fields) == len(expected):
slack = False
self.controller.create_incident(
source_job_run_id=self.controller.job_run.id,
source_file_processed_id=file_processed.id,
code=GenomicIncidentCode.FILE_VALIDATION_FAILED_STRUCTURE.name,
message=invalid_message,
slack=slack
)
return GenomicSubProcessResult.INVALID_FILE_STRUCTURE
return GenomicSubProcessResult.SUCCESS
def validate_filename(self, filename):
"""
Applies a naming rule to an arbitrary filename
Naming rules are defined as local functions and
Mapped to a Genomic Job ID in naming_rules dict.
:param filename: passed to each name rule as 'fn'
:return: boolean
"""
if self.job_id in [GenomicJob.BB_RETURN_MANIFEST]:
filename_components = [x.lower() for x in filename.split('/')[-1].split("-")]
else:
filename_components = [x.lower() for x in filename.split('/')[-1].split("_")]
# Naming Rule Definitions
def bb_result_name_rule():
"""Biobank to DRC Result name rule"""
return (
filename_components[0] == 'genomic' and
filename_components[1] == 'manifest' and
filename_components[2] in ('aou_array', 'aou_wgs') and
filename.lower().endswith('csv')
)
def gc_validation_metrics_name_rule():
"""GC metrics file name rule"""
return (
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] in self.GC_METRICS_SCHEMAS.keys() and
filename.lower().endswith('csv')
)
def bb_to_gc_manifest_name_rule():
"""Biobank to GCs manifest name rule"""
return (
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] in ('seq', 'gen') and
filename.lower().endswith('csv')
)
def aw1f_manifest_name_rule():
"""Biobank to GCs Failure (AW1F) manifest name rule"""
return (
len(filename_components) == 5 and
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] in ('seq', 'gen') and
re.search(r"pkg-[0-9]{4}-[0-9]{5,}$",
filename_components[3]) is not None and
filename_components[4] == 'failure.csv' and
filename.lower().endswith('csv')
)
def cvl_w2_manifest_name_rule():
"""
CVL W2 (secondary validation) manifest name rule
UW_AoU_CVL_RequestValidation_Date.csv
"""
return (
len(filename_components) == 5 and
filename_components[0] in self.VALID_CVL_FACILITIES and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename_components[3] == 'requestvalidation' and
filename.lower().endswith('csv')
)
def gem_a2_manifest_name_rule():
"""GEM A2 manifest name rule: i.e. AoU_GEM_A2_manifest_2020-07-11-00-00-00.csv"""
return (
len(filename_components) == 5 and
filename_components[0] == 'aou' and
filename_components[1] == 'gem' and
filename_components[2] == 'a2' and
filename.lower().endswith('csv')
)
def cvl_aw1c_manifest_name_rule():
"""AW1C Biobank to CVLs manifest name rule"""
return (
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename.lower().endswith('csv')
)
def cvl_aw1cf_manifest_name_rule():
"""AW1F Biobank to CVLs manifest name rule"""
return (
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename_components[4] == 'failure.csv' and
filename.lower().endswith('csv')
)
def gem_metrics_name_rule():
"""GEM Metrics name rule: i.e. AoU_GEM_metrics_aggregate_2020-07-11-00-00-00.csv"""
return (
filename_components[0] == 'aou' and
filename_components[1] == 'gem' and
filename_components[2] == 'metrics' and
filename.lower().endswith('csv')
)
def aw4_arr_manifest_name_rule():
"""DRC Broad AW4 Array manifest name rule: i.e. AoU_DRCB_GEN_2020-07-11-00-00-00.csv"""
return (
filename_components[0] == 'aou' and
filename_components[1] == 'drcb' and
filename_components[2] == 'gen' and
filename.lower().endswith('csv')
)
def aw4_wgs_manifest_name_rule():
"""DRC Broad AW4 WGS manifest name rule: i.e. AoU_DRCB_SEQ_2020-07-11-00-00-00.csv"""
return (
filename_components[0] == 'aou' and
filename_components[1] == 'drcb' and
filename_components[2] == 'seq' and
filename.lower().endswith('csv')
)
def aw5_wgs_manifest_name_rule():
# don't have name convention right now, if have in the future, add here
return filename.lower().endswith('csv')
def aw5_array_manifest_name_rule():
# don't have name convention right now, if have in the future, add here
return filename.lower().endswith('csv')
name_rules = {
GenomicJob.BB_RETURN_MANIFEST: bb_result_name_rule,
GenomicJob.METRICS_INGESTION: gc_validation_metrics_name_rule,
GenomicJob.AW1_MANIFEST: bb_to_gc_manifest_name_rule,
GenomicJob.AW1F_MANIFEST: aw1f_manifest_name_rule,
GenomicJob.GEM_A2_MANIFEST: gem_a2_manifest_name_rule,
GenomicJob.W2_INGEST: cvl_w2_manifest_name_rule,
GenomicJob.AW1C_INGEST: cvl_aw1c_manifest_name_rule,
GenomicJob.AW1CF_INGEST: cvl_aw1cf_manifest_name_rule,
GenomicJob.AW4_ARRAY_WORKFLOW: aw4_arr_manifest_name_rule,
GenomicJob.AW4_WGS_WORKFLOW: aw4_wgs_manifest_name_rule,
GenomicJob.GEM_METRICS_INGEST: gem_metrics_name_rule,
GenomicJob.AW5_WGS_MANIFEST: aw5_wgs_manifest_name_rule,
GenomicJob.AW5_ARRAY_MANIFEST: aw5_array_manifest_name_rule,
}
is_valid_filename = name_rules[self.job_id]()
if not is_valid_filename:
invalid_message = f"{self.job_id.name}: File name {filename.split('/')[1]} has failed validation."
self.controller.create_incident(
save_incident=False,
slack=True,
message=invalid_message,
)
return is_valid_filename
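# Hypothetical file names that would satisfy a few of the rules above (dates and
# package ids are invented for illustration):
#   METRICS_INGESTION: 'JH_AoU_GEN_metrics_2020-07-11.csv'
#   AW1_MANIFEST:      'BCM_AoU_SEQ_PKG-2001-00123.csv'
#   GEM_A2_MANIFEST:   'AoU_GEM_A2_manifest_2020-07-11-00-00-00.csv'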
def _check_file_structure_valid(self, fields):
"""
Validates the structure of the CSV against a defined set of columns.
:param fields: the data from the CSV file; dictionary per row.
:return: boolean; True if valid structure, False if not.
"""
missing_fields = None
if not self.valid_schema:
self.valid_schema = self._set_schema(self.filename)
if self.valid_schema == GenomicSubProcessResult.INVALID_FILE_NAME:
return GenomicSubProcessResult.INVALID_FILE_NAME
cases = tuple([field.lower().replace('\ufeff', '').replace(' ', '').replace('_', '')
for field in fields])
all_file_columns_valid = all([c in self.valid_schema for c in cases])
all_expected_columns_in_file = all([c in cases for c in self.valid_schema])
if not all_expected_columns_in_file:
missing_fields = list(set(self.valid_schema) - set(cases))
return all([all_file_columns_valid, all_expected_columns_in_file]), missing_fields, self.valid_schema
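# Minimal illustration of the normalization above: the BOM, spaces, underscores
# and letter case are all ignored when comparing the CSV header to the schema.
#   fields = ['\ufeffBiobank ID', 'Sample_ID']  ->  cases == ('biobankid', 'sampleid')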
def _set_schema(self, filename):
"""Since the schemas are different for WGS and Array metrics files,
this parses the filename to return which schema
to use for validation of the CSV columns
:param filename: filename of the csv to validate in string format.
:return: schema_to_validate,
(tuple from the CSV_SCHEMA or result code of INVALID_FILE_NAME).
"""
try:
if self.job_id == GenomicJob.METRICS_INGESTION:
file_type = filename.lower().split("_")[2]
self.genome_type = self.GENOME_TYPE_MAPPINGS[file_type]
return self.GC_METRICS_SCHEMAS[file_type]
if self.job_id == GenomicJob.AW1_MANIFEST:
return self.GC_MANIFEST_SCHEMA
if self.job_id == GenomicJob.GEM_A2_MANIFEST:
return self.GEM_A2_SCHEMA
if self.job_id == GenomicJob.AW1F_MANIFEST:
return self.GC_MANIFEST_SCHEMA # AW1F and AW1 use same schema
if self.job_id == GenomicJob.GEM_METRICS_INGEST:
return self.GEM_METRICS_SCHEMA
if self.job_id == GenomicJob.W2_INGEST:
return self.CVL_W2_SCHEMA
if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW:
return self.AW4_ARRAY_SCHEMA
if self.job_id == GenomicJob.AW4_WGS_WORKFLOW:
return self.AW4_WGS_SCHEMA
if self.job_id in (GenomicJob.AW1C_INGEST, GenomicJob.AW1CF_INGEST):
return self.GC_MANIFEST_SCHEMA
if self.job_id == GenomicJob.AW5_WGS_MANIFEST:
self.genome_type = self.GENOME_TYPE_MAPPINGS['seq']
return self.AW5_WGS_SCHEMA
if self.job_id == GenomicJob.AW5_ARRAY_MANIFEST:
self.genome_type = self.GENOME_TYPE_MAPPINGS['gen']
return self.AW5_ARRAY_SCHEMA
except (IndexError, KeyError):
return GenomicSubProcessResult.INVALID_FILE_NAME
class GenomicFileMover:
"""
This utility class moves files in the bucket by copying into an archive folder
and deleting the old instance.
"""
def __init__(self, archive_folder=None):
self.archive_folder = archive_folder
def archive_file(self, file_obj=None, file_path=None):
"""
This method moves a file to an archive
by copy and delete
:param file_obj: a genomic_file_processed object to move
:return:
"""
source_path = file_obj.filePath if file_obj else file_path
file_name = source_path.split('/')[-1]
archive_path = source_path.replace(file_name,
f"{self.archive_folder}/"
f"{file_name}")
try:
copy_cloud_file(source_path, archive_path)
delete_cloud_file(source_path)
except FileNotFoundError:
logging.error(f"No file found at '{file_obj.filePath}'")
class GenomicReconciler:
""" This component handles reconciliation between genomic datasets """
def __init__(self, run_id, job_id, archive_folder=None, file_mover=None,
bucket_name=None, storage_provider=None, controller=None):
self.run_id = run_id
self.job_id = job_id
self.bucket_name = bucket_name
self.archive_folder = archive_folder
self.cvl_file_name = None
self.file_list = None
# Dao components
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.file_dao = GenomicFileProcessedDao()
# Other components
self.file_mover = file_mover
self.storage_provider = storage_provider
self.controller = controller
# Data file names differ between the genotyping (array) and sequencing (WGS) pipelines.
# File types are defined as tuples of
# (field_for_received_flag, filename_suffix, field_for_gcs_path)
self.genotyping_file_types = (('idatRedReceived', "_red.idat", "idatRedPath"),
('idatGreenReceived', "_grn.idat", "idatGreenPath"),
('idatRedMd5Received', "_red.idat.md5sum", "idatRedMd5Path"),
('idatGreenMd5Received', "_grn.idat.md5sum", "idatGreenMd5Path"),
('vcfReceived', ".vcf.gz", "vcfPath"),
('vcfTbiReceived', ".vcf.gz.tbi", "vcfTbiPath"),
('vcfMd5Received', ".vcf.gz.md5sum", "vcfMd5Path"))
self.sequencing_file_types = (("hfVcfReceived", ".hard-filtered.vcf.gz", "hfVcfPath"),
("hfVcfTbiReceived", ".hard-filtered.vcf.gz.tbi", "hfVcfTbiPath"),
("hfVcfMd5Received", ".hard-filtered.vcf.gz.md5sum", "hfVcfMd5Path"),
("rawVcfReceived", ".vcf.gz", "rawVcfPath"),
("rawVcfTbiReceived", ".vcf.gz.tbi", "rawVcfTbiPath"),
("rawVcfMd5Received", ".vcf.gz.md5sum", "rawVcfMd5Path"),
("cramReceived", ".cram", "cramPath"),
("cramMd5Received", ".cram.md5sum", "cramMd5Path"),
("craiReceived", ".cram.crai", "craiPath"),
("gvcfReceived", ".hard-filtered.gvcf.gz", "gvcfPath"),
("gvcfMd5Received", ".hard-filtered.gvcf.gz.md5sum", "gvcfMd5Path"))
def reconcile_metrics_to_array_data(self, _gc_site_id):
""" The main method for the AW2 manifest vs. array data reconciliation
:param: _gc_site_id: "jh", "uw", "bi", etc.
:return: result code
"""
metrics = self.metrics_dao.get_with_missing_array_files(_gc_site_id)
total_missing_data = []
# Get list of files in GC data bucket
if self.storage_provider:
# Use the storage provider if it was set by tool
files = self.storage_provider.list(self.bucket_name, prefix=None)
else:
files = list_blobs('/' + self.bucket_name)
self.file_list = [f.name for f in files]
# Iterate over metrics, searching the bucket for filenames where *_received = 0
for metric in metrics:
member = self.member_dao.get(metric.genomicSetMemberId)
missing_data_files = []
metric_touched = False
for file_type in self.genotyping_file_types:
if not getattr(metric, file_type[0]):
filename = f"{metric.chipwellbarcode}{file_type[1]}"
file_exists = self._get_full_filename(filename)
if file_exists != 0:
setattr(metric, file_type[0], 1)
setattr(metric, file_type[2], f'gs://{self.bucket_name}/{file_exists}')
metric_touched = True
if not file_exists:
missing_data_files.append(filename)
if metric_touched:
# Only upsert the metric if changed
inserted_metrics_obj = self.metrics_dao.upsert(metric)
# Update GC Metrics for PDR
if inserted_metrics_obj:
bq_genomic_gc_validation_metrics_update(inserted_metrics_obj.id,
project_id=self.controller.bq_project_id)
genomic_gc_validation_metrics_update(inserted_metrics_obj.id)
next_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState, signal='gem-ready')
# Update Job Run ID on member
self.member_dao.update_member_job_run_id(member, self.run_id, 'reconcileMetricsSequencingJobRunId',
project_id=self.controller.bq_project_id)
else:
next_state = None
# Update state for missing files
if missing_data_files:
next_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState, signal='missing')
incident = self.controller.incident_dao.get_by_source_file_id(metric.genomicFileProcessedId)
if not incident or (incident and not any([i for i in incident if i.code == 'MISSING_FILES'])):
total_missing_data.append((metric.genomicFileProcessedId,
missing_data_files,
member
))
if next_state is not None and next_state != member.genomicWorkflowState:
self.member_dao.update_member_state(member, next_state, project_id=self.controller.bq_project_id)
# Make a ROC ticket for missing data files
if total_missing_data:
description = f"{self.job_id.name}: The following AW2 manifests are missing data files."
description += f"\nGenomic Job Run ID: {self.run_id}"
for f in total_missing_data:
file = self.file_dao.get(f[0])
description += self._compile_missing_data_alert(
file_name=file.fileName,
missing_data=f[1]
)
self.controller.create_incident(
source_job_run_id=self.run_id,
source_file_processed_id=file.id,
code=GenomicIncidentCode.MISSING_FILES.name,
message=description,
genomic_set_member_id=f[2].id,
biobank_id=f[2].biobankId,
sample_id=f[2].sampleId if f[2].sampleId else "",
collection_tube_id=f[2].collectionTubeId if f[2].collectionTubeId else "",
slack=True
)
return GenomicSubProcessResult.SUCCESS
def reconcile_metrics_to_wgs_data(self, _gc_site_id):
""" The main method for the AW2 manifest vs. sequencing data reconciliation
:param: _gc_site_id: "jh", "uw", "bi", etc.
:return: result code
"""
metrics = self.metrics_dao.get_with_missing_wsg_files(_gc_site_id)
# Get list of files in GC data bucket
if self.storage_provider:
# Use the storage provider if it was set by tool
files = self.storage_provider.list(self.bucket_name, prefix=None)
else:
files = list_blobs('/' + self.bucket_name)
self.file_list = [f.name for f in files]
total_missing_data = []
# Iterate over metrics, searching the bucket for filenames
for metric in metrics:
member = self.member_dao.get(metric.GenomicGCValidationMetrics.genomicSetMemberId)
gc_prefix = _gc_site_id.upper()
missing_data_files = []
# Reset per metric so only changed metrics are upserted (mirrors the array reconciliation above)
metric_touched = False
for file_type in self.sequencing_file_types:
if not getattr(metric.GenomicGCValidationMetrics, file_type[0]):
# Default filename in case the file is missing (used in alert)
default_filename = f"{gc_prefix}_{metric.biobankId}_{metric.sampleId}_" \
f"{metric.GenomicGCValidationMetrics.limsId}_1{file_type[1]}"
file_type_expression = file_type[1].replace('.', '\\.')
# Naming rule for WGS files:
filename_exp = rf"{gc_prefix}_([A-Z]?){metric.biobankId}_{metric.sampleId}" \
rf"_{metric.GenomicGCValidationMetrics.limsId}_(\w*)(\d+){file_type_expression}$"
file_exists = self._get_full_filename_with_expression(filename_exp)
if file_exists != 0:
setattr(metric.GenomicGCValidationMetrics, file_type[0], 1)
setattr(metric.GenomicGCValidationMetrics, file_type[2],
f'gs://{self.bucket_name}/{file_exists}')
metric_touched = True
if not file_exists:
missing_data_files.append(default_filename)
if metric_touched:
# Only upsert the metric if changed
inserted_metrics_obj = self.metrics_dao.upsert(metric.GenomicGCValidationMetrics)
# Update GC Metrics for PDR
if inserted_metrics_obj:
bq_genomic_gc_validation_metrics_update(inserted_metrics_obj.id,
project_id=self.controller.bq_project_id)
genomic_gc_validation_metrics_update(inserted_metrics_obj.id)
next_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState, signal='cvl-ready')
self.member_dao.update_member_job_run_id(member, self.run_id, 'reconcileMetricsSequencingJobRunId',
project_id=self.controller.bq_project_id)
else:
next_state = None
# Handle for missing data files
if missing_data_files:
next_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState, signal='missing')
incident = self.controller.incident_dao.get_by_source_file_id(
metric.GenomicGCValidationMetrics.genomicFileProcessedId)
if not incident or (incident and not any([i for i in incident if i.code == 'MISSING_FILES'])):
total_missing_data.append((metric.GenomicGCValidationMetrics.genomicFileProcessedId,
missing_data_files,
member
))
# Update Member
if next_state is not None and next_state != member.genomicWorkflowState:
self.member_dao.update_member_state(member, next_state, project_id=self.controller.bq_project_id)
# Make a ROC ticket for missing data files
if total_missing_data:
description = f"{self.job_id.name}: The following AW2 manifests are missing data files."
description += f"\nGenomic Job Run ID: {self.run_id}"
for f in total_missing_data:
file = self.file_dao.get(f[0])
description += self._compile_missing_data_alert(
file_name=file.fileName,
missing_data=f[1]
)
self.controller.create_incident(
source_job_run_id=self.run_id,
source_file_processed_id=file.id,
code=GenomicIncidentCode.MISSING_FILES.name,
message=description,
genomic_set_member_id=f[2].id,
biobank_id=f[2].biobankId,
sample_id=f[2].sampleId if f[2].sampleId else "",
collection_tube_id=f[2].collectionTubeId if f[2].collectionTubeId else "",
slack=True
)
return GenomicSubProcessResult.SUCCESS
@staticmethod
def _compile_missing_data_alert(file_name, missing_data):
"""
Compiles the description to include in a GenomicAlert
:param file_name:
:param missing_data: list of files
:return: summary, description
"""
file_list = '\n'.join([md for md in missing_data])
description = f"\nManifest File: {file_name}"
description += "\nMissing Genotype Data:"
description += f"\n{file_list}"
return description
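# Example of the description fragment produced above (file names are illustrative):
#   _compile_missing_data_alert('RDR_AoU_GEN_TestManifest.csv',
#                               ['chip1_red.idat', 'chip1_grn.idat'])
#   returns:
#   '\nManifest File: RDR_AoU_GEN_TestManifest.csv\nMissing Genotype Data:\nchip1_red.idat\nchip1_grn.idat'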
def generate_cvl_reconciliation_report(self):
"""
The main method for the CVL Reconciliation report:
outputs the report file to the CVL subfolder and updates
genomic_set_member
:return: result code
"""
members = self.member_dao.get_members_for_cvl_reconciliation()
if members:
cvl_subfolder = getSetting(GENOMIC_CVL_RECONCILIATION_REPORT_SUBFOLDER)
self.cvl_file_name = f"{cvl_subfolder}/cvl_report_{self.run_id}.csv"
self._write_cvl_report_to_file(members)
results = []
for member in members:
results.append(self.member_dao.update_member_job_run_id(
member, job_run_id=self.run_id,
field='reconcileCvlJobRunId')
)
return GenomicSubProcessResult.SUCCESS \
if GenomicSubProcessResult.ERROR not in results \
else GenomicSubProcessResult.ERROR
return GenomicSubProcessResult.NO_FILES
def reconcile_gem_report_states(self, _last_run_time=None):
"""
Scans GEM report states for changes
:param _last_run_time: the time when the current job last ran
"""
# Get unconsented members to update (consent > last run time of job_id)
unconsented_gror_members = self.member_dao.get_unconsented_gror_since_date(_last_run_time)
# update each member with the new state and withdrawal time
for member in unconsented_gror_members:
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal='unconsented')
if new_state is not None and new_state != member.genomicWorkflowState:
self.member_dao.update_member_state(member, new_state)
# Handle withdrawal (gror/primary consent) for reportConsentRemovalDate
removal_date = self.member_dao.get_gem_consent_removal_date(member)
self.member_dao.update_report_consent_removal_date(member, removal_date)
# Get reconsented members to update (consent > last run time of job_id)
reconsented_gror_members = self.member_dao.get_reconsented_gror_since_date(_last_run_time)
# update each member with the new state
for member in reconsented_gror_members:
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal='reconsented')
if new_state is not None and new_state != member.genomicWorkflowState:
self.member_dao.update_member_state(member, new_state)
self.member_dao.update_report_consent_removal_date(member, None)
@staticmethod
def _check_genotyping_file_exists(bucket_name, filename):
files = list_blobs('/' + bucket_name)
filenames = [f.name for f in files if f.name.endswith(filename)]
return 1 if len(filenames) > 0 else 0
def _get_full_filename(self, filename):
""" Searches file_list for names ending in filename
:param filename: file name to match
:return: the first matching file name, or 0 if there is no match
"""
filenames = [name for name in self.file_list if name.lower().endswith(filename.lower())]
return filenames[0] if len(filenames) > 0 else 0
def _get_full_filename_with_expression(self, expression):
""" Searches file_list for names that match the expression
:param expression: pattern to match
:return: file name with highest revision number
"""
filenames = [name for name in self.file_list if re.search(expression, name)]
def sort_filenames(name):
version = name.split('.')[0].split('_')[-1]
if version[0].isalpha():
version = version[1:]
return int(version)
# Naturally sort the list in descending order of revisions
# ex: [name_11.ext, name_10.ext, name_9.ext, name_8.ext, etc.]
filenames.sort(reverse=True, key=sort_filenames)
return filenames[0] if len(filenames) > 0 else 0
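# Sketch of the revision sort above with hypothetical names: the trailing "_<n>"
# before the first dot (optionally prefixed by a letter) is parsed as an integer,
# so the highest revision is returned.
#   file_list contains ['x_1.vcf.gz', 'x_11.vcf.gz', 'x_2.vcf.gz'] -> 'x_11.vcf.gz'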
def _get_sequence_files(self, bucket_name):
"""
Checks the bucket for sequencing files based on naming convention
:param bucket_name:
:return: file list or result code
"""
try:
files = list_blobs('/' + bucket_name)
# TODO: naming_convention is not yet finalized
naming_convention = r"^gc_sequencing_t\d*\.txt$"
files = [s.name for s in files
if self.archive_folder not in s.name.lower()
if re.search(naming_convention,
s.name.lower())]
if not files:
logging.info(
f'No valid sequencing files in bucket {bucket_name}'
)
return GenomicSubProcessResult.NO_FILES
return files
except FileNotFoundError:
return GenomicSubProcessResult.ERROR
def _parse_seq_filename(self, filename):
"""
Takes a sequencing filename and returns the biobank id.
:param filename:
:return: biobank_id
"""
# TODO: naming_convention is not yet finalized
try:
# pull biobank ID from filename
return filename.lower().split('_')[-1].split('.')[0][1:]
except IndexError:
return GenomicSubProcessResult.INVALID_FILE_NAME
def _update_genomic_set_member_seq_reconciliation(self, member, seq_file_name, job_run_id):
"""
Uses member DAO to update GenomicSetMember object
with sequencing reconciliation data
:param member: the GenomicSetMember to update
:param seq_file_name:
:param job_run_id:
:return: query result
"""
return self.member_dao.update_member_sequencing_file(member,
job_run_id,
seq_file_name)
def _write_cvl_report_to_file(self, members):
"""
writes data to csv file in bucket
:param members:
:return: result code
"""
try:
# extract only columns we need
cvl_columns = ('biobank_id', 'sample_id', 'member_id')
report_data = ((m.biobankId, m.sampleId, m.id) for m in members)
# Use SQL exporter
exporter = SqlExporter(self.bucket_name)
with exporter.open_cloud_writer(self.cvl_file_name) as writer:
writer.write_header(cvl_columns)
writer.write_rows(report_data)
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
class GenomicBiobankSamplesCoupler:
"""This component creates the source data for Cohot 3:
new genomic set and members from the biobank samples pipeline.
Class uses the manifest handler to create and upload a manifest"""
_SEX_AT_BIRTH_CODES = {
'male': 'M',
'female': 'F',
'none_intersex': 'NA'
}
_VALIDATION_FLAGS = (GenomicValidationFlag.INVALID_WITHDRAW_STATUS,
GenomicValidationFlag.INVALID_SUSPENSION_STATUS,
GenomicValidationFlag.INVALID_CONSENT,
GenomicValidationFlag.INVALID_AGE,
GenomicValidationFlag.INVALID_AIAN,
GenomicValidationFlag.INVALID_SEX_AT_BIRTH)
_ARRAY_GENOME_TYPE = "aou_array"
_WGS_GENOME_TYPE = "aou_wgs"
_LR_GENOME_TYPE = "long_read"
COHORT_1_ID = "C1"
COHORT_2_ID = "C2"
COHORT_3_ID = "C3"
GenomicSampleMeta = namedtuple("GenomicSampleMeta", ["bids",
"pids",
"order_ids",
"site_ids",
"state_ids",
"sample_ids",
"valid_withdrawal_status",
"valid_suspension_status",
"gen_consents",
"valid_ages",
"sabs",
"gror",
"valid_ai_ans"])
def __init__(self, run_id, controller=None):
self.samples_dao = BiobankStoredSampleDao()
self.set_dao = GenomicSetDao()
self.member_dao = GenomicSetMemberDao()
self.site_dao = SiteDao()
self.ps_dao = ParticipantSummaryDao()
self.code_dao = CodeDao()
self.run_id = run_id
self.controller = controller
self.query = GenomicQueryClass()
def create_new_genomic_participants(self, from_date):
"""
This method determines which samples to enter into the genomic system
from Cohort 3 (New Participants).
Validation is handled in the query that retrieves the newly consented
participants' samples to process.
:param: from_date : the date from which to lookup new biobank_ids
:return: result
"""
samples = self._get_new_biobank_samples(from_date)
if len(samples) > 0:
samples_meta = self.GenomicSampleMeta(*samples)
return self.process_samples_into_manifest(samples_meta, cohort=self.COHORT_3_ID)
else:
logging.info(f'New Participant Workflow: No new samples to process.')
return GenomicSubProcessResult.NO_FILES
def create_saliva_genomic_participants(self, local=False, config=None):
"""
This method determines which samples to enter into
the genomic system that are saliva only, via the
config obj passed in the argument.
:param: config : options for ROR consent type and whether the sample was generated in-home or in-clinic
:return: result
"""
participants = self._get_remaining_saliva_participants(config)
if len(participants) > 0:
return self.create_matrix_and_process_samples(participants, cohort=None, local=local, saliva=True)
else:
logging.info(
f'Saliva Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_c2_genomic_participants(self, from_date, local=False):
"""
Creates Cohort 2 Participants in the genomic system using reconsent.
Validation is handled in the query that retrieves the newly consented
participants. Only valid participants are currently sent.
Refactored to first pull valid participants, then pull their samples,
applying the new business logic of prioritizing
collection date & blood over saliva.
:param: from_date : the date from which to lookup new participants
:return: result
"""
participants = self._get_new_c2_participants(from_date)
if len(participants) > 0:
return self.create_matrix_and_process_samples(participants, cohort=self.COHORT_2_ID, local=local)
else:
logging.info(f'Cohort 2 Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_c1_genomic_participants(self, from_date, local=False):
"""
Creates Cohort 1 Participants in the genomic system using reconsent.
Validation is handled in the query that retrieves the newly consented
participants. Only valid participants are currently sent.
:param: from_date : the date from which to lookup new participants
:return: result
"""
participants = self._get_new_c1_participants(from_date)
if len(participants) > 0:
return self.create_matrix_and_process_samples(participants, cohort=self.COHORT_1_ID, local=local)
else:
logging.info(f'Cohort 1 Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_long_read_genomic_participants(self, limit=None):
"""
Create long_read participants that are already in the genomic system,
based on downstream filters.
:return:
"""
participants = self._get_long_read_participants(limit)
if len(participants) > 0:
return self.process_genomic_members_into_manifest(
participants=participants,
genome_type=self._LR_GENOME_TYPE
)
logging.info(f'Long Read Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def process_genomic_members_into_manifest(self, *, participants, genome_type):
"""
Compiles AW0 Manifest from already submitted genomic members.
:param participants:
:param genome_type
:return:
"""
new_genomic_set = self._create_new_genomic_set()
processed_members = []
count = 0
# duplicate genomic set members
with self.member_dao.session() as session:
for i, participant in enumerate(participants):
dup_member_obj = GenomicSetMember(
biobankId=participant.biobankId,
genomicSetId=new_genomic_set.id,
participantId=participant.participantId,
nyFlag=participant.nyFlag,
sexAtBirth=participant.sexAtBirth,
collectionTubeId=participant.collectionTubeId,
validationStatus=participant.validationStatus,
validationFlags=participant.validationFlags,
ai_an=participant.ai_an,
genomeType=genome_type,
genomicWorkflowState=GenomicWorkflowState.LR_PENDING,
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
)
processed_members.append(dup_member_obj)
count = i + 1
if count % 100 == 0:
self.genomic_members_insert(
members=processed_members,
session=session,
set_id=new_genomic_set.id,
bids=[pm.biobankId for pm in processed_members]
)
processed_members.clear()
if count and processed_members:
self.genomic_members_insert(
members=processed_members,
session=session,
set_id=new_genomic_set.id,
bids=[pm.biobankId for pm in processed_members]
)
return new_genomic_set.id
def process_samples_into_manifest(self, samples_meta, cohort, saliva=False, local=False):
"""
Compiles AW0 Manifest from samples list.
:param samples_meta:
:param cohort:
:param saliva:
:param local: overrides automatic push to bucket
:return: job result code
"""
logging.info(f'{self.__class__.__name__}: Processing new biobank_ids {samples_meta.bids}')
new_genomic_set = self._create_new_genomic_set()
processed_array_wgs = []
count = 0
bids = []
# Create genomic set members
with self.member_dao.session() as session:
for i, bid in enumerate(samples_meta.bids):
# Don't write participant to table if no sample
if samples_meta.sample_ids[i] == 0:
continue
logging.info(f'Validating sample: {samples_meta.sample_ids[i]}')
validation_criteria = (
samples_meta.valid_withdrawal_status[i],
samples_meta.valid_suspension_status[i],
samples_meta.gen_consents[i],
samples_meta.valid_ages[i],
samples_meta.valid_ai_ans[i],
samples_meta.sabs[i] in self._SEX_AT_BIRTH_CODES.values()
)
valid_flags = self._calculate_validation_flags(validation_criteria)
logging.info(f'Creating genomic set members for PID: {samples_meta.pids[i]}')
# Get NY flag for collected-site
if samples_meta.site_ids[i]:
_ny_flag = self._get_new_york_flag_from_site(samples_meta.site_ids[i])
# Get NY flag for mail-kit
elif samples_meta.state_ids[i]:
_ny_flag = self._get_new_york_flag_from_state_id(samples_meta.state_ids[i])
# default ny flag if no state id
elif not samples_meta.state_ids[i]:
_ny_flag = 0
else:
logging.warning(f'No collection site or mail kit state. Skipping biobank_id: {bid}')
continue
new_array_member_obj = GenomicSetMember(
biobankId=bid,
genomicSetId=new_genomic_set.id,
participantId=samples_meta.pids[i],
nyFlag=_ny_flag,
sexAtBirth=samples_meta.sabs[i],
collectionTubeId=samples_meta.sample_ids[i],
validationStatus=(GenomicSetMemberStatus.INVALID if len(valid_flags) > 0
else GenomicSetMemberStatus.VALID),
validationFlags=valid_flags,
ai_an='N' if samples_meta.valid_ai_ans[i] else 'Y',
genomeType=self._ARRAY_GENOME_TYPE,
genomicWorkflowState=GenomicWorkflowState.AW0_READY,
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
)
# Also create a WGS member
new_wgs_member_obj = deepcopy(new_array_member_obj)
new_wgs_member_obj.genomeType = self._WGS_GENOME_TYPE
bids.append(bid)
processed_array_wgs.extend([new_array_member_obj, new_wgs_member_obj])
count = i + 1
if count % 1000 == 0:
self.genomic_members_insert(
members=processed_array_wgs,
session=session,
set_id=new_genomic_set.id,
bids=bids
)
processed_array_wgs.clear()
bids.clear()
if count and processed_array_wgs:
self.genomic_members_insert(
members=processed_array_wgs,
session=session,
set_id=new_genomic_set.id,
bids=bids
)
# Create & transfer the Biobank Manifest based on the new genomic set
try:
if local:
return new_genomic_set.id
else:
create_and_upload_genomic_biobank_manifest_file(new_genomic_set.id,
cohort_id=cohort,
saliva=saliva)
# Handle Genomic States for manifests
for member in self.member_dao.get_members_from_set_id(new_genomic_set.id):
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal='manifest-generated')
if new_state is not None and new_state != member.genomicWorkflowState:
self.member_dao.update_member_state(member, new_state)
logging.info(f'{self.__class__.__name__}: Genomic set members created ')
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
def create_matrix_and_process_samples(self, participants, cohort, local, saliva=False):
"""
Wrapper method for processing participants for C1 and C2 manifests
:param cohort:
:param participants:
:param local:
:param saliva:
:return:
"""
participant_matrix = self.GenomicSampleMeta(*participants)
for i, _bid in enumerate(participant_matrix.bids):
logging.info(f'Retrieving samples for PID: {participant_matrix.pids[i]}')
blood_sample_data = None
if not saliva:
blood_sample_data = self._get_usable_blood_sample(pid=participant_matrix.pids[i],
bid=_bid)
saliva_sample_data = self._get_usable_saliva_sample(pid=participant_matrix.pids[i],
bid=_bid)
# Determine which sample ID to use
sample_data = self._determine_best_sample(blood_sample_data, saliva_sample_data)
# update the sample id, collected site, and biobank order
if sample_data is not None:
participant_matrix.sample_ids[i] = sample_data[0]
participant_matrix.site_ids[i] = sample_data[1]
participant_matrix.order_ids[i] = sample_data[2]
else:
logging.info(f'No valid samples for pid {participant_matrix.pids[i]}.')
# insert new members and make the manifest
return self.process_samples_into_manifest(
participant_matrix,
cohort=cohort,
saliva=saliva,
local=local
)
def genomic_members_insert(self, *, members, session, set_id, bids):
"""
Bulk save of members to genomic_set_member, followed by PDR
batch updates for those members
:param: members
:param: session
:param: set_id
:param: bids
"""
try:
session.bulk_save_objects(members)
session.commit()
members = self.member_dao.get_members_from_set_id(set_id, bids=bids)
member_ids = [m.id for m in members]
bq_genomic_set_member_batch_update(member_ids, project_id=self.controller.bq_project_id)
genomic_set_member_batch_update(member_ids)
except Exception as e:
raise Exception("Error occurred on genomic member insert: {0}".format(e))
def _get_new_biobank_samples(self, from_date):
"""
Retrieves BiobankStoredSample objects with `rdr_created`
after the last run of the new participant workflow job.
The query filters out participants that do not match the
genomic validation requirements.
:param: from_date
:return: list of tuples (bid, pid, biobank_identifier.value, collected_site_id)
"""
_new_samples_sql = self.query.new_biobank_samples()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"from_date_param": from_date.strftime("%Y-%m-%d"),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_3_param": ParticipantCohort.COHORT_3.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_new_samples_sql, params).fetchall()
result = self._prioritize_samples_by_participant(result)
return list(zip(*result))[:-2] # Slicing to remove the last two columns retrieved for prioritization
def _prioritize_samples_by_participant(self, sample_results):
preferred_samples = {}
for sample in sample_results:
preferred_sample = sample
previously_found_sample = preferred_samples.get(sample.participant_id, None)
if previously_found_sample is not None:
preferred_sample = self._determine_best_sample(previously_found_sample, sample)
preferred_samples[sample.participant_id] = preferred_sample
return list(preferred_samples.values())
@staticmethod
def _determine_best_sample(sample_one, sample_two):
if sample_one is None:
return sample_two
if sample_two is None:
return sample_one
# Return the usable sample (status less than NOT_RECEIVED) if one is usable and the other isn't
if sample_one.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_two.status:
return sample_one
elif sample_two.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_one.status:
return sample_two
elif sample_one.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED) \
and sample_two.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED):
return None
# Both are usable
# Return the sample by the priority of the code: 1ED04, then 1ED10, and 1SAL2 last
test_codes_by_preference = ['1ED04', '1ED10', '1SAL2'] # most desirable first
samples_by_code = {}
for sample in [sample_one, sample_two]:
samples_by_code[sample.test] = sample
for test_code in test_codes_by_preference:
if samples_by_code.get(test_code):
return samples_by_code[test_code]
logging.error(f'Should have been able to select between '
f'{sample_one.biobank_stored_sample_id} and {sample_two.biobank_stored_sample_id}')
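# Illustrative preference when both samples are usable (status below
# SAMPLE_NOT_RECEIVED): 1ED04 beats 1ED10, and 1SAL2 is the last resort,
# regardless of argument order. The Sample tuple below is hypothetical and
# assumes RECEIVED sorts below SAMPLE_NOT_RECEIVED:
#   Sample = namedtuple('Sample', ['status', 'test', 'biobank_stored_sample_id'])
#   s1 = Sample(int(SampleStatus.RECEIVED), '1SAL2', 1)
#   s2 = Sample(int(SampleStatus.RECEIVED), '1ED04', 2)
#   _determine_best_sample(s1, s2)  # -> s2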
def _get_new_c2_participants(self, from_date):
"""
Retrieves C2 participants and validation data.
Broken out so that DNA samples' business logic is handled separately
:param from_date:
:return:
"""
_c2_participant_sql = self.query.new_c2_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"from_date_param": from_date.strftime("%Y-%m-%d"),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_2_param": ParticipantCohort.COHORT_2.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.ps_dao.session() as session:
result = session.execute(_c2_participant_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _get_remaining_c2_participants(self):
_c2_participant_sql = self.query.remaining_c2_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_2_param": ParticipantCohort.COHORT_2.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.ps_dao.session() as session:
result = session.execute(_c2_participant_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _get_new_c1_participants(self, from_date):
"""
Retrieves C1 participants and validation data.
:param from_date:
:return:
"""
_c1_participant_sql = self.query.new_c1_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"from_date_param": from_date.strftime("%Y-%m-%d"),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_1_param": ParticipantCohort.COHORT_1.__int__(),
"c1_reconsent_param": COHORT_1_REVIEW_CONSENT_YES_CODE,
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.ps_dao.session() as session:
result = session.execute(_c1_participant_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _get_long_read_participants(self, limit=None):
"""
Retrieves participants matching the filters designated
for the long-read pilot program
"""
with self.member_dao.session() as session:
gsm_alias = aliased(GenomicSetMember)
result = session.query(GenomicSetMember).join(
ParticipantSummary,
GenomicSetMember.participantId == ParticipantSummary.participantId,
).join(
ParticipantRaceAnswers,
ParticipantRaceAnswers.participantId == ParticipantSummary.participantId,
).join(
Code,
ParticipantRaceAnswers.codeId == Code.codeId,
).join(
GenomicGCValidationMetrics,
GenomicSetMember.id == GenomicGCValidationMetrics.genomicSetMemberId,
).outerjoin(
gsm_alias,
sqlalchemy.and_(
gsm_alias.participantId == ParticipantSummary.participantId,
gsm_alias.genomeType == 'long_read'
)
).filter(
Code.value == 'WhatRaceEthnicity_Black',
GenomicSetMember.genomeType.in_(['aou_wgs']),
GenomicSetMember.genomicWorkflowState != GenomicWorkflowState.IGNORE,
GenomicGCValidationMetrics.ignoreFlag == 0,
GenomicGCValidationMetrics.contamination <= 0.01,
ParticipantSummary.participantOrigin == 'vibrent',
ParticipantSummary.ehrUpdateTime.isnot(None),
gsm_alias.id.is_(None),
).distinct(gsm_alias.biobankId)
if limit:
result = result.limit(limit)
return result.all()
def _get_usable_blood_sample(self, pid, bid):
"""
Select 1ED04 or 1ED10 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(blood_collected date, blood sample, blood site, blood order)
"""
_samples_sql = self.query.usable_blood_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_usable_saliva_sample(self, pid, bid):
"""
Select 1SAL2 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(saliva date, saliva sample, saliva site, saliva order)
"""
_samples_sql = self.query.usable_saliva_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_remaining_saliva_participants(self, config):
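        """Retrieves the remaining saliva participants, using filters from `config`."""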
_saliva_sql = self.query.remaining_saliva_participants(config)
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_saliva_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _create_new_genomic_set(self):
"""Inserts a new genomic set for this run"""
attributes = {
'genomicSetName': f'new_participant_workflow_{self.run_id}',
'genomicSetCriteria': '.',
'genomicSetVersion': 1,
'genomicSetStatus': GenomicSetStatus.VALID,
}
new_set_obj = GenomicSet(**attributes)
inserted_set = self.set_dao.insert(new_set_obj)
# Insert new set for PDR
bq_genomic_set_update(inserted_set.id, project_id=self.controller.bq_project_id)
genomic_set_update(inserted_set.id)
return inserted_set
def _create_new_set_member(self, **kwargs):
"""Inserts new GenomicSetMember object"""
new_member_obj = GenomicSetMember(**kwargs)
return self.member_dao.insert(new_member_obj)
def _get_new_york_flag_from_site(self, collected_site_id):
"""
Looks up whether a collected site's state is NY
:param collected_site_id: the id of the site
:return: int (1 or 0 for NY or Not)
"""
return int(self.site_dao.get(collected_site_id).state == 'NY')
def _get_new_york_flag_from_state_id(self, state_id):
"""
        Looks up whether a state code value corresponds to NY
:param state_id: the code ID for the state
:return: int (1 or 0 for NY or Not)
"""
return int(self.code_dao.get(state_id).value.split('_')[1] == 'NY')
def _calculate_validation_flags(self, validation_criteria):
"""
Determines validation and flags for genomic sample
:param validation_criteria:
:return: list of validation flags
"""
# Process validation flags for inserting into genomic_set_member
flags = [flag for (passing, flag) in
zip(validation_criteria, self._VALIDATION_FLAGS)
if not passing]
return flags
class ManifestDefinitionProvider:
"""
Helper class to produce the definitions for each manifest
"""
# Metadata for the various manifests
ManifestDef = namedtuple('ManifestDef', ["job_run_field",
"source_data",
"destination_bucket",
"output_filename",
"columns",
"signal"])
def __init__(
self,
job_run_id=None,
bucket_name=None,
**kwargs
):
# Attributes
self.job_run_id = job_run_id
self.bucket_name = bucket_name
self.kwargs = kwargs
self.query = GenomicQueryClass(
input_manifest=self.kwargs['kwargs'].get('input_manifest')
)
self.manifest_columns_config = {
GenomicManifestTypes.CVL_W1: (
"genomic_set_name",
"biobank_id",
"sample_id",
"sex_at_birth",
"ny_flag",
"site_id",
"secondary_validation",
"date_submitted",
"test_name",
),
GenomicManifestTypes.AW3_ARRAY: (
"chipwellbarcode",
"biobank_id",
"sample_id",
"sex_at_birth",
"site_id",
"red_idat_path",
"red_idat_md5_path",
"green_idat_path",
"green_idat_md5_path",
"vcf_path",
"vcf_index_path",
"vcf_md5_path",
"callrate",
"sex_concordance",
"contamination",
"processing_status",
"research_id",
),
GenomicManifestTypes.GEM_A1: (
'biobank_id',
'sample_id',
"sex_at_birth",
"consent_for_ror",
"date_of_consent_for_ror",
"chipwellbarcode",
"genome_center",
),
GenomicManifestTypes.GEM_A3: (
'biobank_id',
'sample_id',
'date_of_consent_removal',
),
GenomicManifestTypes.CVL_W3: (
"value",
"sample_id",
"biobank_id",
"collection_tubeid",
"sex_at_birth",
"genome_type",
"ny_flag",
"request_id",
"package_id",
"ai_an",
"site_id",
),
GenomicManifestTypes.AW3_WGS: (
"biobank_id",
"sample_id",
"biobankidsampleid",
"sex_at_birth",
"site_id",
"vcf_hf_path",
"vcf_hf_index_path",
"vcf_hf_md5_path",
"vcf_raw_path",
"vcf_raw_index_path",
"vcf_raw_md5_path",
"cram_path",
"cram_md5_path",
"crai_path",
"gvcf_path",
"gvcf_md5_path",
"contamination",
"sex_concordance",
"processing_status",
"mean_coverage",
"research_id",
),
GenomicManifestTypes.AW2F: (
"PACKAGE_ID",
"BIOBANKID_SAMPLEID",
"BOX_STORAGEUNIT_ID",
"BOX_ID/PLATE_ID",
"WELL_POSITION",
"SAMPLE_ID",
"PARENT_SAMPLE_ID",
"COLLECTION_TUBE_ID",
"MATRIX_ID",
"COLLECTION_DATE",
"BIOBANK_ID",
"SEX_AT_BIRTH",
"AGE",
"NY_STATE_(Y/N)",
"SAMPLE_TYPE",
"TREATMENTS",
"QUANTITY_(uL)",
"TOTAL_CONCENTRATION_(ng/uL)",
"TOTAL_DNA(ng)",
"VISIT_DESCRIPTION",
"SAMPLE_SOURCE",
"STUDY",
"TRACKING_NUMBER",
"CONTACT",
"EMAIL",
"STUDY_PI",
"TEST_NAME",
"FAILURE_MODE",
"FAILURE_MODE_DESC",
"PROCESSING_STATUS",
"CONTAMINATION",
"CONTAMINATION_CATEGORY",
"CONSENT_FOR_ROR",
),
}
def _get_source_data_query(self, manifest_type):
"""
Returns the query to use for manifest's source data
:param manifest_type:
:return: query object
"""
try:
return self.query.genomic_data_config[manifest_type]
except KeyError:
logging.warning(f"Manifest type {manifest_type} does not resolve query")
def get_def(self, manifest_type):
"""
Returns the manifest definition based on manifest_type
:param manifest_type:
:return: ManifestDef()
"""
now_formatted = clock.CLOCK.now().strftime("%Y-%m-%d-%H-%M-%S")
def_config = {
GenomicManifestTypes.CVL_W1: {
'job_run_field': 'cvlW1ManifestJobRunId',
'output_filename': f'{CVL_W1_MANIFEST_SUBFOLDER}/AoU_CVL_Manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.GEM_A1: {
'job_run_field': 'gemA1ManifestJobRunId',
'output_filename': f'{GENOMIC_GEM_A1_MANIFEST_SUBFOLDER}/AoU_GEM_A1_manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.GEM_A3: {
'job_run_field': 'gemA3ManifestJobRunId',
'output_filename': f'{GENOMIC_GEM_A3_MANIFEST_SUBFOLDER}/AoU_GEM_A3_manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.CVL_W3: {
'job_run_field': 'cvlW3ManifestJobRunID',
'output_filename': f'{CVL_W3_MANIFEST_SUBFOLDER}/AoU_CVL_W1_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.AW3_ARRAY: {
'job_run_field': 'aw3ManifestJobRunID',
'output_filename': f'{GENOMIC_AW3_ARRAY_SUBFOLDER}/AoU_DRCV_GEN_{now_formatted}.csv',
'signal': 'bypass'
},
GenomicManifestTypes.AW3_WGS: {
'job_run_field': 'aw3ManifestJobRunID',
'output_filename': f'{GENOMIC_AW3_WGS_SUBFOLDER}/AoU_DRCV_SEQ_{now_formatted}.csv',
'signal': 'bypass'
},
GenomicManifestTypes.AW2F: {
'job_run_field': 'aw2fManifestJobRunID',
'output_filename': f'{BIOBANK_AW2F_SUBFOLDER}/GC_AoU_DataType_PKG-YYMM-xxxxxx_contamination.csv',
'signal': 'bypass'
}
}
return self.ManifestDef(
job_run_field=def_config[manifest_type]['job_run_field'],
source_data=self._get_source_data_query(manifest_type),
destination_bucket=f'{self.bucket_name}',
output_filename=def_config[manifest_type]['output_filename'],
columns=self.manifest_columns_config[manifest_type],
signal=def_config[manifest_type]['signal'],
)
class ManifestCompiler:
"""
This component compiles Genomic manifests
based on definitions provided by ManifestDefinitionProvider
"""
def __init__(self, run_id, bucket_name=None):
self.run_id = run_id
self.bucket_name = bucket_name
self.output_file_name = None
self.manifest_def = None
self.def_provider = None
# Dao components
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
def generate_and_transfer_manifest(self, manifest_type, genome_type, **kwargs):
"""
Main execution method for ManifestCompiler
:return: result dict:
"code": (i.e. SUCCESS)
"feedback_file": None or feedback file record to update,
"record_count": integer
"""
self.def_provider = ManifestDefinitionProvider(
job_run_id=self.run_id,
bucket_name=self.bucket_name,
kwargs=kwargs
)
self.manifest_def = self.def_provider.get_def(manifest_type)
source_data = self._pull_source_data()
if source_data:
self.output_file_name = self.manifest_def.output_filename
# If the new manifest is a feedback manifest,
# it will have an input manifest
if "input_manifest" in kwargs.keys():
                # AW2F manifest file name is based off of the AW1 file name
if manifest_type == GenomicManifestTypes.AW2F:
new_name = kwargs['input_manifest'].filePath.split('/')[-1]
new_name = new_name.replace('.csv', '_contamination.csv')
self.output_file_name = self.manifest_def.output_filename.replace(
"GC_AoU_DataType_PKG-YYMM-xxxxxx_contamination.csv",
f"{new_name}"
)
logging.info(
f'Preparing manifest of type {manifest_type}...'
f'{self.manifest_def.destination_bucket}/{self.output_file_name}'
)
self._write_and_upload_manifest(source_data)
results = []
record_count = len(source_data)
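            # For each manifest row, look up its set member and stamp this job run
            # on the manifest's job-run field (when one is defined).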
for row in source_data:
member = self.member_dao.get_member_from_sample_id(row.sample_id, genome_type)
if self.manifest_def.job_run_field is not None:
results.append(
self.member_dao.update_member_job_run_id(
member,
job_run_id=self.run_id,
field=self.manifest_def.job_run_field
)
)
# Handle Genomic States for manifests
if self.manifest_def.signal != "bypass":
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal=self.manifest_def.signal)
                    if new_state is not None and new_state != member.genomicWorkflowState:
self.member_dao.update_member_state(member, new_state)
# Assemble result dict
result_code = GenomicSubProcessResult.SUCCESS \
if GenomicSubProcessResult.ERROR not in results \
else GenomicSubProcessResult.ERROR
result = {
"code": result_code,
"record_count": record_count,
}
return result
logging.info(f'No records found for manifest type: {manifest_type}.')
return {
"code": GenomicSubProcessResult.NO_FILES,
"record_count": 0,
}
def _pull_source_data(self):
"""
Runs the source data query
:return: result set
"""
with self.member_dao.session() as session:
return session.execute(self.manifest_def.source_data).fetchall()
def _write_and_upload_manifest(self, source_data):
"""
writes data to csv file in bucket
:return: result code
"""
try:
# Use SQL exporter
exporter = SqlExporter(self.bucket_name)
with exporter.open_cloud_writer(self.output_file_name) as writer:
writer.write_header(self.manifest_def.columns)
writer.write_rows(source_data)
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
class GenomicAlertHandler:
"""
Creates a jira ROC ticket using Jira utils
"""
ROC_BOARD_ID = "ROC"
def __init__(self):
self._jira_handler = None
self.alert_envs = ["all-of-us-rdr-prod"]
if GAE_PROJECT in self.alert_envs:
self._jira_handler = JiraTicketHandler()
def make_genomic_alert(self, summary: str, description: str):
"""
Wraps create_ticket with genomic specifics
        Gets the board ID and adds the ticket to the sprint
:param summary: the 'title' of the ticket
:param description: the 'body' of the ticket
"""
if self._jira_handler is not None:
ticket = self._jira_handler.create_ticket(summary, description,
board_id=self.ROC_BOARD_ID)
active_sprint = self._jira_handler.get_active_sprint(
self._jira_handler.get_board_by_id(self.ROC_BOARD_ID))
self._jira_handler.add_ticket_to_sprint(ticket, active_sprint)
else:
logging.info('Suppressing alert for missing files')
return
| bsd-3-clause | 7,104,423,912,018,031,000 | 39.924626 | 119 | 0.555604 | false |
jwdebelius/Machiavellian | machivellian/power.py | 1 | 15760 | r"""
Empirical Power Estimation (:mod:`skbio.stats.power`)
=====================================================
.. currentmodule:: skbio.stats.power
The purpose of this module is to provide empirical, post-hoc power estimation
of normally and non-normally distributed data. It also provides support to
subsample data to facilitate this analysis.
The underlying principle is based on subsampling and Monte Carlo simulation.
Assume that there is some set of populations, :math:`K_{1}, K_{2}, ... K_{n}`
which have some property, :math:`\mu` such that :math:`\mu_{1} \neq \mu_{2}
\neq ... \neq \mu_{n}`. For each of the populations, a sample, :math:`S` can be
drawn, with a parameter, :math:`x` where :math:`x \approx \mu` and for the
samples, we can use a test, :math:`f`, to show that :math:`x_{1} \neq x_{2}
\neq ... \neq x_{n}`.
Since we know that :math:`\mu_{1} \neq \mu_{2} \neq ... \neq \mu_{n}`,
we know we should reject the null hypothesis. If we fail to reject the null
hypothesis, we have committed a Type II error and our result is a false
negative. We can estimate the frequency of Type II errors at various sampling
depths by repeatedly subsampling the populations and observing how often we
see a false negative. If we repeat this several times for each subsampling
depth, and vary the depths we use, we can start to approximate a relationship
between the number of samples we use and the rate of false negatives, also
called the statistical power of the test.
To generate complete power curves from data which appears underpowered, the
`statsmodels.stats.power` package can be used to solve for an effect size. The
effect size can be used to extrapolate a power curve for the data.
Most functions in this module accept a statistical test function which takes a
list of samples and returns a p value. The test is then evaluated over a series
of subsamples.
Sampling may be handled in two ways. For any set of samples, we may simply
choose to draw :math:`n` observations at random for each sample. Alternatively,
if metadata is available, samples can be matched based on a set of control
categories so that paired samples are drawn at random from the set of available
matches.
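
As a minimal, illustrative sketch (the test function and data below are
hypothetical and only show the calling convention)::

    import numpy as np
    import scipy.stats

    def f(samples):
        # `samples` is a list of arrays; return a single p value
        return scipy.stats.ttest_ind(samples[0], samples[1])[1]

    samples = [np.random.normal(0.0, 1, 50), np.random.normal(0.5, 1, 50)]
    power = subsample_power(f, samples, counts=[5, 10, 20, 40])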
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from functools import partial
import numpy as np
import scipy.stats
def subsample_power(test, samples, counts, draw_mode='ind', numeric=True,
alpha=0.05, ratio=None, bootstrap=True, num_iter=500,
num_runs=10, test_kwargs=None):
"""Subsamples data to iteratively calculate power
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values when `numeric == True`; or a boolean value
indicating the null hypothesis should be rejected, or a
one-dimensional array of boolean values indicating the null
hypothesis should be rejected when `numeric == False`. Additional
keyword arguments can be provided with `test_kwargs`.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
counts : array-like
The depths at which to sample the data. If `bootstrap == False`, the
largest count depth times the group ratio cannot be greater than the
number of observations in each group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
numeric : bool, optional
Indicates whether `test` returns a numeric p-value or array of numeric
p values (`numeric=True`), or a boolean (`numeric=False`).
alpha : float, optional
The critical value used to calculate the power.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one. If `bootstrap == False`, then
the product of the `ratio` and a sampling depth specified by `counts`
cannot be greater than the number of observations in the respective
sample.
bootstrap : bool, optional
Indicates whether subsampling should be performed with replacement
(`bootstrap == True`) or without.
num_iter : positive int, optional
The number of p-values to generate for each point
on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
test_kwargs: dict, optional
Additional keyword arguments for the `test` which may include
parameters like a dataframe of values or distance matrix.
Returns
-------
ndarray
The power calculated for each subsample at each count. The array has
`num_runs` rows, a length with the same number of elements as
        `counts`, and a depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
ValueError
        When `bootstrap` is False, and `counts` and `ratio` would draw more
        observations than exist in a sample.
"""
if isinstance(test_kwargs, dict):
test = partial(test, **test_kwargs)
# Checks the inputs
ratio, num_p = _check_subsample_power_inputs(test=test,
samples=samples,
draw_mode=draw_mode,
ratio=ratio,
bootstrap=bootstrap,
counts=counts)
    # Preallocates the power array
power = np.zeros((num_runs, len(counts), num_p))
for id2, c in enumerate(counts):
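        # Scale the overall depth by the per-group ratio to get integer draw counts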
count = np.round(c * ratio, 0).astype(int)
for id1 in range(num_runs):
ps = _compare_distributions(test=test,
samples=samples,
num_p=num_p,
counts=count,
num_iter=num_iter,
bootstrap=bootstrap,
mode=draw_mode)
power[id1, id2, :] = _calculate_power(ps,
numeric=numeric,
alpha=alpha)
power = power.squeeze()
return power
def confidence_bound(vec, alpha=0.05, df=None, axis=None):
r"""Calculates a confidence bound assuming a normal distribution
Parameters
----------
vec : array_like
The array of values to use in the bound calculation.
alpha : float, optional
The critical value, used for the confidence bound calculation.
df : float, optional
The degrees of freedom associated with the
distribution. If None is given, df is assumed to be the number of
elements in specified axis.
axis : positive int, optional
The axis over which to take the deviation. When axis
is None, a single value will be calculated for the whole matrix.
Returns
-------
bound : float
The confidence bound around the mean. The confidence interval is
[mean - bound, mean + bound].
"""
# Determines the number of non-nan counts
vec = np.asarray(vec)
vec_shape = vec.shape
if axis is None and len(vec_shape) == 1:
num_counts = vec_shape[0] - np.isnan(vec).sum()
elif axis is None:
num_counts = vec_shape[0] * vec_shape[1] - np.isnan(vec).sum()
else:
num_counts = vec_shape[axis] - np.isnan(vec).sum() / \
(vec_shape[0] * vec_shape[1])
# Gets the df if not supplied
if df is None:
df = num_counts - 1
# Calculates the bound
# In the conversion from scipy.stats.nanstd -> np.nanstd `ddof=1` had to be
# added to match the scipy default of `bias=False`.
bound = np.nanstd(vec, axis=axis, ddof=1) / np.sqrt(num_counts - 1) * \
scipy.stats.t.ppf(1 - alpha / 2, df)
return bound
def _compare_distributions(test, samples, num_p, counts=5, mode="ind",
bootstrap=True, num_iter=100):
r"""Compares two distribution arrays iteratively
"""
    # Preallocates the p-value matrix
p_values = np.zeros((num_p, num_iter))
# Determines the number of samples per group
num_groups = len(samples)
samp_lens = [len(sample) for sample in samples]
if isinstance(counts, int):
counts = np.array([counts] * num_groups)
for idx in range(num_iter):
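        # In "matched" mode the same positions are drawn from every group;
        # otherwise each group is subsampled independently.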
if mode == "matched":
pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
replace=bootstrap)
subs = [sample[pos] for sample in samples]
else:
subs = [np.random.choice(np.array(pop), counts[i],
replace=bootstrap)
for i, pop in enumerate(samples)]
p_values[:, idx] = test(subs)
if num_p == 1:
p_values = p_values.squeeze()
return p_values
def _calculate_power(p_values, alpha=0.05, numeric=True):
r"""Calculates statistical power empirically for p-values
Parameters
----------
p_values : 1-D array
A 1-D numpy array with the test results.
alpha : float
The critical value for the power calculation.
numeric : Boolean
Indicates whether a numeric p value should be used
Returns
-------
power : float
The empirical power, or the fraction of observed p values below the
critical value.
"""
if numeric:
reject = np.atleast_2d(p_values < alpha)
else:
reject = np.atleast_2d(p_values)
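    # Power is the fraction of iterations in which the null hypothesis was rejected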
    w = reject.sum(axis=1) / reject.shape[1]
return w
def _check_subsample_power_inputs(test, samples, counts, draw_mode='ind',
ratio=None, bootstrap=True):
"""Makes sure that everything is sane before power calculations
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array-like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
counts : 1-D array
The number of samples to use for each power depth calculation. If
        `bootstrap` is False, then `counts` and `ratio` must be scaled so that
no more samples are drawn than exist in a sample.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
bootstrap : Bool
Whether samples should be bootstrapped or subsampled without
replacement. When `bootstrap == False`, `counts` and `ratio` must
be scaled so that no more observations are drawn than exist in a
sample.
Returns
-------
ratio : 1-D array
The fraction of the sample counts which should be assigned to each
group.
num_p : positive integer
The number of p values returned by `test`.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
ValueError
        When `bootstrap` is False, and `counts` and `ratio` would draw more
        observations than exist in a sample.
"""
# Checks the sample drawing model
if draw_mode not in {'ind', 'matched'}:
raise ValueError('mode must be "matched" or "ind".')
# Determines the minimum number of ids in a category
id_counts = np.array([len(id_) for id_ in samples])
num_groups = len(samples)
# Checks the ratio argument
if ratio is None or draw_mode == 'matched':
ratio = np.ones((num_groups))
else:
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
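    # The largest depth that can be drawn without exhausting any group at this ratio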
ratio_counts = np.array([id_counts[i] / ratio[i]
for i in range(num_groups)])
largest = ratio_counts.min()
# Determines the number of p values returned by the test
p_return = test(samples)
if isinstance(p_return, float):
num_p = 1
elif isinstance(p_return, np.ndarray) and len(p_return.shape) == 1:
num_p = p_return.shape[0]
else:
raise TypeError('test must return a float or one-dimensional array.')
# Checks the subsample size
counts = np.asarray(counts)
if counts.min() < 2:
        raise ValueError('you cannot test fewer than 2 samples per group')
elif not bootstrap and counts.max() > largest:
raise ValueError('Sampling depth is too high. Please use replacement '
'or pick fewer observations.')
return ratio, num_p
| bsd-3-clause | -7,454,167,816,452,190,000 | 39.204082 | 79 | 0.621066 | false |
cherrypy/magicbus | magicbus/plugins/servers.py | 1 | 15085 | """
Multiple servers/ports
======================
If you need to start more than one HTTP server (to serve on multiple ports, or
protocols, etc.), you can manually register each one and then start them all
with bus.transition("RUN")::
s1 = ServerPlugin(bus, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerPlugin(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
bus.transition("RUN")
.. index:: SCGI
FastCGI/SCGI
============
There are also Flup\ **F**\ CGIServer and Flup\ **S**\ CGIServer classes in
:mod:`magicbus.plugins.servers`. To start an fcgi server, for example,
wrap an instance of it in a ServerPlugin::
addr = ('0.0.0.0', 4000)
f = servers.FlupFCGIServer(application=mywsgiapp, bindAddress=addr)
s = servers.ServerPlugin(bus, httpserver=f, bind_addr=addr)
s.subscribe()
Note that you need to download and install `flup <http://trac.saddi.com/flup>`_
yourself.
.. _fastcgi:
.. index:: FastCGI
FastCGI
-------
A very simple setup lets your server run with FastCGI.
You just need the flup library,
plus a running Apache server (with ``mod_fastcgi``) or lighttpd server.
Apache
^^^^^^
At the top level in httpd.conf::
FastCgiIpcDir /tmp
FastCgiServer /path/to/myapp.fcgi -idle-timeout 120 -processes 4
And inside the relevant VirtualHost section::
# FastCGI config
AddHandler fastcgi-script .fcgi
ScriptAliasMatch (.*$) /path/to/myapp.fcgi$1
Lighttpd
^^^^^^^^
For `Lighttpd <http://www.lighttpd.net/>`_ you can follow these
instructions. Within ``lighttpd.conf`` make sure ``mod_fastcgi`` is
active within ``server.modules``. Then, within your ``$HTTP["host"]``
directive, configure your fastcgi script like the following::
$HTTP["url"] =~ "" {
fastcgi.server = (
"/" => (
"script.fcgi" => (
"bin-path" => "/path/to/your/script.fcgi",
"socket" => "/tmp/script.sock",
"check-local" => "disable",
"disable-time" => 1,
"min-procs" => 1,
"max-procs" => 1, # adjust as needed
),
),
)
    } # end of $HTTP["url"] =~ ""
Please see `Lighttpd FastCGI Docs
<http://redmine.lighttpd.net/wiki/lighttpd/Docs:ModFastCGI>`_ for an
explanation of the possible configuration options.
"""
import socket
import sys
import threading
import time
import warnings
class ServerPlugin(object):
"""Bus plugin for an HTTP server.
You don't have to use this plugin; you can make your own that listens on
the appropriate bus channels. This one is designed to:
* wrap HTTP servers whose accept loop blocks by running it in a
separate thread; any exceptions in it exit the bus
* wait until the server is truly ready to receive requests before
returning from the bus START listener
    * wait until the server has finished processing requests before
returning from the bus STOP listener
* log server start/stop via the bus
The httpserver argument MUST possess 'start' and 'stop' methods,
and a 'ready' boolean attribute which is True when the HTTP server
is ready to receive requests on its socket.
If you need to start more than one HTTP server (to serve on multiple
ports, or protocols, etc.), you can manually register each one and then
start them all with bus.transition("RUN")::
s1 = ServerPlugin(bus, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerPlugin(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
bus.transition("RUN")
"""
def __init__(self, bus, httpserver=None, bind_addr=None):
self.bus = bus
self.httpserver = httpserver
self.bind_addr = bind_addr
self.interrupt = None
self.running = False
def subscribe(self):
self.bus.subscribe('START', self.START)
self.bus.subscribe('STOP', self.STOP)
def unsubscribe(self):
self.bus.unsubscribe('START', self.START)
self.bus.unsubscribe('STOP', self.STOP)
@property
def interface(self):
if self.bind_addr is None:
return 'unknown interface (dynamic?)'
elif isinstance(self.bind_addr, tuple):
host, port = self.bind_addr
return '%s:%s' % (host, port)
else:
return 'socket file: %s' % self.bind_addr
def START(self):
"""Start the HTTP server."""
if self.running:
self.bus.log('Already serving on %s' % self.interface)
return
self.interrupt = None
if not self.httpserver:
raise ValueError('No HTTP server has been created.')
# Start the httpserver in a new thread.
if isinstance(self.bind_addr, tuple):
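            # Wait for any previous server to release the port before binding.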
wait_for_free_port(*self.bind_addr)
t = threading.Thread(target=self._start_http_thread)
t.setName('HTTPServer ' + t.getName())
self.bus.log('Starting on %s' % self.interface)
t.start()
self.wait()
self.running = True
self.bus.log('Serving on %s' % self.interface)
START.priority = 75
def _start_http_thread(self):
"""HTTP servers MUST be running in new threads, so that the
main thread persists to receive KeyboardInterrupt's. If an
exception is raised in the httpserver's thread then it's
trapped here, and the bus (and therefore our httpserver)
are shut down.
"""
try:
self.httpserver.start()
except KeyboardInterrupt:
self.bus.log('<Ctrl-C> hit: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.transition('EXITED')
except SystemExit:
self.bus.log('SystemExit raised: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.transition('EXITED')
raise
except:
self.interrupt = sys.exc_info()[1]
self.bus.log('Error in HTTP server: shutting down',
traceback=True, level=40)
self.bus.transition('EXITED')
raise
def wait(self):
"""Wait until the HTTP server is ready to receive requests."""
while not getattr(self.httpserver, 'ready', False):
if self.interrupt:
raise self.interrupt
time.sleep(.1)
# Wait for port to be occupied
if isinstance(self.bind_addr, tuple):
host, port = self.bind_addr
self.bus.log('Waiting for %s' % self.interface)
wait_for_occupied_port(host, port)
def STOP(self):
"""Stop the HTTP server."""
if self.running:
# stop() MUST block until the server is *truly* stopped.
self.httpserver.stop()
# Wait for the socket to be truly freed.
if isinstance(self.bind_addr, tuple):
wait_for_free_port(*self.bind_addr)
self.running = False
self.bus.log('HTTP Server %s shut down' % self.httpserver)
else:
self.bus.log('HTTP Server %s already shut down' % self.httpserver)
STOP.priority = 25
# ------- Wrappers for various HTTP servers for use with ServerPlugin ------- #
# These are not plugins, so they don't use the bus states as method names.
class FlupCGIServer(object):
"""Adapter for a flup.server.cgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the CGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.cgi import WSGIServer
self.cgiserver = WSGIServer(*self.args, **self.kwargs)
self.ready = True
self.cgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
class FlupFCGIServer(object):
"""Adapter for a flup.server.fcgi.WSGIServer."""
def __init__(self, *args, **kwargs):
if kwargs.get('bindAddress', None) is None:
if not hasattr(socket, 'fromfd'):
raise ValueError(
'Dynamic FCGI server not available on this platform. '
'You must use a static or external one by providing a '
'legal bindAddress.')
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the FCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.fcgi import WSGIServer
self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.fcgiserver._installSignalHandlers = lambda: None
self.fcgiserver._oldSIGs = []
self.ready = True
self.fcgiserver.run()
def stop(self):
"""Stop the HTTP server."""
# Forcibly stop the fcgi server main event loop.
self.fcgiserver._keepGoing = False
# Force all worker threads to die off.
self.fcgiserver._threadPool.maxSpare = (
self.fcgiserver._threadPool._idleCount)
self.ready = False
class FlupSCGIServer(object):
"""Adapter for a flup.server.scgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the SCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.scgi import WSGIServer
self.scgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.scgiserver._installSignalHandlers = lambda: None
self.scgiserver._oldSIGs = []
self.ready = True
self.scgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
# Forcibly stop the scgi server main event loop.
self.scgiserver._keepGoing = False
# Force all worker threads to die off.
self.scgiserver._threadPool.maxSpare = 0
# ---------------------------- Utility functions ---------------------------- #
def client_host(server_host):
"""Return the host on which a client can connect to the given listener."""
if server_host == '0.0.0.0':
# 0.0.0.0 is INADDR_ANY, which should answer on localhost.
return '127.0.0.1'
if server_host in ('::', '::0', '::0.0.0.0'):
# :: is IN6ADDR_ANY, which should answer on localhost.
# ::0 and ::0.0.0.0 are non-canonical but common ways to write
# IN6ADDR_ANY.
return '::1'
return server_host
def check_port(host, port, timeout=1.0):
"""Raise OSError if the given port is not free on the given host."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
host = client_host(host)
port = int(port)
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
except socket.gaierror:
if ':' in host:
info = [
(socket.AF_INET6, socket.SOCK_STREAM, 0, '',
(host, port, 0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, '', (host, port))]
for res in info:
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(timeout)
s.connect((host, port))
s.close()
except (IOError, OSError):
if s:
s.close()
else:
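            # connect() succeeded, so something is already listening on this port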
raise OSError(
'Port %s is in use on %s; perhaps the previous '
'httpserver did not shut down properly.' %
(repr(port), repr(host))
)
# Feel free to increase these defaults on slow systems:
free_port_timeout = 0.1
occupied_port_timeout = 0.25
def wait_for_free_port(host, port, timeout=None):
"""Wait for the specified port to become free (drop requests)."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
if timeout is None:
timeout = free_port_timeout
for trial in range(50):
try:
# we are expecting a free port, so reduce the timeout
check_port(host, port, timeout=timeout)
except OSError:
# Give the old server thread time to free the port.
time.sleep(timeout)
else:
return
raise OSError('Port %r not free on %r' % (port, host))
def wait_for_occupied_port(host, port, timeout=None):
"""Wait for the specified port to become active (receive requests)."""
if not host:
raise ValueError("Host values of '' or None are not allowed.")
if timeout is None:
timeout = occupied_port_timeout
for trial in range(50):
try:
check_port(host, port, timeout=timeout)
except OSError:
return
else:
time.sleep(timeout)
if host == client_host(host):
raise OSError('Port %r not bound on %r' % (port, host))
# On systems where a loopback interface is not available and the
# server is bound to all interfaces, it's difficult to determine
# whether the server is in fact occupying the port. In this case,
# just issue a warning and move on. See issue #1100.
msg = 'Unable to verify that the server is bound on %r' % port
warnings.warn(msg)
| bsd-3-clause | 5,599,271,003,952,700,000 | 33.838337 | 79 | 0.6 | false |
PavolVican/libyang | swig/python/tests/test_tree_data.py | 1 | 24317 | #!/usr/bin/env python
from __future__ import print_function
__author__ = "Matija Amidzic <[email protected]>"
__copyright__ = "Copyright 2018, Deutsche Telekom AG"
__license__ = "BSD 3-Clause"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yang as ly
import unittest
import sys
import config
lys_module_a = \
"<?xml version=\"1.0\" encoding=\"UTF-8\"?> \
<module name=\"a\" \
xmlns=\"urn:ietf:params:xml:ns:yang:yin:1\" \
xmlns:md=\"urn:ietf:params:xml:ns:yang:ietf-yang-metadata\"\
xmlns:a=\"urn:a\"> \
<namespace uri=\"urn:a\"/> \
<prefix value=\"a_mod\"/> \
<include module=\"asub\"/> \
<include module=\"atop\"/> \
<import module=\"ietf-yang-metadata\"> \
<prefix value=\"md\"/> \
</import> \
<feature name=\"foo\"/> \
<grouping name=\"gg\"> \
<leaf name=\"bar-gggg\"> \
<type name=\"string\"/> \
</leaf> \
</grouping> \
<md:annotation name=\"test\"> \
<type name=\"string\"/> \
</md:annotation> \
<container name=\"x\"> \
<leaf name=\"bar-leaf\"> \
<type name=\"string\"/> \
</leaf> \
<uses name=\"gg\"> \
</uses> \
<leaf name=\"baz\"> \
<type name=\"string\"/> \
</leaf> \
<leaf name=\"bubba\"> \
<type name=\"string\"/> \
</leaf> \
<leaf name=\"number32\"> \
<type name=\"int32\"/> \
</leaf> \
<leaf name=\"number64\"> \
<type name=\"int64\"/> \
</leaf> \
<leaf name=\"def-leaf\"> \
<type name=\"string\"/> \
<default value=\"def\"/> \
</leaf> \
</container> \
<leaf name=\"y\"><type name=\"string\"/></leaf> \
<anyxml name=\"any\"/> \
<augment target-node=\"/x\"> \
<container name=\"bar-y\"/> \
</augment> \
<rpc name=\"bar-rpc\"> \
</rpc> \
<rpc name=\"foo-rpc\"> \
</rpc> \
<rpc name=\"rpc1\"> \
<input> \
<leaf name=\"input-leaf1\"> \
<type name=\"string\"/> \
</leaf> \
<container name=\"x\"> \
<leaf name=\"input-leaf2\"> \
<type name=\"string\"/> \
</leaf> \
</container> \
</input> \
<output> \
<leaf name=\"output-leaf1\"> \
<type name=\"string\"/> \
</leaf> \
<leaf name=\"output-leaf2\"> \
<type name=\"string\"/> \
</leaf> \
<container name=\"rpc-container\"> \
<leaf name=\"output-leaf3\"> \
<type name=\"string\"/> \
</leaf> \
</container> \
</output> \
</rpc> \
<list name=\"l\"> \
<key value=\"key1 key2\"/> \
<leaf name=\"key1\"> \
<type name=\"uint8\"/> \
</leaf> \
<leaf name=\"key2\"> \
<type name=\"uint8\"/> \
</leaf> \
<leaf name=\"value\"> \
<type name=\"string\"/> \
</leaf> \
</list> \
</module> \
"
a_data_xml = "\
<x xmlns=\"urn:a\">\n\
<bubba>test</bubba>\n\
</x>\n"
result_xml = "<x xmlns=\"urn:a\"><bubba>test</bubba></x>"
result_xml_format ="\
<x xmlns=\"urn:a\">\n\
<bubba>test</bubba>\n\
</x>\n\
"
result_json = "\
{\n\
\"a:x\": {\n\
\"bubba\": \"test\"\n\
}\n\
}\n\
"
class UnexpectedError(Exception):
"""Exception raised for errors that are not expected.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
class TestUM(unittest.TestCase):
def test_ly_ctx_parse_data_mem(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
# Tests
root = ctx.parse_data_mem(a_data_xml, ly.LYD_XML, ly.LYD_OPT_NOSIBLINGS | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
self.assertEqual("x", root.schema().name())
except Exception as e:
self.fail(e)
def test_ly_ctx_parse_data_fd(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
# Tests
f = open(config_file, 'r')
fd = f.fileno()
root = ctx.parse_data_fd(fd, ly.LYD_XML, ly.LYD_OPT_NOSIBLINGS | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
self.assertEqual("x", root.schema().name())
except Exception as e:
self.fail(e)
finally:
f.close()
def test_ly_ctx_parse_data_path(self):
yang_folder = config.TESTS_DIR + "/api/files"
yin_file = config.TESTS_DIR + "/api/files/a.yin"
config_file = config.TESTS_DIR + "/api/files/a.xml"
module_name = "a"
schema_name = "x"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
module = ctx.parse_module_path(yin_file, ly.LYS_IN_YIN)
self.assertIsNotNone(module)
self.assertEqual(module_name, module.name(), "Module names don't match")
# Tests
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
self.assertEqual(schema_name, root.schema().name())
except Exception as e:
self.fail(e)
def test_ly_ctx_parse_data_path_invalid(self):
yang_folder = config.TESTS_DIR + "/api/files"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
# Tests
root = ctx.parse_data_path("INVALID_PATH", ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
raise UnexpectedError("exception not thrown")
except UnexpectedError as e:
self.fail(e)
except RuntimeError as e:
return
except Exception as e:
self.fail(e)
def test_ly_data_node(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
new_node = ly.Data_Node(root, root.schema().module(), "number32", "100")
self.assertIsNotNone(new_node)
dup_node = new_node.dup(0)
self.assertIsNotNone(dup_node)
except Exception as e:
self.fail(e)
def test_ly_data_node_new_path(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
mod = ctx.get_module("a", None, 1)
self.assertIsNotNone(mod)
# Tests
root = ly.Data_Node(ctx, "/a:x/bar-gggg", "a", 0, 0)
self.assertIsNotNone(root)
self.assertEqual("x", root.schema().name())
self.assertEqual("bar-gggg", root.child().schema().name())
node = root.new_path(ctx, "def-leaf", "def", 0, ly.LYD_PATH_OPT_DFLT)
self.assertIsNotNone(node)
self.assertEqual("def-leaf", node.schema().name())
self.assertEqual(1, node.dflt())
node = root.new_path(ctx, "def-leaf", "def", 0, 0)
self.assertIsNotNone(node)
self.assertEqual("def-leaf", node.schema().name())
self.assertEqual(0, node.dflt())
node = root.new_path(ctx, "bubba", "b", 0, 0)
self.assertIsNotNone(node)
self.assertEqual("bubba", node.schema().name())
node = root.new_path(ctx, "/a:x/number32", "3", 0, 0)
self.assertIsNotNone(node)
self.assertEqual("number32", node.schema().name())
node = root.new_path(ctx, "/a:l[key1='1'][key2='2']/value", None, 0, 0)
self.assertIsNotNone(node)
self.assertEqual("l", node.schema().name())
self.assertEqual("key1", node.child().schema().name())
self.assertEqual("key2", node.child().next().schema().name())
self.assertEqual("value", node.child().next().next().schema().name())
except Exception as e:
self.fail(e)
def test_ly_data_node_insert(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
new_node = ly.Data_Node(root, root.schema().module(), "number32", "200")
self.assertIsNotNone(new_node)
rc = root.insert(new_node)
self.assertEqual(0, rc)
self.assertEqual("number32", root.child().prev().schema().name())
except Exception as e:
self.fail(e)
def test_ly_data_node_insert_sibling(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
last = root.prev()
new_node = ly.Data_Node(None, root.schema().module(), "y", "test")
self.assertIsNotNone(new_node)
rc = root.insert_sibling(new_node)
self.assertEqual(0, rc)
self.assertNotEqual(last.schema().name(), root.prev().schema().name())
self.assertEqual("y", root.prev().schema().name())
except Exception as e:
self.fail(e)
def test_ly_data_node_insert_before(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
last = root.prev()
new_node = ly.Data_Node(None, root.schema().module(), "y", "test")
self.assertIsNotNone(new_node)
rc = root.insert_before(new_node)
self.assertEqual(0, rc)
self.assertNotEqual(last.schema().name(), root.prev().schema().name())
self.assertEqual("y", root.prev().schema().name())
except Exception as e:
self.fail(e)
def test_ly_data_node_insert_after(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
last = root.next()
new_node = ly.Data_Node(None, root.schema().module(), "y", "test")
self.assertIsNotNone(new_node)
rc = root.insert_after(new_node)
self.assertEqual(0, rc)
self.assertNotEqual(last.schema().name(), root.next().schema().name())
self.assertEqual("y", root.next().schema().name())
except Exception as e:
self.fail(e)
def test_ly_data_node_schema_sort(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
mod = ctx.get_module("a", None, 1)
self.assertIsNotNone(mod)
# Tests
root = ly.Data_Node(None, mod, "l")
self.assertIsNotNone(root)
node = ly.Data_Node(root, mod, "key1", "1")
self.assertIsNotNone(node)
node = ly.Data_Node(root, mod, "key2", "2")
self.assertIsNotNone(node)
node = ly.Data_Node(None, mod, "x")
self.assertIsNotNone(node)
rc = root.insert_after(node)
self.assertEqual(0, rc)
node = root.next()
node2 = ly.Data_Node(node, mod, "bubba", "a")
self.assertIsNotNone(node2)
node2 = ly.Data_Node(node, mod, "bar-gggg", "b")
self.assertIsNotNone(node2)
node2 = ly.Data_Node(node, mod, "number64", "64")
self.assertIsNotNone(node2)
node2 = ly.Data_Node(node, mod, "number32", "32")
self.assertIsNotNone(node2)
rc = root.schema_sort(1)
self.assertEqual(0, rc)
root = node
self.assertEqual("x", root.schema().name())
self.assertEqual("l", root.next().schema().name())
self.assertEqual("bar-gggg", root.child().schema().name())
self.assertEqual("bubba", root.child().next().schema().name())
self.assertEqual("number32", root.child().next().next().schema().name())
self.assertEqual("number64", root.child().next().next().next().schema().name())
except Exception as e:
self.fail(e)
def test_ly_data_node_find_path(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
node = root.child()
self.assertIsNotNone(node)
set = node.find_path("/a:x/bubba")
self.assertIsNotNone(set)
self.assertEqual(1, set.number())
except Exception as e:
self.fail(e)
def test_ly_data_node_find_instance(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
node = root.child()
self.assertIsNotNone(node)
set = node.find_instance(node.schema())
self.assertIsNotNone(set)
self.assertEqual(1, set.number())
except Exception as e:
self.fail(e)
def test_ly_data_node_validate(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
rc = root.validate(ly.LYD_OPT_CONFIG, ctx)
self.assertEqual(0, rc)
new = ly.Data_Node(root, root.schema().module(), "number32", "1")
self.assertIsNotNone(new)
rc = root.insert(new)
self.assertEqual(0, rc)
rc = root.validate(ly.LYD_OPT_CONFIG, ctx)
self.assertEqual(0, rc)
except Exception as e:
self.fail(e)
def test_ly_data_node_unlink(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
node = root.child()
new = ly.Data_Node(root, node.schema().module(), "number32", "1")
self.assertIsNotNone(new)
rc = root.insert(new)
self.assertEqual(0, rc)
schema = node.prev().schema()
if (ly.LYS_LEAF == schema.nodetype() or ly.LYS_LEAFLIST == schema.nodetype()):
casted = node.prev().subtype()
self.assertEqual("1", casted.value_str())
else:
self.fail()
rc = node.prev().unlink()
self.assertEqual(0, rc)
schema = node.prev().schema()
if (ly.LYS_LEAF == schema.nodetype() or ly.LYS_LEAFLIST == schema.nodetype()):
self.fail()
else:
return
except Exception as e:
self.fail(e)
def test_ly_data_node_print_mem_xml(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
result = root.print_mem(ly.LYD_XML, 0)
self.assertEqual(result_xml, result)
except Exception as e:
self.fail(e)
def test_ly_data_node_print_mem_xml_format(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
result = root.print_mem(ly.LYD_XML, ly.LYP_FORMAT)
self.assertEqual(result_xml_format, result)
except Exception as e:
self.fail(e)
def test_ly_data_node_print_mem_json(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
result = root.print_mem(ly.LYD_JSON, ly.LYP_FORMAT)
self.assertEqual(result_json, result)
except Exception as e:
self.fail(e)
def test_ly_data_node_path(self):
yang_folder = config.TESTS_DIR + "/api/files"
config_file = config.TESTS_DIR + "/api/files/a.xml"
try:
# Setup
ctx = ly.Context(yang_folder)
self.assertIsNotNone(ctx)
ctx.parse_module_mem(lys_module_a, ly.LYS_IN_YIN)
root = ctx.parse_data_path(config_file, ly.LYD_XML, ly.LYD_OPT_CONFIG | ly.LYD_OPT_STRICT)
self.assertIsNotNone(root)
# Tests
str = root.path()
self.assertIsNotNone(str)
self.assertEqual("/a:x", str)
str = root.child().path()
self.assertIsNotNone(str)
self.assertEqual("/a:x/bubba", str)
except Exception as e:
self.fail(e)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,908,619,082,135,739,000 | 37.721338 | 105 | 0.483119 | false |
rjw57/foldbeam | foldbeam/rendering/renderer/tile_fetcher.py | 1 | 8728 | import math
import logging
import StringIO
import sys
import cairo
import numpy as np
from osgeo.osr import SpatialReference
from PIL import Image
import httplib2
from foldbeam.rendering.renderer.base import RendererBase, set_geo_transform
from foldbeam.rendering.renderer.decorator import reproject_from_native_spatial_reference
log = logging.getLogger()
class URLFetchError(Exception):
"""An error raised by a custom URL fetchber for TileFetcher if the URL could not be fetchbed."""
pass
class TileFetcher(RendererBase):
"""Render from slippy map tile URLs.
This is somewhat incomplete at the moment. Given a Google/Bing/OSM-style slippy map tile URL pattern of the form
``http://server/path/to/tiles/{zoom}/{x}/{y}.format``, this renderer can render the tiles to a Cairo context.
In addition to ``{x}`` and ``{y}``, ``{quadkey}`` can be used to support Bing-style quad keys. See
http://msdn.microsoft.com/en-us/library/bb259689.aspx.
.. note::
If no spatial reference is specified, it will default to EPSG:3857. Similarly, if no bounds are specified, the
default is to assume the bounds of this projection (x and y being +/- 20037508.34 metres).
The default URL pattern is ``http://otile1.mqcdn.com/tiles/1.0.0/osm/{zoom}/{x}/{y}.jpg`` which will load tiles
from the MapQuest servers.
If the *url_fetcher* parameter is specified, it is a callable which takes a single string giving a URL as the first
argument and returns a sequence of bytes for the URL contents. It can raise URLFetchError if the resource is not
available. If no fetcher is provided, :py:func:`default_url_fetcher` is used. The fetcher callable must be
thread-safe.
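
    A minimal usage sketch (the Cairo ``context`` is assumed to be set up
    elsewhere, with its user space in EPSG:3857 metres)::

        fetcher = TileFetcher()                  # default MapQuest tile URLs
        draw = fetcher.render_callable(context)  # fetches the tiles covering the clip area
        draw()                                   # paints them into the context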
:param url_pattern: default is to use MapQuest, a pattern for calculating the URL to load tiles from
:type url_pattern: string
:param spatial_reference: default EPSG:3857, the native spatial reference for the tiles
:type spatial_reference: osgeo.osr.SpatialReference or None
:param tile_size: default (256, 256), the width and height of one tile in pixels
:type tile_size: tuple of integer or None
:param bounds: default as noted above, the left, right, top and bottom boundary of the projection
:type bounds: tuple of float or None
:param url_fetcher: which callable to use for URL fetching
:type url_fetcher: callable or None
"""
def __init__(self, url_pattern=None, spatial_reference=None, tile_size=None, bounds=None, url_fetcher=None):
super(TileFetcher, self).__init__()
self.url_pattern = url_pattern or 'http://otile1.mqcdn.com/tiles/1.0.0/osm/{zoom}/{x}/{y}.jpg'
self.tile_size = tile_size or (256, 256)
self.bounds = bounds or (-20037508.34, 20037508.34, 20037508.34, -20037508.34)
self.bounds_size = (abs(self.bounds[1] - self.bounds[0]), abs(self.bounds[3] - self.bounds[2]))
if spatial_reference is not None:
self.native_spatial_reference = spatial_reference
else:
self.native_spatial_reference = SpatialReference()
self.native_spatial_reference.ImportFromEPSG(3857)
self._fetch_url = url_fetcher or default_url_fetcher
@reproject_from_native_spatial_reference
def render_callable(self, context, spatial_reference=None):
if spatial_reference is not None and not spatial_reference.IsSame(self.native_spatial_reference):
raise ValueError('TileFetcher asked to render tile from incompatible spatial reference.')
# Calculate the distance in projection co-ordinates of one device pixel
pixel_size = context.device_to_user_distance(1,1)
# And hence the size in projection co-ordinates of one tile
ideal_tile_size = tuple([abs(x[0] * x[1]) for x in zip(pixel_size, self.tile_size)])
        # How many powers of two smaller than the bounds is this?
ideal_zoom = tuple([math.log(x[0],2) - math.log(x[1],2) for x in zip(self.bounds_size, ideal_tile_size)])
# What zoom will we *actually* use
zoom = min(18, max(0, int(round(max(*ideal_zoom)))))
# How many tiles at this zoom level?
n_tiles = 1<<zoom
# Calculate the tile co-ordinates for the clip area extent
min_px, min_py, max_px, max_py = context.clip_extents()
# This give tile co-ordinates for the extremal tiles
tl = [int(math.floor(x)) for x in self._projection_to_tile(min_px, max_py, zoom)]
br = [int(math.floor(x)) for x in self._projection_to_tile(max_px, min_py, zoom)]
# extract the minimum/maximum x/y co-ordinate for the tiles
min_x, min_y = tl
max_x, max_y = br
tiles_to_fetch = []
for x in range(min_x, max_x+1):
# wrap the x co-ordinate in the number of tiles
wrapped_x = x % n_tiles
if wrapped_x < 0:
wrapped_x += n_tiles
for y in range(min_y, max_y+1):
# skip out of range y-tiles
if y < 0 or y >= n_tiles:
continue
# Calculate quadkey
quadkey = ''
for bit in xrange(zoom):
v = ((x>>bit)&0x1) + ((((y)>>bit)&0x1)<<1)
quadkey = str(v) + quadkey
url = self.url_pattern.format(x=wrapped_x, y=y, zoom=zoom, quadkey=quadkey)
tiles_to_fetch.append((x,y,url,self._fetch_url(url)))
def f():
# render the tiles as they come in
for x, y, url, data in tiles_to_fetch:
# load the tile into a cairo surface
surface = _cairo_surface_from_data(data)
# what extents should this tile have?
tile_x, tile_y, tile_w, tile_h = self._tile_extents(x, y, zoom)
tile_x_scale = surface.get_width() / tile_w
tile_y_scale = -surface.get_height() / tile_h
# set up the tile as a source
context.set_source_surface(surface)
context.get_source().set_matrix(cairo.Matrix(
xx = tile_x_scale,
yy = tile_y_scale,
x0 = -tile_x * tile_x_scale,
y0 = -tile_y * tile_y_scale + surface.get_height()
))
# we need to set the extend options to avoid interpolating towards zero-alpha at the edges
context.get_source().set_extend(cairo.EXTEND_PAD)
# draw the tile itself. We disable antialiasing because if the tile slightly overlaps an output
# pixel we want the interpolation of the tile to do the smoothing, not the rasteriser
context.save()
context.set_antialias(cairo.ANTIALIAS_NONE)
context.rectangle(tile_x, tile_y, tile_w, tile_h)
context.fill()
context.restore()
return f
def _tile_extents(self, tx, ty, zoom):
"""Return a tuple (minx, miny, width, height) giving the extents of a tile in projection co-ords."""
# Calculate size of one tile in projection co-ordinates
tile_size = tuple([math.pow(2.0, math.log(x,2) - zoom) for x in self.bounds_size])
left = tx * tile_size[0] + self.bounds[0]
top = self.bounds[2] - ty * tile_size[1]
return (left, top-tile_size[1], tile_size[0], tile_size[1])
def _projection_to_tile(self, px, py, zoom):
"""Convert from a projection co-ordinate to a tile co-ordinate. The tile co-ordinate system has an origin in the
top-left hand corner.
"""
# Calculate size of one tile in projection co-ordinates
tile_size = tuple([x / math.pow(2.0, zoom) for x in self.bounds_size])
# Map projection co-ords into tile co-ords
return tuple([x[0] / x[1] for x in zip((px-self.bounds[0], self.bounds[2]-py), tile_size)])
def default_url_fetcher(url):
"""The default URL fetcher to use in :py:class:`TileFetcher`. If there is an error fetching the URL a URLFetchError
is raised.
"""
http = httplib2.Http()
rep, content = http.request(url, 'GET')
if rep.status != 200:
raise URLFetchError(str(rep.status) + ' ' + rep.reason)
return content
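# Example (not part of the original module): a minimal on-disk caching fetcher
# that honours the same contract as default_url_fetcher -- it accepts a URL
# string, returns the response body as bytes and raises URLFetchError on
# failure.  The cache directory below is purely illustrative.
def caching_url_fetcher(url, cache_dir='/tmp/foldbeam-tile-cache'):
    """Fetch *url* via default_url_fetcher, caching the raw bytes on disk."""
    import hashlib
    import os
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    cache_path = os.path.join(cache_dir, hashlib.sha1(url).hexdigest())
    # serve the tile from the cache if it was fetched before
    if os.path.exists(cache_path):
        return open(cache_path, 'rb').read()
    # otherwise delegate to the default HTTP fetcher and remember the result
    content = default_url_fetcher(url)
    with open(cache_path, 'wb') as cache_file:
        cache_file.write(content)
    return content
# A fetcher like this would be passed in as
# TileFetcher(url_fetcher=caching_url_fetcher).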
def _cairo_surface_from_data(data):
# load via the PIL
image = Image.open(StringIO.StringIO(data)).convert('RGBA')
imw, imh = image.size
# swizzle RGBA -> BGRA
image = Image.frombuffer('RGBA', (imw, imh), image.tostring(), 'raw', 'BGRA', 0, 1)
# write into a Cairo surface
surface = cairo.ImageSurface.create_for_data(np.array(image), cairo.FORMAT_ARGB32, imw, imh)
return surface
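# Example usage (not part of the original module): a minimal sketch rendering
# tiles for the whole EPSG:3857 extent into a 512x512 PNG.  It assumes the
# default tile server is reachable; the output file name is illustrative only.
if __name__ == '__main__':
    size = 512
    half_extent = 20037508.34  # half the EPSG:3857 extent in metres
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
    context = cairo.Context(surface)
    # map cairo user space to projection co-ordinates (x right, y up)
    context.scale(size / (2.0 * half_extent), -size / (2.0 * half_extent))
    context.translate(half_extent, -half_extent)
    renderer = TileFetcher()  # default URL pattern and fetcher
    renderer.render_callable(context)()
    surface.write_to_png('world.png')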
| apache-2.0 | -7,803,865,678,951,885,000 | 42.64 | 120 | 0.626604 | false |
DonOregano/pyharmony | harmony/client.py | 1 | 5081 | """Client class for connecting to the Logitech Harmony."""
from __future__ import print_function
import json
import logging
import time
import sleekxmpp
from sleekxmpp.xmlstream import ET
LOGGER = logging.getLogger(__name__)
class HarmonyClient(sleekxmpp.ClientXMPP):
"""An XMPP client for connecting to the Logitech Harmony."""
def __init__(self, auth_token):
        user = '%s@connect.logitech.com/gatorade.' % auth_token
password = auth_token
plugin_config = {
# Enables PLAIN authentication which is off by default.
'feature_mechanisms': {'unencrypted_plain': True},
}
super(HarmonyClient, self).__init__(
user, password, plugin_config=plugin_config)
def get_config(self):
"""Retrieves the Harmony device configuration.
Returns:
A nested dictionary containing activities, devices, etc.
"""
iq_cmd = self.Iq()
iq_cmd['type'] = 'get'
action_cmd = ET.Element('oa')
action_cmd.attrib['xmlns'] = 'connect.logitech.com'
action_cmd.attrib['mime'] = (
'vnd.logitech.harmony/vnd.logitech.harmony.engine?config')
iq_cmd.set_payload(action_cmd)
result = iq_cmd.send(block=True)
payload = result.get_payload()
assert len(payload) == 1
action_cmd = payload[0]
assert action_cmd.attrib['errorcode'] == '200'
device_list = action_cmd.text
return json.loads(device_list)
def get_current_activity(self):
"""Retrieves the current activity.
Returns:
            An int with the activity ID.
"""
iq_cmd = self.Iq()
iq_cmd['type'] = 'get'
action_cmd = ET.Element('oa')
action_cmd.attrib['xmlns'] = 'connect.logitech.com'
action_cmd.attrib['mime'] = (
'vnd.logitech.harmony/vnd.logitech.harmony.engine?getCurrentActivity')
iq_cmd.set_payload(action_cmd)
result = iq_cmd.send(block=True)
payload = result.get_payload()
assert len(payload) == 1
action_cmd = payload[0]
assert action_cmd.attrib['errorcode'] == '200'
activity = action_cmd.text.split("=")
return int(activity[1])
def start_activity(self, activity_id):
"""Starts an activity.
Args:
activity_id: An int or string identifying the activity to start
Returns:
            A string with the result returned by the hub for the
            startactivity request.
"""
iq_cmd = self.Iq()
iq_cmd['type'] = 'get'
action_cmd = ET.Element('oa')
action_cmd.attrib['xmlns'] = 'connect.logitech.com'
action_cmd.attrib['mime'] = ('harmony.engine?startactivity')
cmd = 'activityId=' + str(activity_id) + ':timestamp=0'
action_cmd.text = cmd
iq_cmd.set_payload(action_cmd)
result = iq_cmd.send(block=True)
payload = result.get_payload()
assert len(payload) == 1
action_cmd = payload[0]
return action_cmd.text
def sync(self):
"""Syncs the harmony hub with the web service.
"""
iq_cmd = self.Iq()
iq_cmd['type'] = 'get'
action_cmd = ET.Element('oa')
action_cmd.attrib['xmlns'] = 'connect.logitech.com'
action_cmd.attrib['mime'] = ('setup.sync')
iq_cmd.set_payload(action_cmd)
result = iq_cmd.send(block=True)
payload = result.get_payload()
assert len(payload) == 1
def send_command(self, device_id, command):
"""Send a simple command to the Harmony Hub.
"""
iq_cmd = self.Iq()
iq_cmd['type'] = 'get'
iq_cmd['id'] = '5e518d07-bcc2-4634-ba3d-c20f338d8927-2'
action_cmd = ET.Element('oa')
action_cmd.attrib['xmlns'] = 'connect.logitech.com'
action_cmd.attrib['mime'] = (
'vnd.logitech.harmony/vnd.logitech.harmony.engine?holdAction')
action_cmd.text = 'action={"type"::"IRCommand","deviceId"::"'+device_id+'","command"::"'+command+'"}:status=press'
iq_cmd.set_payload(action_cmd)
result = iq_cmd.send(block=False)
return True
def power_off(self):
"""Turns the system off if it's on, otherwise it does nothing.
Returns:
True.
"""
activity = self.get_current_activity()
print(activity)
if activity != -1:
print("OFF")
self.start_activity(-1)
return True
def create_and_connect_client(ip_address, port, token):
"""Creates a Harmony client and initializes session.
Args:
ip_address: IP Address of the Harmony device.
port: Port that the Harmony device is listening on.
token: A string containing a session token.
Returns:
An instance of HarmonyClient that is connected.
"""
client = HarmonyClient(token)
client.connect(address=(ip_address, port),
use_tls=False, use_ssl=False)
client.process(block=False)
while not client.sessionstarted:
time.sleep(0.1)
return client
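# Example usage (not part of the original module): the IP address and token
# below are placeholders -- a real session token has to be obtained through
# the Harmony authentication flow first.
if __name__ == '__main__':
    client = create_and_connect_client('192.168.1.10', 5222, '<session-token>')
    print(client.get_current_activity())
    client.power_off()
    client.disconnect()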
| bsd-3-clause | 5,787,378,854,046,831,000 | 32.427632 | 122 | 0.592403 | false |
davy39/eric | Helpviewer/HelpBrowserWV.py | 1 | 92524 | # -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing the helpbrowser using QWebView.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject, QT_TRANSLATE_NOOP, \
QUrl, QBuffer, QIODevice, QFileInfo, Qt, QTimer, QEvent, \
QRect, QFile, QPoint, QByteArray, qVersion
from PyQt5.QtGui import QDesktopServices, QClipboard, QMouseEvent, QColor, \
QPalette
from PyQt5.QtWidgets import qApp, QStyle, QMenu, QApplication, QInputDialog, \
QLineEdit, QLabel, QToolTip, QFrame, QDialog
from PyQt5.QtPrintSupport import QPrinter, QPrintDialog
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
try:
from PyQt5.QtWebKit import QWebElement
except ImportError:
pass
from PyQt5.QtNetwork import QNetworkReply, QNetworkRequest
import sip
from E5Gui import E5MessageBox, E5FileDialog
import Preferences
import UI.PixmapCache
try:
from PyQt5.QtNetwork import QSslCertificate
SSL_AVAILABLE = True
except ImportError:
SSL_AVAILABLE = False
###############################################################################
class JavaScriptExternalObject(QObject):
"""
Class implementing an external javascript object to add search providers.
"""
def __init__(self, mw, parent=None):
"""
Constructor
        @param mw reference to the main window (HelpWindow)
@param parent reference to the parent object (QObject)
"""
super(JavaScriptExternalObject, self).__init__(parent)
self.__mw = mw
@pyqtSlot(str)
def AddSearchProvider(self, url):
"""
Public slot to add a search provider.
@param url url of the XML file defining the search provider (string)
"""
self.__mw.openSearchManager().addEngine(QUrl(url))
class LinkedResource(object):
"""
Class defining a data structure for linked resources.
"""
def __init__(self):
"""
Constructor
"""
self.rel = ""
self.type_ = ""
self.href = ""
self.title = ""
###############################################################################
class JavaScriptEricObject(QObject):
"""
Class implementing an external javascript object to search via the
startpage.
"""
# these must be in line with the strings used by the javascript part of
# the start page
translations = [
QT_TRANSLATE_NOOP("JavaScriptEricObject",
"Welcome to eric6 Web Browser!"),
QT_TRANSLATE_NOOP("JavaScriptEricObject", "eric6 Web Browser"),
QT_TRANSLATE_NOOP("JavaScriptEricObject", "Search!"),
QT_TRANSLATE_NOOP("JavaScriptEricObject", "About eric6"),
]
def __init__(self, mw, parent=None):
"""
Constructor
        @param mw reference to the main window (HelpWindow)
@param parent reference to the parent object (QObject)
"""
super(JavaScriptEricObject, self).__init__(parent)
self.__mw = mw
@pyqtSlot(str, result=str)
def translate(self, trans):
"""
Public method to translate the given string.
@param trans string to be translated (string)
@return translation (string)
"""
if trans == "QT_LAYOUT_DIRECTION":
# special handling to detect layout direction
if qApp.isLeftToRight():
return "LTR"
else:
return "RTL"
return self.tr(trans)
@pyqtSlot(result=str)
def providerString(self):
"""
Public method to get a string for the search provider.
@return string for the search provider (string)
"""
return self.tr("Search results provided by {0}")\
.format(self.__mw.openSearchManager().currentEngineName())
@pyqtSlot(str, result=str)
def searchUrl(self, searchStr):
"""
Public method to get the search URL for the given search term.
@param searchStr search term (string)
@return search URL (string)
"""
return bytes(
self.__mw.openSearchManager().currentEngine()
.searchUrl(searchStr).toEncoded()).decode()
###############################################################################
class HelpWebPage(QWebPage):
"""
Class implementing an enhanced web page.
"""
_webPluginFactory = None
def __init__(self, parent=None):
"""
Constructor
@param parent parent widget of this window (QWidget)
"""
super(HelpWebPage, self).__init__(parent)
self.setPluginFactory(self.webPluginFactory())
self.__lastRequest = None
self.__lastRequestType = QWebPage.NavigationTypeOther
import Helpviewer.HelpWindow
from .Network.NetworkAccessManagerProxy import \
NetworkAccessManagerProxy
self.__proxy = NetworkAccessManagerProxy(self)
self.__proxy.setWebPage(self)
self.__proxy.setPrimaryNetworkAccessManager(
Helpviewer.HelpWindow.HelpWindow.networkAccessManager())
self.setNetworkAccessManager(self.__proxy)
self.__sslConfiguration = None
self.__proxy.finished.connect(self.__managerFinished)
self.__adBlockedEntries = []
self.loadStarted.connect(self.__loadStarted)
def acceptNavigationRequest(self, frame, request, type_):
"""
Public method to determine, if a request may be accepted.
@param frame reference to the frame sending the request (QWebFrame)
@param request reference to the request object (QNetworkRequest)
@param type_ type of the navigation request (QWebPage.NavigationType)
@return flag indicating acceptance (boolean)
"""
self.__lastRequest = request
if self.__lastRequest.url() != request.url() or \
type_ != QWebPage.NavigationTypeOther:
self.__lastRequestType = type_
scheme = request.url().scheme()
if scheme == "mailto":
QDesktopServices.openUrl(request.url())
return False
if type_ == QWebPage.NavigationTypeFormResubmitted:
res = E5MessageBox.yesNo(
self.view(),
self.tr("Resending POST request"),
self.tr(
"""In order to display the site, the request along with"""
""" all the data must be sent once again, which may lead"""
""" to some unexpected behaviour of the site e.g. the"""
""" same action might be performed once again. Do you"""
""" want to continue anyway?"""),
icon=E5MessageBox.Warning)
if not res:
return False
return QWebPage.acceptNavigationRequest(self, frame, request, type_)
def populateNetworkRequest(self, request):
"""
Public method to add data to a network request.
@param request reference to the network request object
(QNetworkRequest)
"""
try:
request.setAttribute(QNetworkRequest.User + 100, self)
if self.__lastRequest.url() == request.url():
request.setAttribute(QNetworkRequest.User + 101,
self.__lastRequestType)
if self.__lastRequestType == \
QWebPage.NavigationTypeLinkClicked:
request.setRawHeader("X-Eric6-UserLoadAction",
QByteArray("1"))
except TypeError:
pass
def pageAttributeId(self):
"""
Public method to get the attribute id of the page attribute.
@return attribute id of the page attribute (integer)
"""
return QNetworkRequest.User + 100
def supportsExtension(self, extension):
"""
Public method to check the support for an extension.
@param extension extension to test for (QWebPage.Extension)
@return flag indicating the support of extension (boolean)
"""
try:
if extension in [QWebPage.ErrorPageExtension,
QWebPage.ChooseMultipleFilesExtension]:
return True
except AttributeError:
pass
return QWebPage.supportsExtension(self, extension)
def extension(self, extension, option, output):
"""
Public method to implement a specific extension.
@param extension extension to be executed (QWebPage.Extension)
@param option provides input to the extension
(QWebPage.ExtensionOption)
@param output stores the output results (QWebPage.ExtensionReturn)
@return flag indicating a successful call of the extension (boolean)
"""
if extension == QWebPage.ChooseMultipleFilesExtension:
info = sip.cast(option,
QWebPage.ChooseMultipleFilesExtensionOption)
files = sip.cast(output,
QWebPage.ChooseMultipleFilesExtensionReturn)
if info is None or files is None:
return super(HelpWebPage, self).extension(
extension, option, output)
suggestedFileName = ""
if info.suggestedFileNames:
suggestedFileName = info.suggestedFileNames[0]
files.fileNames = E5FileDialog.getOpenFileNames(
None,
self.tr("Select files to upload..."),
suggestedFileName)
return True
if extension == QWebPage.ErrorPageExtension:
info = sip.cast(option, QWebPage.ErrorPageExtensionOption)
errorPage = sip.cast(output, QWebPage.ErrorPageExtensionReturn)
urlString = bytes(info.url.toEncoded()).decode()
errorPage.baseUrl = info.url
if info.domain == QWebPage.QtNetwork and \
info.error == QNetworkReply.ContentAccessDenied and \
info.errorString.startswith("AdBlockRule:"):
if info.frame != info.frame.page().mainFrame():
# content in <iframe>
docElement = info.frame.page().mainFrame()\
.documentElement()
for element in docElement.findAll("iframe"):
src = element.attribute("src")
if src in info.url.toString():
element.setAttribute("style", "display:none;")
return False
else:
# the whole page is blocked
rule = info.errorString.replace("AdBlockRule:", "")
title = self.tr("Content blocked by AdBlock Plus")
message = self.tr(
"Blocked by rule: <i>{0}</i>").format(rule)
htmlFile = QFile(":/html/adblockPage.html")
htmlFile.open(QFile.ReadOnly)
html = htmlFile.readAll()
html = html.replace(
"@FAVICON@", "qrc:icons/adBlockPlus16.png")
html = html.replace(
"@IMAGE@", "qrc:icons/adBlockPlus64.png")
html = html.replace("@TITLE@", title.encode("utf8"))
html = html.replace("@MESSAGE@", message.encode("utf8"))
errorPage.content = html
return True
if info.domain == QWebPage.QtNetwork and \
info.error == QNetworkReply.OperationCanceledError and \
info.errorString == "eric6:No Error":
return False
if info.domain == QWebPage.WebKit and info.error == 203:
# "Loading is handled by the media engine"
return False
title = self.tr("Error loading page: {0}").format(urlString)
htmlFile = QFile(":/html/notFoundPage.html")
htmlFile.open(QFile.ReadOnly)
html = htmlFile.readAll()
pixmap = qApp.style()\
.standardIcon(QStyle.SP_MessageBoxWarning).pixmap(48, 48)
imageBuffer = QBuffer()
imageBuffer.open(QIODevice.ReadWrite)
if pixmap.save(imageBuffer, "PNG"):
html = html.replace("@IMAGE@", imageBuffer.buffer().toBase64())
pixmap = qApp.style()\
.standardIcon(QStyle.SP_MessageBoxWarning).pixmap(16, 16)
imageBuffer = QBuffer()
imageBuffer.open(QIODevice.ReadWrite)
if pixmap.save(imageBuffer, "PNG"):
html = html.replace(
"@FAVICON@", imageBuffer.buffer().toBase64())
html = html.replace("@TITLE@", title.encode("utf8"))
html = html.replace("@H1@", info.errorString.encode("utf8"))
html = html.replace(
"@H2@", self.tr("When connecting to: {0}.")
.format(urlString).encode("utf8"))
html = html.replace(
"@LI-1@",
self.tr("Check the address for errors such as "
"<b>ww</b>.example.org instead of "
"<b>www</b>.example.org").encode("utf8"))
html = html.replace(
"@LI-2@",
self.tr(
"If the address is correct, try checking the network "
"connection.").encode("utf8"))
html = html.replace(
"@LI-3@",
self.tr(
"If your computer or network is protected by a firewall "
"or proxy, make sure that the browser is permitted to "
"access the network.").encode("utf8"))
html = html.replace(
"@LI-4@",
self.tr("If your cache policy is set to offline browsing,"
"only pages in the local cache are available.")
.encode("utf8"))
html = html.replace(
"@BUTTON@", self.tr("Try Again").encode("utf8"))
errorPage.content = html
return True
return QWebPage.extension(self, extension, option, output)
def __loadStarted(self):
"""
Private method to handle the loadStarted signal.
"""
self.__adBlockedEntries = []
def addAdBlockRule(self, rule, url):
"""
Public slot to add an AdBlock rule to the page.
@param rule AdBlock rule to add (AdBlockRule)
@param url URL that matched the rule (QUrl)
"""
from .AdBlock.AdBlockPage import AdBlockedPageEntry
entry = AdBlockedPageEntry(rule, url)
if entry not in self.__adBlockedEntries:
self.__adBlockedEntries.append(entry)
def getAdBlockedPageEntries(self):
"""
Public method to get the list of AdBlock page entries.
@return list of AdBlock page entries (list of AdBlockedPageEntry)
"""
return self.__adBlockedEntries
def url(self):
"""
Public method to get the URL of the page.
@return URL of the page (QUrl)
"""
return self.mainFrame().url()
def userAgent(self, resolveEmpty=False):
"""
Public method to get the global user agent setting.
@param resolveEmpty flag indicating to resolve an empty
user agent (boolean)
@return user agent string (string)
"""
agent = Preferences.getHelp("UserAgent")
if agent == "" and resolveEmpty:
agent = self.userAgentForUrl(QUrl())
return agent
def setUserAgent(self, agent):
"""
Public method to set the global user agent string.
@param agent new current user agent string (string)
"""
Preferences.setHelp("UserAgent", agent)
def userAgentForUrl(self, url):
"""
Public method to determine the user agent for the given URL.
@param url URL to determine user agent for (QUrl)
@return user agent string (string)
"""
import Helpviewer.HelpWindow
agent = Helpviewer.HelpWindow.HelpWindow.userAgentsManager()\
.userAgentForUrl(url)
if agent == "":
# no agent string specified for the given host -> use global one
agent = Preferences.getHelp("UserAgent")
if agent == "":
# no global agent string specified -> use default one
agent = QWebPage.userAgentForUrl(self, url)
return agent
def __managerFinished(self, reply):
"""
Private slot to handle a finished reply.
This slot is used to get SSL related information for a reply.
@param reply reference to the finished reply (QNetworkReply)
"""
try:
frame = reply.request().originatingObject()
except AttributeError:
frame = None
mainFrameRequest = frame == self.mainFrame()
if mainFrameRequest and \
self.__sslConfiguration is not None and \
reply.url() == self.mainFrame().url():
self.__sslConfiguration = None
if reply.error() == QNetworkReply.NoError and \
mainFrameRequest and \
self.__sslConfiguration is None and \
reply.url().scheme().lower() == "https" and \
reply.url() == self.mainFrame().url():
self.__sslConfiguration = reply.sslConfiguration()
self.__sslConfiguration.url = QUrl(reply.url())
if reply.error() == QNetworkReply.NoError and \
mainFrameRequest and \
reply.url() == self.mainFrame().url():
modified = reply.header(QNetworkRequest.LastModifiedHeader)
if modified and modified.isValid():
import Helpviewer.HelpWindow
manager = Helpviewer.HelpWindow.HelpWindow.bookmarksManager()
from .Bookmarks.BookmarkNode import BookmarkNode
for bookmark in manager.bookmarksForUrl(reply.url()):
manager.setTimestamp(bookmark, BookmarkNode.TsModified,
modified)
def getSslCertificate(self):
"""
Public method to get a reference to the SSL certificate.
@return amended SSL certificate (QSslCertificate)
"""
if self.__sslConfiguration is None:
return None
sslInfo = self.__sslConfiguration.peerCertificate()
sslInfo.url = QUrl(self.__sslConfiguration.url)
return sslInfo
def getSslCertificateChain(self):
"""
Public method to get a reference to the SSL certificate chain.
@return SSL certificate chain (list of QSslCertificate)
"""
if self.__sslConfiguration is None:
return []
chain = self.__sslConfiguration.peerCertificateChain()
return chain
def getSslConfiguration(self):
"""
Public method to return a reference to the current SSL configuration.
@return reference to the SSL configuration in use (QSslConfiguration)
"""
return self.__sslConfiguration
def showSslInfo(self, pos):
"""
Public slot to show some SSL information for the loaded page.
@param pos position to show the info at (QPoint)
"""
if SSL_AVAILABLE and self.__sslConfiguration is not None:
from E5Network.E5SslInfoWidget import E5SslInfoWidget
widget = E5SslInfoWidget(
self.mainFrame().url(), self.__sslConfiguration, self.view())
widget.showAt(pos)
else:
E5MessageBox.warning(
self.view(),
self.tr("SSL Info"),
self.tr("""This site does not contain SSL information."""))
def hasValidSslInfo(self):
"""
Public method to check, if the page has a valid SSL certificate.
@return flag indicating a valid SSL certificate (boolean)
"""
if self.__sslConfiguration is None:
return False
certList = self.__sslConfiguration.peerCertificateChain()
if not certList:
return False
certificateDict = Preferences.toDict(
Preferences.Prefs.settings.value("Ssl/CaCertificatesDict"))
for server in certificateDict:
localCAList = QSslCertificate.fromData(certificateDict[server])
for cert in certList:
if cert in localCAList:
return True
if qVersion() >= "5.0.0":
for cert in certList:
if cert.isBlacklisted():
return False
else:
for cert in certList:
if not cert.isValid():
return False
return True
@classmethod
def webPluginFactory(cls):
"""
Class method to get a reference to the web plug-in factory
instance.
        @return reference to the web plug-in factory instance (WebPluginFactory)
"""
if cls._webPluginFactory is None:
from .WebPlugins.WebPluginFactory import WebPluginFactory
cls._webPluginFactory = WebPluginFactory()
return cls._webPluginFactory
def event(self, evt):
"""
Public method implementing the event handler.
@param evt reference to the event (QEvent)
@return flag indicating that the event was handled (boolean)
"""
if evt.type() == QEvent.Leave:
# Fake a mouse move event just outside of the widget to trigger
# the WebKit event handler's mouseMoved function. This implements
# the interesting mouse-out behavior like invalidating scrollbars.
fakeEvent = QMouseEvent(QEvent.MouseMove, QPoint(0, -1),
Qt.NoButton, Qt.NoButton, Qt.NoModifier)
return super(HelpWebPage, self).event(fakeEvent)
return super(HelpWebPage, self).event(evt)
###############################################################################
class HelpBrowser(QWebView):
"""
Class implementing the helpbrowser widget.
This is a subclass of the Qt QWebView to implement an
interface compatible with the QTextBrowser based variant.
@signal sourceChanged(QUrl) emitted after the current URL has changed
@signal forwardAvailable(bool) emitted after the current URL has changed
@signal backwardAvailable(bool) emitted after the current URL has changed
@signal highlighted(str) emitted, when the mouse hovers over a link
@signal search(QUrl) emitted, when a search is requested
@signal zoomValueChanged(int) emitted to signal a change of the zoom value
"""
sourceChanged = pyqtSignal(QUrl)
forwardAvailable = pyqtSignal(bool)
backwardAvailable = pyqtSignal(bool)
highlighted = pyqtSignal(str)
search = pyqtSignal(QUrl)
zoomValueChanged = pyqtSignal(int)
ZoomLevels = [
30, 50, 67, 80, 90,
100,
110, 120, 133, 150, 170, 200, 240, 300,
]
ZoomLevelDefault = 100
def __init__(self, mainWindow, parent=None, name=""):
"""
Constructor
@param mainWindow reference to the main window (HelpWindow)
@param parent parent widget of this window (QWidget)
@param name name of this window (string)
"""
super(HelpBrowser, self).__init__(parent)
self.setObjectName(name)
self.setWhatsThis(self.tr(
"""<b>Help Window</b>"""
"""<p>This window displays the selected help information.</p>"""
))
import Helpviewer.HelpWindow
self.__speedDial = Helpviewer.HelpWindow.HelpWindow.speedDial()
self.__page = HelpWebPage(self)
self.setPage(self.__page)
self.mw = mainWindow
self.ctrlPressed = False
self.__isLoading = False
self.__progress = 0
self.__currentZoom = 100
self.__zoomLevels = HelpBrowser.ZoomLevels[:]
self.__javaScriptBinding = None
self.__javaScriptEricObject = None
self.mw.zoomTextOnlyChanged.connect(self.__applyZoom)
self.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.linkClicked.connect(self.setSource)
self.urlChanged.connect(self.__urlChanged)
self.statusBarMessage.connect(self.__statusBarMessage)
self.page().linkHovered.connect(self.__linkHovered)
self.loadStarted.connect(self.__loadStarted)
self.loadProgress.connect(self.__loadProgress)
self.loadFinished.connect(self.__loadFinished)
self.page().setForwardUnsupportedContent(True)
self.page().unsupportedContent.connect(self.__unsupportedContent)
self.page().downloadRequested.connect(self.__downloadRequested)
self.page().frameCreated.connect(self.__addExternalBinding)
self.__addExternalBinding(self.page().mainFrame())
self.page().databaseQuotaExceeded.connect(self.__databaseQuotaExceeded)
self.mw.openSearchManager().currentEngineChanged.connect(
self.__currentEngineChanged)
self.setAcceptDrops(True)
self.__enableAccessKeys = Preferences.getHelp("AccessKeysEnabled")
self.__accessKeysPressed = False
self.__accessKeyLabels = []
self.__accessKeyNodes = {}
self.page().loadStarted.connect(self.__hideAccessKeys)
self.page().scrollRequested.connect(self.__hideAccessKeys)
self.__rss = []
self.__clickedFrame = None
self.mw.personalInformationManager().connectPage(self.page())
self.mw.greaseMonkeyManager().connectPage(self.page())
self.grabGesture(Qt.PinchGesture)
def __addExternalBinding(self, frame=None):
"""
Private slot to add javascript bindings for adding search providers.
@param frame reference to the web frame (QWebFrame)
"""
self.page().settings().setAttribute(QWebSettings.JavascriptEnabled,
True)
if self.__javaScriptBinding is None:
self.__javaScriptBinding = JavaScriptExternalObject(self.mw, self)
if frame is None:
# called from QWebFrame.javaScriptWindowObjectCleared
frame = self.sender()
if isinstance(frame, HelpWebPage):
frame = frame.mainFrame()
if frame.url().scheme() == "eric" and frame.url().path() == "home":
if self.__javaScriptEricObject is None:
self.__javaScriptEricObject = JavaScriptEricObject(
self.mw, self)
frame.addToJavaScriptWindowObject(
"eric", self.__javaScriptEricObject)
elif frame.url().scheme() == "eric" and \
frame.url().path() == "speeddial":
frame.addToJavaScriptWindowObject(
"speeddial", self.__speedDial)
self.__speedDial.addWebFrame(frame)
else:
# called from QWebPage.frameCreated
frame.javaScriptWindowObjectCleared.connect(
self.__addExternalBinding)
frame.addToJavaScriptWindowObject("external", self.__javaScriptBinding)
def linkedResources(self, relation=""):
"""
Public method to extract linked resources.
@param relation relation to extract (string)
@return list of linked resources (list of LinkedResource)
"""
resources = []
baseUrl = self.page().mainFrame().baseUrl()
linkElements = self.page().mainFrame().findAllElements(
"html > head > link")
for linkElement in linkElements.toList():
rel = linkElement.attribute("rel")
href = linkElement.attribute("href")
type_ = linkElement.attribute("type")
title = linkElement.attribute("title")
if href == "" or type_ == "":
continue
if relation and rel != relation:
continue
resource = LinkedResource()
resource.rel = rel
resource.type_ = type_
resource.href = baseUrl.resolved(QUrl.fromEncoded(href))
resource.title = title
resources.append(resource)
return resources
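    # Illustrative example (not part of the original class): feeds advertised
    # by the loaded page could be collected with, e.g.
    #
    #     feeds = [res for res in browser.linkedResources("alternate")
    #              if res.type_ in ("application/rss+xml",
    #                               "application/atom+xml")]
    #
    # where ``browser`` is any HelpBrowser instance and each entry is a
    # LinkedResource carrying the resolved href (QUrl), type_ and title.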
def __currentEngineChanged(self):
"""
Private slot to track a change of the current search engine.
"""
if self.url().toString() == "eric:home":
self.reload()
def setSource(self, name, requestData=None):
"""
Public method used to set the source to be displayed.
@param name filename to be shown (QUrl)
@param requestData tuple containing the request data (QNetworkRequest,
QNetworkAccessManager.Operation, QByteArray)
"""
if (name is None or not name.isValid()) and requestData is None:
return
if name is None and requestData is not None:
name = requestData[0].url()
if self.ctrlPressed:
# open in a new window
self.mw.newTab(name)
self.ctrlPressed = False
return
if not name.scheme():
name.setUrl(Preferences.getHelp("DefaultScheme") + name.toString())
if len(name.scheme()) == 1 or \
name.scheme() == "file":
# name is a local file
if name.scheme() and len(name.scheme()) == 1:
# it is a local path on win os
name = QUrl.fromLocalFile(name.toString())
if not QFileInfo(name.toLocalFile()).exists():
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>The file <b>{0}</b> does not exist.</p>""")
.format(name.toLocalFile()))
return
if name.toLocalFile().endswith(".pdf") or \
name.toLocalFile().endswith(".PDF") or \
name.toLocalFile().endswith(".chm") or \
name.toLocalFile().endswith(".CHM"):
started = QDesktopServices.openUrl(name)
if not started:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Could not start a viewer"""
""" for file <b>{0}</b>.</p>""")
.format(name.path()))
return
elif name.scheme() in ["mailto"]:
started = QDesktopServices.openUrl(name)
if not started:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Could not start an application"""
""" for URL <b>{0}</b>.</p>""")
.format(name.toString()))
return
elif name.scheme() == "javascript":
scriptSource = QUrl.fromPercentEncoding(name.toString(
QUrl.FormattingOptions(QUrl.TolerantMode | QUrl.RemoveScheme)))
self.page().mainFrame().evaluateJavaScript(scriptSource)
return
else:
if name.toString().endswith(".pdf") or \
name.toString().endswith(".PDF") or \
name.toString().endswith(".chm") or \
name.toString().endswith(".CHM"):
started = QDesktopServices.openUrl(name)
if not started:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Could not start a viewer"""
""" for file <b>{0}</b>.</p>""")
.format(name.path()))
return
if requestData is not None:
self.load(*requestData)
else:
self.load(name)
def source(self):
"""
Public method to return the URL of the loaded page.
@return URL loaded in the help browser (QUrl)
"""
return self.url()
def documentTitle(self):
"""
Public method to return the title of the loaded page.
@return title (string)
"""
return self.title()
def backward(self):
"""
Public slot to move backwards in history.
"""
self.triggerPageAction(QWebPage.Back)
self.__urlChanged(self.history().currentItem().url())
def forward(self):
"""
Public slot to move forward in history.
"""
self.triggerPageAction(QWebPage.Forward)
self.__urlChanged(self.history().currentItem().url())
def home(self):
"""
Public slot to move to the first page loaded.
"""
homeUrl = QUrl(Preferences.getHelp("HomePage"))
self.setSource(homeUrl)
self.__urlChanged(self.history().currentItem().url())
def reload(self):
"""
Public slot to reload the current page.
"""
self.triggerPageAction(QWebPage.Reload)
def copy(self):
"""
Public slot to copy the selected text.
"""
self.triggerPageAction(QWebPage.Copy)
def isForwardAvailable(self):
"""
Public method to determine, if a forward move in history is possible.
@return flag indicating move forward is possible (boolean)
"""
return self.history().canGoForward()
def isBackwardAvailable(self):
"""
Public method to determine, if a backwards move in history is possible.
@return flag indicating move backwards is possible (boolean)
"""
return self.history().canGoBack()
def __levelForZoom(self, zoom):
"""
Private method determining the zoom level index given a zoom factor.
@param zoom zoom factor (integer)
@return index of zoom factor (integer)
"""
try:
index = self.__zoomLevels.index(zoom)
except ValueError:
for index in range(len(self.__zoomLevels)):
if zoom <= self.__zoomLevels[index]:
break
return index
def __applyZoom(self):
"""
Private slot to apply the current zoom factor.
"""
self.setZoomValue(self.__currentZoom)
def setZoomValue(self, value):
"""
Public method to set the zoom value.
@param value zoom value (integer)
"""
if value != self.zoomValue():
try:
self.setZoomFactor(value / 100.0)
except AttributeError:
self.setTextSizeMultiplier(value / 100.0)
self.zoomValueChanged.emit(value)
def zoomValue(self):
"""
Public method to get the current zoom value.
@return zoom value (integer)
"""
try:
val = self.zoomFactor() * 100
except AttributeError:
val = self.textSizeMultiplier() * 100
return int(val)
def zoomIn(self):
"""
Public slot to zoom into the page.
"""
index = self.__levelForZoom(self.__currentZoom)
if index < len(self.__zoomLevels) - 1:
self.__currentZoom = self.__zoomLevels[index + 1]
self.__applyZoom()
def zoomOut(self):
"""
Public slot to zoom out of the page.
"""
index = self.__levelForZoom(self.__currentZoom)
if index > 0:
self.__currentZoom = self.__zoomLevels[index - 1]
self.__applyZoom()
def zoomReset(self):
"""
Public method to reset the zoom factor.
"""
        index = self.__zoomLevels.index(HelpBrowser.ZoomLevelDefault)
        self.__currentZoom = self.__zoomLevels[index]
self.__applyZoom()
def hasSelection(self):
"""
Public method to determine, if there is some text selected.
@return flag indicating text has been selected (boolean)
"""
return self.selectedText() != ""
def findNextPrev(self, txt, case, backwards, wrap, highlightAll):
"""
Public slot to find the next occurrence of a text.
@param txt text to search for (string)
@param case flag indicating a case sensitive search (boolean)
@param backwards flag indicating a backwards search (boolean)
@param wrap flag indicating to wrap around (boolean)
@param highlightAll flag indicating to highlight all occurrences
(boolean)
@return flag indicating that a match was found (boolean)
"""
findFlags = QWebPage.FindFlags()
if case:
findFlags |= QWebPage.FindCaseSensitively
if backwards:
findFlags |= QWebPage.FindBackward
if wrap:
findFlags |= QWebPage.FindWrapsAroundDocument
try:
if highlightAll:
findFlags |= QWebPage.HighlightAllOccurrences
except AttributeError:
pass
return self.findText(txt, findFlags)
def __isMediaElement(self, element):
"""
Private method to check, if the given element is a media element.
@param element element to be checked (QWebElement)
@return flag indicating a media element (boolean)
"""
return element.tagName().lower() in ["video", "audio"]
def contextMenuEvent(self, evt):
"""
Protected method called to create a context menu.
This method is overridden from QWebView.
@param evt reference to the context menu event object
(QContextMenuEvent)
"""
from .UserAgent.UserAgentMenu import UserAgentMenu
menu = QMenu(self)
frameAtPos = self.page().frameAt(evt.pos())
hit = self.page().mainFrame().hitTestContent(evt.pos())
if not hit.linkUrl().isEmpty():
menu.addAction(
UI.PixmapCache.getIcon("openNewTab.png"),
self.tr("Open Link in New Tab\tCtrl+LMB"),
self.__openLinkInNewTab).setData(hit.linkUrl())
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("download.png"),
self.tr("Save Lin&k"), self.__downloadLink)
menu.addAction(
UI.PixmapCache.getIcon("bookmark22.png"),
self.tr("Bookmark this Link"), self.__bookmarkLink)\
.setData(hit.linkUrl())
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("editCopy.png"),
self.tr("Copy Link to Clipboard"), self.__copyLink)
menu.addAction(
UI.PixmapCache.getIcon("mailSend.png"),
self.tr("Send Link"),
self.__sendLink).setData(hit.linkUrl())
if Preferences.getHelp("VirusTotalEnabled") and \
Preferences.getHelp("VirusTotalServiceKey") != "":
menu.addAction(
UI.PixmapCache.getIcon("virustotal.png"),
self.tr("Scan Link with VirusTotal"),
self.__virusTotal).setData(hit.linkUrl())
if not hit.imageUrl().isEmpty():
if not menu.isEmpty():
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("openNewTab.png"),
self.tr("Open Image in New Tab"),
self.__openLinkInNewTab).setData(hit.imageUrl())
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("download.png"),
self.tr("Save Image"), self.__downloadImage)
menu.addAction(
self.tr("Copy Image to Clipboard"), self.__copyImage)
menu.addAction(
UI.PixmapCache.getIcon("editCopy.png"),
self.tr("Copy Image Location to Clipboard"),
self.__copyLocation).setData(hit.imageUrl().toString())
menu.addAction(
UI.PixmapCache.getIcon("mailSend.png"),
self.tr("Send Image Link"),
self.__sendLink).setData(hit.imageUrl())
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("adBlockPlus.png"),
self.tr("Block Image"), self.__blockImage)\
.setData(hit.imageUrl().toString())
if Preferences.getHelp("VirusTotalEnabled") and \
Preferences.getHelp("VirusTotalServiceKey") != "":
menu.addAction(
UI.PixmapCache.getIcon("virustotal.png"),
self.tr("Scan Image with VirusTotal"),
self.__virusTotal).setData(hit.imageUrl())
element = hit.element()
if not element.isNull():
if self.__isMediaElement(element):
if not menu.isEmpty():
menu.addSeparator()
self.__clickedMediaElement = element
paused = element.evaluateJavaScript("this.paused")
muted = element.evaluateJavaScript("this.muted")
videoUrl = QUrl(element.evaluateJavaScript("this.currentSrc"))
if paused:
menu.addAction(
UI.PixmapCache.getIcon("mediaPlaybackStart.png"),
self.tr("Play"), self.__pauseMedia)
else:
menu.addAction(
UI.PixmapCache.getIcon("mediaPlaybackPause.png"),
self.tr("Pause"), self.__pauseMedia)
if muted:
menu.addAction(
UI.PixmapCache.getIcon("audioVolumeHigh.png"),
self.tr("Unmute"), self.__muteMedia)
else:
menu.addAction(
UI.PixmapCache.getIcon("audioVolumeMuted.png"),
self.tr("Mute"), self.__muteMedia)
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("editCopy.png"),
self.tr("Copy Media Address to Clipboard"),
self.__copyLocation).setData(videoUrl.toString())
menu.addAction(
UI.PixmapCache.getIcon("mailSend.png"),
self.tr("Send Media Address"), self.__sendLink)\
.setData(videoUrl)
menu.addAction(
UI.PixmapCache.getIcon("download.png"),
self.tr("Save Media"), self.__downloadMedia)\
.setData(videoUrl)
if element.tagName().lower() in ["input", "textarea"]:
if menu.isEmpty():
pageMenu = self.page().createStandardContextMenu()
directionFound = False
# used to detect double direction entry
for act in pageMenu.actions():
if act.isSeparator():
menu.addSeparator()
continue
if act.menu():
if self.pageAction(
QWebPage.SetTextDirectionDefault) in \
act.menu().actions():
if directionFound:
act.setVisible(False)
directionFound = True
elif self.pageAction(QWebPage.ToggleBold) in \
act.menu().actions():
act.setVisible(False)
elif act == self.pageAction(QWebPage.InspectElement):
# we have our own inspect entry
act.setVisible(False)
menu.addAction(act)
pageMenu = None
if not menu.isEmpty():
menu.addSeparator()
self.mw.personalInformationManager().createSubMenu(menu, self, hit)
menu.addAction(self.mw.newTabAct)
menu.addAction(self.mw.newAct)
menu.addSeparator()
menu.addAction(self.mw.saveAsAct)
menu.addSeparator()
if frameAtPos and self.page().mainFrame() != frameAtPos:
self.__clickedFrame = frameAtPos
fmenu = QMenu(self.tr("This Frame"))
frameUrl = self.__clickedFrame.url()
if frameUrl.isValid():
fmenu.addAction(
self.tr("Show &only this frame"),
self.__loadClickedFrame)
fmenu.addAction(
UI.PixmapCache.getIcon("openNewTab.png"),
self.tr("Show in new &tab"),
self.__openLinkInNewTab).setData(self.__clickedFrame.url())
fmenu.addSeparator()
fmenu.addAction(
UI.PixmapCache.getIcon("print.png"),
self.tr("&Print"), self.__printClickedFrame)
fmenu.addAction(
UI.PixmapCache.getIcon("printPreview.png"),
self.tr("Print Preview"), self.__printPreviewClickedFrame)
fmenu.addAction(
UI.PixmapCache.getIcon("printPdf.png"),
self.tr("Print as PDF"), self.__printPdfClickedFrame)
fmenu.addSeparator()
fmenu.addAction(
UI.PixmapCache.getIcon("zoomIn.png"),
self.tr("Zoom &in"), self.__zoomInClickedFrame)
fmenu.addAction(
UI.PixmapCache.getIcon("zoomReset.png"),
self.tr("Zoom &reset"), self.__zoomResetClickedFrame)
fmenu.addAction(
UI.PixmapCache.getIcon("zoomOut.png"),
self.tr("Zoom &out"), self.__zoomOutClickedFrame)
fmenu.addSeparator()
fmenu.addAction(
self.tr("Show frame so&urce"),
self.__showClickedFrameSource)
menu.addMenu(fmenu)
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("bookmark22.png"),
self.tr("Bookmark this Page"), self.addBookmark)
menu.addAction(
UI.PixmapCache.getIcon("mailSend.png"),
self.tr("Send Page Link"), self.__sendLink).setData(self.url())
menu.addSeparator()
self.__userAgentMenu = UserAgentMenu(self.tr("User Agent"),
url=self.url())
menu.addMenu(self.__userAgentMenu)
menu.addSeparator()
menu.addAction(self.mw.backAct)
menu.addAction(self.mw.forwardAct)
menu.addAction(self.mw.homeAct)
menu.addSeparator()
menu.addAction(self.mw.zoomInAct)
menu.addAction(self.mw.zoomResetAct)
menu.addAction(self.mw.zoomOutAct)
menu.addSeparator()
if self.selectedText():
menu.addAction(self.mw.copyAct)
menu.addAction(
UI.PixmapCache.getIcon("mailSend.png"),
self.tr("Send Text"),
self.__sendLink).setData(self.selectedText())
menu.addAction(self.mw.findAct)
menu.addSeparator()
if self.selectedText():
self.__searchMenu = menu.addMenu(self.tr("Search with..."))
from .OpenSearch.OpenSearchEngineAction import \
OpenSearchEngineAction
engineNames = self.mw.openSearchManager().allEnginesNames()
for engineName in engineNames:
engine = self.mw.openSearchManager().engine(engineName)
act = OpenSearchEngineAction(engine, self.__searchMenu)
act.setData(engineName)
self.__searchMenu.addAction(act)
self.__searchMenu.triggered.connect(self.__searchRequested)
menu.addSeparator()
from .HelpLanguagesDialog import HelpLanguagesDialog
languages = Preferences.toList(
Preferences.Prefs.settings.value(
"Help/AcceptLanguages",
HelpLanguagesDialog.defaultAcceptLanguages()))
if languages:
language = languages[0]
langCode = language.split("[")[1][:2]
googleTranslatorUrl = QUrl(
"http://translate.google.com/#auto|{0}|{1}".format(
langCode, self.selectedText()))
menu.addAction(
UI.PixmapCache.getIcon("translate.png"),
self.tr("Google Translate"), self.__openLinkInNewTab)\
.setData(googleTranslatorUrl)
wiktionaryUrl = QUrl(
"http://{0}.wiktionary.org/wiki/Special:Search?search={1}"
.format(langCode, self.selectedText()))
menu.addAction(
UI.PixmapCache.getIcon("wikipedia.png"),
self.tr("Dictionary"), self.__openLinkInNewTab)\
.setData(wiktionaryUrl)
menu.addSeparator()
guessedUrl = QUrl.fromUserInput(self.selectedText().strip())
if self.__isUrlValid(guessedUrl):
menu.addAction(
self.tr("Go to web address"),
self.__openLinkInNewTab).setData(guessedUrl)
menu.addSeparator()
element = hit.element()
if not element.isNull() and \
element.tagName().lower() == "input" and \
element.attribute("type", "text") == "text":
menu.addAction(self.tr("Add to web search toolbar"),
self.__addSearchEngine).setData(element)
menu.addSeparator()
menu.addAction(
UI.PixmapCache.getIcon("webInspector.png"),
self.tr("Web Inspector..."), self.__webInspector)
menu.exec_(evt.globalPos())
def __isUrlValid(self, url):
"""
Private method to check a URL for validity.
@param url URL to be checked (QUrl)
@return flag indicating a valid URL (boolean)
"""
return url.isValid() and \
bool(url.host()) and \
bool(url.scheme()) and \
"." in url.host()
def __openLinkInNewTab(self):
"""
Private method called by the context menu to open a link in a new
        tab.
"""
act = self.sender()
url = act.data()
if url.isEmpty():
return
self.ctrlPressed = True
self.setSource(url)
self.ctrlPressed = False
def __bookmarkLink(self):
"""
Private slot to bookmark a link via the context menu.
"""
act = self.sender()
url = act.data()
if url.isEmpty():
return
from .Bookmarks.AddBookmarkDialog import AddBookmarkDialog
dlg = AddBookmarkDialog()
dlg.setUrl(bytes(url.toEncoded()).decode())
dlg.exec_()
def __sendLink(self):
"""
Private slot to send a link via email.
"""
act = self.sender()
data = act.data()
if isinstance(data, QUrl) and data.isEmpty():
return
if isinstance(data, QUrl):
data = data.toString()
QDesktopServices.openUrl(QUrl("mailto:?body=" + data))
def __downloadLink(self):
"""
Private slot to download a link and save it to disk.
"""
self.pageAction(QWebPage.DownloadLinkToDisk).trigger()
def __copyLink(self):
"""
Private slot to copy a link to the clipboard.
"""
self.pageAction(QWebPage.CopyLinkToClipboard).trigger()
def __downloadImage(self):
"""
Private slot to download an image and save it to disk.
"""
self.pageAction(QWebPage.DownloadImageToDisk).trigger()
def __copyImage(self):
"""
Private slot to copy an image to the clipboard.
"""
self.pageAction(QWebPage.CopyImageToClipboard).trigger()
def __copyLocation(self):
"""
Private slot to copy an image or media location to the clipboard.
"""
act = self.sender()
url = act.data()
QApplication.clipboard().setText(url)
def __blockImage(self):
"""
Private slot to add a block rule for an image URL.
"""
import Helpviewer.HelpWindow
act = self.sender()
url = act.data()
dlg = Helpviewer.HelpWindow.HelpWindow.adBlockManager().showDialog()
dlg.addCustomRule(url)
def __downloadMedia(self):
"""
Private slot to download a media and save it to disk.
"""
act = self.sender()
url = act.data()
self.mw.downloadManager().download(url, True, mainWindow=self.mw)
def __pauseMedia(self):
"""
Private slot to pause or play the selected media.
"""
paused = self.__clickedMediaElement.evaluateJavaScript("this.paused")
if paused:
self.__clickedMediaElement.evaluateJavaScript("this.play()")
else:
self.__clickedMediaElement.evaluateJavaScript("this.pause()")
def __muteMedia(self):
"""
Private slot to (un)mute the selected media.
"""
muted = self.__clickedMediaElement.evaluateJavaScript("this.muted")
if muted:
self.__clickedMediaElement.evaluateJavaScript("this.muted = false")
else:
self.__clickedMediaElement.evaluateJavaScript("this.muted = true")
def __virusTotal(self):
"""
Private slot to scan the selected URL with VirusTotal.
"""
act = self.sender()
url = act.data()
self.mw.requestVirusTotalScan(url)
def __searchRequested(self, act):
"""
Private slot to search for some text with a selected search engine.
@param act reference to the action that triggered this slot (QAction)
"""
searchText = self.selectedText()
if not searchText:
return
engineName = act.data()
if engineName:
engine = self.mw.openSearchManager().engine(engineName)
self.search.emit(engine.searchUrl(searchText))
def __addSearchEngine(self):
"""
Private slot to add a new search engine.
"""
act = self.sender()
if act is None:
return
element = act.data()
elementName = element.attribute("name")
formElement = QWebElement(element)
while formElement.tagName().lower() != "form":
formElement = formElement.parent()
if formElement.isNull() or \
formElement.attribute("action") == "":
return
method = formElement.attribute("method", "get").lower()
if method != "get":
E5MessageBox.warning(
self,
self.tr("Method not supported"),
self.tr(
"""{0} method is not supported.""").format(method.upper()))
return
searchUrl = QUrl(self.page().mainFrame().baseUrl().resolved(
QUrl(formElement.attribute("action"))))
if searchUrl.scheme() != "http":
return
if qVersion() >= "5.0.0":
from PyQt5.QtCore import QUrlQuery
searchUrlQuery = QUrlQuery(searchUrl)
searchEngines = {}
inputFields = formElement.findAll("input")
for inputField in inputFields.toList():
type_ = inputField.attribute("type", "text")
name = inputField.attribute("name")
value = inputField.evaluateJavaScript("this.value")
if type_ == "submit":
searchEngines[value] = name
elif type_ == "text":
if inputField == element:
value = "{searchTerms}"
if qVersion() >= "5.0.0":
searchUrlQuery.addQueryItem(name, value)
else:
searchUrl.addQueryItem(name, value)
elif type_ == "checkbox" or type_ == "radio":
if inputField.evaluateJavaScript("this.checked"):
if qVersion() >= "5.0.0":
searchUrlQuery.addQueryItem(name, value)
else:
searchUrl.addQueryItem(name, value)
elif type_ == "hidden":
if qVersion() >= "5.0.0":
searchUrlQuery.addQueryItem(name, value)
else:
searchUrl.addQueryItem(name, value)
selectFields = formElement.findAll("select")
for selectField in selectFields.toList():
name = selectField.attribute("name")
selectedIndex = selectField.evaluateJavaScript(
"this.selectedIndex")
if selectedIndex == -1:
continue
options = selectField.findAll("option")
value = options.at(selectedIndex).toPlainText()
if qVersion() >= "5.0.0":
searchUrlQuery.addQueryItem(name, value)
else:
searchUrl.addQueryItem(name, value)
ok = True
if len(searchEngines) > 1:
searchEngine, ok = QInputDialog.getItem(
self,
self.tr("Search engine"),
self.tr("Choose the desired search engine"),
sorted(searchEngines.keys()), 0, False)
if not ok:
return
if searchEngines[searchEngine] != "":
if qVersion() >= "5.0.0":
searchUrlQuery.addQueryItem(
searchEngines[searchEngine], searchEngine)
else:
searchUrl.addQueryItem(
searchEngines[searchEngine], searchEngine)
engineName = ""
labels = formElement.findAll('label[for="{0}"]'.format(elementName))
if labels.count() > 0:
engineName = labels.at(0).toPlainText()
engineName, ok = QInputDialog.getText(
self,
self.tr("Engine name"),
self.tr("Enter a name for the engine"),
QLineEdit.Normal,
engineName)
if not ok:
return
if qVersion() >= "5.0.0":
searchUrl.setQuery(searchUrlQuery)
from .OpenSearch.OpenSearchEngine import OpenSearchEngine
engine = OpenSearchEngine()
engine.setName(engineName)
engine.setDescription(engineName)
engine.setSearchUrlTemplate(searchUrl.toString())
engine.setImage(self.icon().pixmap(16, 16).toImage())
self.mw.openSearchManager().addEngine(engine)
def __webInspector(self):
"""
Private slot to show the web inspector window.
"""
self.triggerPageAction(QWebPage.InspectElement)
def addBookmark(self):
"""
Public slot to bookmark the current page.
"""
from .Bookmarks.AddBookmarkDialog import AddBookmarkDialog
dlg = AddBookmarkDialog()
dlg.setUrl(bytes(self.url().toEncoded()).decode())
dlg.setTitle(self.title())
meta = self.page().mainFrame().metaData()
if "description" in meta:
dlg.setDescription(meta["description"][0])
dlg.exec_()
def dragEnterEvent(self, evt):
"""
Protected method called by a drag enter event.
@param evt reference to the drag enter event (QDragEnterEvent)
"""
evt.acceptProposedAction()
def dragMoveEvent(self, evt):
"""
Protected method called by a drag move event.
@param evt reference to the drag move event (QDragMoveEvent)
"""
evt.ignore()
if evt.source() != self:
if len(evt.mimeData().urls()) > 0:
evt.acceptProposedAction()
else:
url = QUrl(evt.mimeData().text())
if url.isValid():
evt.acceptProposedAction()
if not evt.isAccepted():
super(HelpBrowser, self).dragMoveEvent(evt)
def dropEvent(self, evt):
"""
Protected method called by a drop event.
@param evt reference to the drop event (QDropEvent)
"""
super(HelpBrowser, self).dropEvent(evt)
if not evt.isAccepted() and \
evt.source() != self and \
evt.possibleActions() & Qt.CopyAction:
url = QUrl()
if len(evt.mimeData().urls()) > 0:
url = evt.mimeData().urls()[0]
if not url.isValid():
url = QUrl(evt.mimeData().text())
if url.isValid():
self.setSource(url)
evt.acceptProposedAction()
def mousePressEvent(self, evt):
"""
Protected method called by a mouse press event.
@param evt reference to the mouse event (QMouseEvent)
"""
self.mw.setEventMouseButtons(evt.buttons())
self.mw.setEventKeyboardModifiers(evt.modifiers())
if evt.button() == Qt.XButton1:
self.pageAction(QWebPage.Back).trigger()
elif evt.button() == Qt.XButton2:
self.pageAction(QWebPage.Forward).trigger()
else:
super(HelpBrowser, self).mousePressEvent(evt)
def mouseReleaseEvent(self, evt):
"""
Protected method called by a mouse release event.
@param evt reference to the mouse event (QMouseEvent)
"""
accepted = evt.isAccepted()
self.__page.event(evt)
if not evt.isAccepted() and \
self.mw.eventMouseButtons() & Qt.MidButton:
url = QUrl(QApplication.clipboard().text(QClipboard.Selection))
if not url.isEmpty() and \
url.isValid() and \
url.scheme() != "":
self.mw.setEventMouseButtons(Qt.NoButton)
self.mw.setEventKeyboardModifiers(Qt.NoModifier)
self.setSource(url)
evt.setAccepted(accepted)
def wheelEvent(self, evt):
"""
Protected method to handle wheel events.
@param evt reference to the wheel event (QWheelEvent)
"""
if qVersion() >= "5.0.0":
delta = evt.angleDelta().y()
else:
delta = evt.delta()
if evt.modifiers() & Qt.ControlModifier:
if delta < 0:
self.zoomOut()
else:
self.zoomIn()
evt.accept()
return
if evt.modifiers() & Qt.ShiftModifier:
if delta < 0:
self.backward()
else:
self.forward()
evt.accept()
return
super(HelpBrowser, self).wheelEvent(evt)
def keyPressEvent(self, evt):
"""
Protected method called by a key press.
@param evt reference to the key event (QKeyEvent)
"""
if self.mw.personalInformationManager().viewKeyPressEvent(self, evt):
return
if self.__enableAccessKeys:
self.__accessKeysPressed = (
evt.modifiers() == Qt.ControlModifier and
evt.key() == Qt.Key_Control)
if not self.__accessKeysPressed:
if self.__checkForAccessKey(evt):
self.__hideAccessKeys()
evt.accept()
return
self.__hideAccessKeys()
else:
QTimer.singleShot(300, self.__accessKeyShortcut)
self.ctrlPressed = (evt.key() == Qt.Key_Control)
super(HelpBrowser, self).keyPressEvent(evt)
def keyReleaseEvent(self, evt):
"""
Protected method called by a key release.
@param evt reference to the key event (QKeyEvent)
"""
if self.__enableAccessKeys:
self.__accessKeysPressed = evt.key() == Qt.Key_Control
self.ctrlPressed = False
super(HelpBrowser, self).keyReleaseEvent(evt)
def focusOutEvent(self, evt):
"""
Protected method called by a focus out event.
@param evt reference to the focus event (QFocusEvent)
"""
if self.__accessKeysPressed:
self.__hideAccessKeys()
self.__accessKeysPressed = False
super(HelpBrowser, self).focusOutEvent(evt)
def event(self, evt):
"""
Public method handling events.
@param evt reference to the event (QEvent)
@return flag indicating, if the event was handled (boolean)
"""
if evt.type() == QEvent.Gesture:
self.gestureEvent(evt)
return True
return super(HelpBrowser, self).event(evt)
def gestureEvent(self, evt):
"""
Protected method handling gesture events.
        @param evt reference to the gesture event (QGestureEvent)
"""
pinch = evt.gesture(Qt.PinchGesture)
if pinch:
if pinch.state() == Qt.GestureStarted:
pinch.setScaleFactor(self.__currentZoom / 100.0)
else:
scaleFactor = pinch.scaleFactor()
self.__currentZoom = int(scaleFactor * 100)
self.__applyZoom()
evt.accept()
def clearHistory(self):
"""
Public slot to clear the history.
"""
self.history().clear()
self.__urlChanged(self.history().currentItem().url())
###########################################################################
## Signal converters below
###########################################################################
def __urlChanged(self, url):
"""
Private slot to handle the urlChanged signal.
@param url the new url (QUrl)
"""
self.sourceChanged.emit(url)
self.forwardAvailable.emit(self.isForwardAvailable())
self.backwardAvailable.emit(self.isBackwardAvailable())
def __statusBarMessage(self, text):
"""
Private slot to handle the statusBarMessage signal.
@param text text to be shown in the status bar (string)
"""
self.mw.statusBar().showMessage(text)
def __linkHovered(self, link, title, textContent):
"""
Private slot to handle the linkHovered signal.
@param link the URL of the link (string)
@param title the link title (string)
@param textContent text content of the link (string)
"""
self.highlighted.emit(link)
###########################################################################
## Signal handlers below
###########################################################################
def __loadStarted(self):
"""
Private method to handle the loadStarted signal.
"""
self.__isLoading = True
self.__progress = 0
def __loadProgress(self, progress):
"""
Private method to handle the loadProgress signal.
@param progress progress value (integer)
"""
self.__progress = progress
def __loadFinished(self, ok):
"""
Private method to handle the loadFinished signal.
@param ok flag indicating the result (boolean)
"""
self.__isLoading = False
self.__progress = 0
if Preferences.getHelp("ClickToFlashEnabled"):
# this is a hack to make the ClickToFlash button appear
self.zoomIn()
self.zoomOut()
if ok:
self.mw.adBlockManager().page().hideBlockedPageEntries(self.page())
self.mw.passwordManager().fill(self.page())
def isLoading(self):
"""
Public method to get the loading state.
@return flag indicating the loading state (boolean)
"""
return self.__isLoading
def progress(self):
"""
Public method to get the load progress.
@return load progress (integer)
"""
return self.__progress
def saveAs(self):
"""
Public method to save the current page to a file.
"""
url = self.url()
if url.isEmpty():
return
self.mw.downloadManager().download(url, True, mainWindow=self.mw)
def __unsupportedContent(self, reply, requestFilename=None,
download=False):
"""
Private slot to handle the unsupportedContent signal.
@param reply reference to the reply object (QNetworkReply)
        @keyparam requestFilename flag indicating whether to ask for a filename
(boolean or None). If it is None, the behavior is determined
by a configuration option.
@keyparam download flag indicating a download operation (boolean)
"""
if reply is None:
return
replyUrl = reply.url()
if replyUrl.scheme() == "abp":
return
if reply.error() == QNetworkReply.NoError:
if reply.header(QNetworkRequest.ContentTypeHeader):
self.mw.downloadManager().handleUnsupportedContent(
reply, webPage=self.page(), mainWindow=self.mw)
return
replyUrl = reply.url()
if replyUrl.isEmpty():
return
notFoundFrame = self.page().mainFrame()
if notFoundFrame is None:
return
if reply.header(QNetworkRequest.ContentTypeHeader):
data = reply.readAll()
if contentSniff(data):
notFoundFrame.setHtml(str(data, encoding="utf-8"), replyUrl)
return
urlString = bytes(replyUrl.toEncoded()).decode()
title = self.tr("Error loading page: {0}").format(urlString)
htmlFile = QFile(":/html/notFoundPage.html")
htmlFile.open(QFile.ReadOnly)
html = htmlFile.readAll()
pixmap = qApp.style()\
.standardIcon(QStyle.SP_MessageBoxWarning).pixmap(48, 48)
imageBuffer = QBuffer()
imageBuffer.open(QIODevice.ReadWrite)
if pixmap.save(imageBuffer, "PNG"):
html = html.replace("@IMAGE@", imageBuffer.buffer().toBase64())
pixmap = qApp.style()\
.standardIcon(QStyle.SP_MessageBoxWarning).pixmap(16, 16)
imageBuffer = QBuffer()
imageBuffer.open(QIODevice.ReadWrite)
if pixmap.save(imageBuffer, "PNG"):
html = html.replace("@FAVICON@", imageBuffer.buffer().toBase64())
html = html.replace("@TITLE@", title.encode("utf8"))
html = html.replace("@H1@", reply.errorString().encode("utf8"))
html = html.replace(
"@H2@", self.tr("When connecting to: {0}.")
.format(urlString).encode("utf8"))
html = html.replace(
"@LI-1@",
self.tr("Check the address for errors such as "
"<b>ww</b>.example.org instead of "
"<b>www</b>.example.org").encode("utf8"))
html = html.replace(
"@LI-2@",
self.tr("If the address is correct, try checking the network "
"connection.").encode("utf8"))
html = html.replace(
"@LI-3@",
self.tr(
"If your computer or network is protected by a firewall "
"or proxy, make sure that the browser is permitted to "
"access the network.").encode("utf8"))
html = html.replace(
"@LI-4@",
self.tr("If your cache policy is set to offline browsing,"
"only pages in the local cache are available.")
.encode("utf8"))
html = html.replace(
"@BUTTON@", self.tr("Try Again").encode("utf8"))
notFoundFrame.setHtml(bytes(html).decode("utf8"), replyUrl)
self.mw.historyManager().removeHistoryEntry(replyUrl, self.title())
self.loadFinished.emit(False)
def __downloadRequested(self, request):
"""
Private slot to handle a download request.
@param request reference to the request object (QNetworkRequest)
"""
self.mw.downloadManager().download(request, mainWindow=self.mw)
def __databaseQuotaExceeded(self, frame, databaseName):
"""
Private slot to handle the case, where the database quota is exceeded.
@param frame reference to the frame (QWebFrame)
@param databaseName name of the web database (string)
"""
securityOrigin = frame.securityOrigin()
if securityOrigin.databaseQuota() > 0 and \
securityOrigin.databaseUsage() == 0:
# cope with a strange behavior of Qt 4.6, if a database is
# accessed for the first time
return
res = E5MessageBox.yesNo(
self,
self.tr("Web Database Quota"),
self.tr(
"""<p>The database quota of <strong>{0}</strong> has"""
""" been exceeded while accessing database <strong>{1}"""
"""</strong>.</p><p>Shall it be changed?</p>""")
.format(self.__dataString(securityOrigin.databaseQuota()),
databaseName),
yesDefault=True)
if res:
newQuota, ok = QInputDialog.getInt(
self,
self.tr("New Web Database Quota"),
self.tr(
"Enter the new quota in MB (current = {0}, used = {1}; "
"step size = 5 MB):"
.format(
self.__dataString(securityOrigin.databaseQuota()),
self.__dataString(securityOrigin.databaseUsage()))),
securityOrigin.databaseQuota() // (1024 * 1024),
0, 2147483647, 5)
if ok:
securityOrigin.setDatabaseQuota(newQuota * 1024 * 1024)
def __dataString(self, size):
"""
Private method to generate a formatted data string.
@param size size to be formatted (integer)
@return formatted data string (string)
"""
unit = ""
if size < 1024:
unit = self.tr("bytes")
elif size < 1024 * 1024:
size /= 1024
unit = self.tr("kB")
else:
size /= 1024 * 1024
unit = self.tr("MB")
return "{0:.1f} {1}".format(size, unit)
###########################################################################
## Access key related methods below
###########################################################################
def __accessKeyShortcut(self):
"""
Private slot to switch the display of access keys.
"""
if not self.hasFocus() or \
not self.__accessKeysPressed or \
not self.__enableAccessKeys:
return
if self.__accessKeyLabels:
self.__hideAccessKeys()
else:
self.__showAccessKeys()
self.__accessKeysPressed = False
def __checkForAccessKey(self, evt):
"""
Private method to check the existence of an access key and activate the
corresponding link.
@param evt reference to the key event (QKeyEvent)
@return flag indicating, if the event was handled (boolean)
"""
if not self.__accessKeyLabels:
return False
text = evt.text()
if not text:
return False
key = text[0].upper()
handled = False
if key in self.__accessKeyNodes:
element = self.__accessKeyNodes[key]
p = element.geometry().center()
frame = element.webFrame()
p -= frame.scrollPosition()
frame = frame.parentFrame()
while frame and frame != self.page().mainFrame():
p -= frame.scrollPosition()
frame = frame.parentFrame()
pevent = QMouseEvent(
QEvent.MouseButtonPress, p, Qt.LeftButton,
Qt.MouseButtons(Qt.NoButton),
Qt.KeyboardModifiers(Qt.NoModifier))
qApp.sendEvent(self, pevent)
revent = QMouseEvent(
QEvent.MouseButtonRelease, p, Qt.LeftButton,
Qt.MouseButtons(Qt.NoButton),
Qt.KeyboardModifiers(Qt.NoModifier))
qApp.sendEvent(self, revent)
handled = True
return handled
def __hideAccessKeys(self):
"""
Private slot to hide the access key labels.
"""
if self.__accessKeyLabels:
for label in self.__accessKeyLabels:
label.hide()
label.deleteLater()
self.__accessKeyLabels = []
self.__accessKeyNodes = {}
self.update()
def __showAccessKeys(self):
"""
Private method to show the access key labels.
"""
supportedElements = [
"input", "a", "area", "button", "label", "legend", "textarea",
]
unusedKeys = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z" \
" 0 1 2 3 4 5 6 7 8 9".split()
viewport = QRect(self.__page.mainFrame().scrollPosition(),
self.__page.viewportSize())
# Priority first goes to elements with accesskey attributes
alreadyLabeled = []
for elementType in supportedElements:
result = self.page().mainFrame().findAllElements(elementType)\
.toList()
for element in result:
geometry = element.geometry()
if geometry.size().isEmpty() or \
not viewport.contains(geometry.topLeft()):
continue
accessKeyAttribute = element.attribute("accesskey").upper()
if not accessKeyAttribute:
continue
accessKey = ""
i = 0
while i < len(accessKeyAttribute):
if accessKeyAttribute[i] in unusedKeys:
accessKey = accessKeyAttribute[i]
break
i += 2
if accessKey == "":
continue
unusedKeys.remove(accessKey)
self.__makeAccessLabel(accessKey, element)
alreadyLabeled.append(element)
# Pick an access key first from the letters in the text and then
# from the list of unused access keys
for elementType in supportedElements:
result = self.page().mainFrame().findAllElements(elementType)\
.toList()
for element in result:
geometry = element.geometry()
if not unusedKeys or \
element in alreadyLabeled or \
geometry.size().isEmpty() or \
not viewport.contains(geometry.topLeft()):
continue
accessKey = ""
text = element.toPlainText().upper()
for c in text:
if c in unusedKeys:
accessKey = c
break
if accessKey == "":
accessKey = unusedKeys[0]
unusedKeys.remove(accessKey)
self.__makeAccessLabel(accessKey, element)
def __makeAccessLabel(self, accessKey, element):
"""
Private method to generate the access label for an element.
@param accessKey access key to generate the label for (str)
@param element reference to the web element to create the label for
(QWebElement)
"""
label = QLabel(self)
label.setText("<qt><b>{0}</b></qt>".format(accessKey))
p = QToolTip.palette()
color = QColor(Qt.yellow).lighter(150)
color.setAlpha(175)
p.setColor(QPalette.Window, color)
label.setPalette(p)
label.setAutoFillBackground(True)
label.setFrameStyle(QFrame.Box | QFrame.Plain)
point = element.geometry().center()
point -= self.__page.mainFrame().scrollPosition()
label.move(point)
label.show()
point.setX(point.x() - label.width() // 2)
label.move(point)
self.__accessKeyLabels.append(label)
self.__accessKeyNodes[accessKey] = element
###########################################################################
## Miscellaneous methods below
###########################################################################
def createWindow(self, windowType):
"""
Public method called, when a new window should be created.
@param windowType type of the requested window (QWebPage.WebWindowType)
@return reference to the created browser window (HelpBrowser)
"""
self.mw.newTab(addNextTo=self)
return self.mw.currentBrowser()
def preferencesChanged(self):
"""
Public method to indicate a change of the settings.
"""
self.__enableAccessKeys = Preferences.getHelp("AccessKeysEnabled")
if not self.__enableAccessKeys:
self.__hideAccessKeys()
self.reload()
###########################################################################
## RSS related methods below
###########################################################################
def checkRSS(self):
"""
Public method to check, if the loaded page contains feed links.
@return flag indicating the existence of feed links (boolean)
"""
self.__rss = []
frame = self.page().mainFrame()
linkElementsList = frame.findAllElements("link").toList()
for linkElement in linkElementsList:
# only atom+xml and rss+xml will be processed
if linkElement.attribute("rel") != "alternate" or \
(linkElement.attribute("type") != "application/rss+xml" and
linkElement.attribute("type") != "application/atom+xml"):
continue
title = linkElement.attribute("title")
href = linkElement.attribute("href")
if href == "" or title == "":
continue
self.__rss.append((title, href))
return len(self.__rss) > 0
def getRSS(self):
"""
Public method to get the extracted RSS feeds.
@return list of RSS feeds (list of tuples of two strings)
"""
return self.__rss
def hasRSS(self):
"""
Public method to check, if the loaded page has RSS links.
@return flag indicating the presence of RSS links (boolean)
"""
return len(self.__rss) > 0
###########################################################################
## Clicked Frame slots
###########################################################################
def __loadClickedFrame(self):
"""
Private slot to load the selected frame only.
"""
self.setSource(self.__clickedFrame.url())
def __printClickedFrame(self):
"""
Private slot to print the selected frame.
"""
printer = QPrinter(mode=QPrinter.HighResolution)
if Preferences.getPrinter("ColorMode"):
printer.setColorMode(QPrinter.Color)
else:
printer.setColorMode(QPrinter.GrayScale)
if Preferences.getPrinter("FirstPageFirst"):
printer.setPageOrder(QPrinter.FirstPageFirst)
else:
printer.setPageOrder(QPrinter.LastPageFirst)
printer.setPageMargins(
Preferences.getPrinter("LeftMargin") * 10,
Preferences.getPrinter("TopMargin") * 10,
Preferences.getPrinter("RightMargin") * 10,
Preferences.getPrinter("BottomMargin") * 10,
QPrinter.Millimeter
)
printerName = Preferences.getPrinter("PrinterName")
if printerName:
printer.setPrinterName(printerName)
printDialog = QPrintDialog(printer, self)
if printDialog.exec_() == QDialog.Accepted:
try:
self.__clickedFrame.print_(printer)
except AttributeError:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Printing is not available due to a bug in"""
""" PyQt5. Please upgrade.</p>"""))
def __printPreviewClickedFrame(self):
"""
Private slot to show a print preview of the clicked frame.
"""
from PyQt5.QtPrintSupport import QPrintPreviewDialog
printer = QPrinter(mode=QPrinter.HighResolution)
if Preferences.getPrinter("ColorMode"):
printer.setColorMode(QPrinter.Color)
else:
printer.setColorMode(QPrinter.GrayScale)
if Preferences.getPrinter("FirstPageFirst"):
printer.setPageOrder(QPrinter.FirstPageFirst)
else:
printer.setPageOrder(QPrinter.LastPageFirst)
printer.setPageMargins(
Preferences.getPrinter("LeftMargin") * 10,
Preferences.getPrinter("TopMargin") * 10,
Preferences.getPrinter("RightMargin") * 10,
Preferences.getPrinter("BottomMargin") * 10,
QPrinter.Millimeter
)
printerName = Preferences.getPrinter("PrinterName")
if printerName:
printer.setPrinterName(printerName)
preview = QPrintPreviewDialog(printer, self)
preview.paintRequested.connect(self.__generatePrintPreviewClickedFrame)
preview.exec_()
def __generatePrintPreviewClickedFrame(self, printer):
"""
Private slot to generate a print preview of the clicked frame.
@param printer reference to the printer object (QPrinter)
"""
try:
self.__clickedFrame.print_(printer)
except AttributeError:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Printing is not available due to a bug in PyQt5."""
"""Please upgrade.</p>"""))
return
def __printPdfClickedFrame(self):
"""
Private slot to print the selected frame to PDF.
"""
printer = QPrinter(mode=QPrinter.HighResolution)
if Preferences.getPrinter("ColorMode"):
printer.setColorMode(QPrinter.Color)
else:
printer.setColorMode(QPrinter.GrayScale)
printerName = Preferences.getPrinter("PrinterName")
if printerName:
printer.setPrinterName(printerName)
printer.setOutputFormat(QPrinter.PdfFormat)
name = self.__clickedFrame.url().path().rsplit('/', 1)[-1]
if name:
name = name.rsplit('.', 1)[0]
name += '.pdf'
printer.setOutputFileName(name)
printDialog = QPrintDialog(printer, self)
if printDialog.exec_() == QDialog.Accepted:
try:
self.__clickedFrame.print_(printer)
except AttributeError:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Printing is not available due to a bug in"""
""" PyQt5. Please upgrade.</p>"""))
return
def __zoomInClickedFrame(self):
"""
Private slot to zoom into the clicked frame.
"""
index = self.__levelForZoom(
int(self.__clickedFrame.zoomFactor() * 100))
if index < len(self.__zoomLevels) - 1:
self.__clickedFrame.setZoomFactor(
self.__zoomLevels[index + 1] / 100)
def __zoomResetClickedFrame(self):
"""
Private slot to reset the zoom factor of the clicked frame.
"""
self.__clickedFrame.setZoomFactor(self.__currentZoom / 100)
def __zoomOutClickedFrame(self):
"""
Private slot to zoom out of the clicked frame.
"""
index = self.__levelForZoom(
int(self.__clickedFrame.zoomFactor() * 100))
if index > 0:
self.__clickedFrame.setZoomFactor(
self.__zoomLevels[index - 1] / 100)
def __showClickedFrameSource(self):
"""
Private slot to show the source of the clicked frame.
"""
from QScintilla.MiniEditor import MiniEditor
src = self.__clickedFrame.toHtml()
editor = MiniEditor(parent=self)
editor.setText(src, "Html")
editor.setLanguage("dummy.html")
editor.show()
def contentSniff(data):
"""
Module function to do some content sniffing to check, if the data is HTML.
@param data data block to sniff at (string)
@return flag indicating HTML content (boolean)
"""
if data.contains("<!doctype") or \
data.contains("<script") or \
data.contains("<html") or \
data.contains("<!--") or \
data.contains("<head") or \
data.contains("<iframe") or \
data.contains("<h1") or \
data.contains("<div") or \
data.contains("<font") or \
data.contains("<table") or \
data.contains("<a") or \
data.contains("<style") or \
data.contains("<title") or \
data.contains("<b") or \
data.contains("<body") or \
data.contains("<br") or \
data.contains("<p"):
return True
return False
| gpl-3.0 | -5,708,538,440,104,066,000 | 35.92099 | 79 | 0.535364 | false |
souravbadami/oppia | core/domain/event_services.py | 1 | 10774 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for handling events."""
import inspect
from core import jobs_registry
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import stats_domain
from core.domain import stats_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
import feconf
(stats_models, feedback_models) = models.Registry.import_models([
models.NAMES.statistics, models.NAMES.feedback])
taskqueue_services = models.Registry.import_taskqueue_services()
class BaseEventHandler(object):
"""Base class for event dispatchers."""
# A string denoting the type of the event. Should be specified by
# subclasses and considered immutable.
EVENT_TYPE = None
@classmethod
def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
"""Dispatch events asynchronously to continuous computation realtime
layers that are listening for them.
"""
taskqueue_services.defer(
jobs_registry.ContinuousComputationEventDispatcher.dispatch_event,
taskqueue_services.QUEUE_NAME_EVENTS, cls.EVENT_TYPE, *args,
**kwargs)
@classmethod
def _handle_event(cls, *args, **kwargs):
"""Perform in-request processing of an incoming event."""
raise NotImplementedError(
'Subclasses of BaseEventHandler should implement the '
'_handle_event() method, using explicit arguments '
'(no *args or **kwargs).')
@classmethod
def record(cls, *args, **kwargs):
"""Process incoming events.
Callers of event handlers should call this method, not _handle_event().
"""
cls._notify_continuous_computation_listeners_async(*args, **kwargs)
cls._handle_event(*args, **kwargs)
class StatsEventsHandler(BaseEventHandler):
"""Event handler for incremental update of analytics model using aggregated
stats data.
"""
EVENT_TYPE = feconf.EVENT_TYPE_ALL_STATS
@classmethod
def _is_latest_version(cls, exp_id, exp_version):
"""Verifies whether the exploration version for the stats to be stored
corresponds to the latest version of the exploration.
"""
exploration = exp_fetchers.get_exploration_by_id(exp_id)
return exploration.version == exp_version
@classmethod
def _handle_event(cls, exploration_id, exp_version, aggregated_stats):
if cls._is_latest_version(exploration_id, exp_version):
taskqueue_services.defer(
stats_services.update_stats,
taskqueue_services.QUEUE_NAME_STATS, exploration_id,
exp_version, aggregated_stats)
class AnswerSubmissionEventHandler(BaseEventHandler):
"""Event handler for recording answer submissions."""
EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED
@classmethod
def _notify_continuous_computation_listeners_async(cls, *args, **kwargs):
# Disable this method until we can deal with large answers, otherwise
# the data that is being placed on the task queue is too large.
pass
@classmethod
def _handle_event(
cls, exploration_id, exploration_version, state_name,
interaction_id, answer_group_index, rule_spec_index,
classification_categorization, session_id, time_spent_in_secs,
params, normalized_answer):
"""Records an event when an answer triggers a rule. The answer recorded
here is a Python-representation of the actual answer submitted by the
user.
"""
# TODO(sll): Escape these args?
stats_services.record_answer(
exploration_id, exploration_version, state_name, interaction_id,
stats_domain.SubmittedAnswer(
normalized_answer, interaction_id, answer_group_index,
rule_spec_index, classification_categorization, params,
session_id, time_spent_in_secs))
feedback_is_useful = (
classification_categorization != (
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION))
stats_models.AnswerSubmittedEventLogEntryModel.create(
exploration_id, exploration_version, state_name, session_id,
time_spent_in_secs, feedback_is_useful)
class ExplorationActualStartEventHandler(BaseEventHandler):
"""Event handler for recording exploration actual start events."""
EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id):
stats_models.ExplorationActualStartEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id)
class SolutionHitEventHandler(BaseEventHandler):
"""Event handler for recording solution hit events."""
EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
stats_models.SolutionHitEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs)
class StartExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration start events."""
EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, params,
play_type):
stats_models.StartExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, params,
play_type)
class MaybeLeaveExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration leave events."""
EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, time_spent,
params, play_type):
stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, time_spent,
params, play_type)
class CompleteExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration completion events."""
EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id, time_spent,
params, play_type):
stats_models.CompleteExplorationEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id, time_spent,
params, play_type)
class RateExplorationEventHandler(BaseEventHandler):
"""Event handler for recording exploration rating events."""
EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION
@classmethod
def _handle_event(cls, exploration_id, user_id, rating, old_rating):
stats_models.RateExplorationEventLogEntryModel.create(
exploration_id, user_id, rating, old_rating)
class StateHitEventHandler(BaseEventHandler):
"""Event handler for recording state hit events."""
EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT
# TODO(sll): remove params before sending this event to the jobs taskqueue.
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
params, play_type):
stats_models.StateHitEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
params, play_type)
class StateCompleteEventHandler(BaseEventHandler):
"""Event handler for recording state complete events."""
EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED
@classmethod
def _handle_event(
cls, exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
stats_models.StateCompleteEventLogEntryModel.create(
exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs)
class LeaveForRefresherExpEventHandler(BaseEventHandler):
"""Event handler for recording "leave for refresher exploration" events."""
EVENT_TYPE = feconf.EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP
@classmethod
def _handle_event(
cls, exp_id, refresher_exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs):
stats_models.LeaveForRefresherExplorationEventLogEntryModel.create(
exp_id, refresher_exp_id, exp_version, state_name, session_id,
time_spent_in_state_secs)
class FeedbackThreadCreatedEventHandler(BaseEventHandler):
"""Event handler for recording new feedback thread creation events."""
EVENT_TYPE = feconf.EVENT_TYPE_NEW_THREAD_CREATED
@classmethod
def _handle_event(cls, exp_id):
pass
class FeedbackThreadStatusChangedEventHandler(BaseEventHandler):
"""Event handler for recording reopening feedback thread events."""
EVENT_TYPE = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED
@classmethod
def _handle_event(cls, exp_id, old_status, new_status):
pass
class Registry(object):
"""Registry of event handlers."""
# Dict mapping event types to their classes.
_event_types_to_classes = {}
@classmethod
def _refresh_registry(cls):
"""Regenerates the event handler registry."""
cls._event_types_to_classes.clear()
# Find all subclasses of BaseEventHandler in the current module.
for obj_name, obj in globals().iteritems():
if inspect.isclass(obj) and issubclass(obj, BaseEventHandler):
if obj_name == 'BaseEventHandler':
continue
cls._event_types_to_classes[obj.EVENT_TYPE] = obj
@classmethod
def get_event_class_by_type(cls, event_type):
"""Gets an event handler class by its type.
Refreshes once if the event type is not found; subsequently, throws an
error.
"""
if event_type not in cls._event_types_to_classes:
cls._refresh_registry()
return cls._event_types_to_classes[event_type]
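# Illustrative sketch (not part of the original module): callers are expected
# to look up a handler class through the registry and call its record()
# method, which notifies continuous-computation listeners and then runs the
# in-request handler. The event type and arguments below are assumptions used
# only for illustration.
#   handler_cls = Registry.get_event_class_by_type(
#       feconf.EVENT_TYPE_START_EXPLORATION)
#   handler_cls.record(exp_id, exp_version, state_name, session_id,
#                      params, play_type)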
| apache-2.0 | -1,731,946,290,718,996,500 | 34.675497 | 80 | 0.679785 | false |
samli6479/bigdata | stream-processing.py | 1 | 2559 | # 1. read from kafka, kafka broker, kafka topic
# 2. write back to kafka, kafka broker, new kafka topic
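#
# Example invocation (the topic names and broker address below are only
# illustrative assumptions, not values required by this script):
#   python stream-processing.py stock-price average-stock-price localhost:9092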
import sys
import atexit
import logging
import json
import time
from kafka import KafkaProducer
from kafka.errors import KafkaError, KafkaTimeoutError
from pyspark import SparkContext # how to talk to spark
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
logger_format = "%(asctime)-15s %(message)s"
logging.basicConfig(format=logger_format)
logger = logging.getLogger('stream-processing')
logger.setLevel(logging.INFO)
topic = ""
new_topic = ""
kafka_broker = ""
kafka_producer = ""
def shutdown_hook(producer):
try:
logger.info('flush pending messages to kafka')
producer.flush(10)
logger.info('finish flushing pending messages')
    except KafkaError as kafka_error:
logger.warn('Failed to flush pending messages to kafka')
finally:
try:
producer.close(10)
except Exception as e:
            logger.warn('Failed to close kafka connection')
def process(timeobj, rdd):
# - calculate the average
num_of_records = rdd.count()
if num_of_records == 0:
return
price_sum = rdd.map(lambda record: float(json.loads(record[1].decode('utf-8'))[0].get('LastTradePrice'))).reduce(lambda a, b: a+b)
average = price_sum/num_of_records
logger.info('Received %d records from Kafka, average price is %f' % (num_of_records, average))
# - write back to kafka
# {timestamp, average}
data = json.dumps({
'timestamp': time.time(),
'average': average
})
kafka_producer.send(new_topic, value = data)
if __name__ == "__main__":
# kafka broker, topic,new topic and application name
if len(sys.argv) != 4:
print('Usage: stream-processing [topic] [new topic] [kafka-broker]')
exit(1)
topic, new_topic, kafka_broker = sys.argv[1:]
# -setup connection to spark cluster
# local[x] -x number of cores
sc = SparkContext("local[2]", "StockAveragePrice")
sc.setLogLevel('ERROR')
# Streaming(sc,x) - open in x seconds
ssc = StreamingContext(sc, 5)
# - create a data stream from spark
    # we could add our own kafka consumer here, but that is not recommended
    # because it would add an extra layer
directKafkaStream = KafkaUtils.createDirectStream(ssc, [topic], {'metadata.broker.list':kafka_broker})
# - for each RDD, do something
# Action
directKafkaStream.foreachRDD(process)
# - instantiate kafka producer
kafka_producer = KafkaProducer(bootstrap_servers=kafka_broker)
# - setup proper shutdown hook
# Action
atexit.register(shutdown_hook, kafka_producer)
ssc.start()
ssc.awaitTermination() | apache-2.0 | 8,905,566,711,919,112,000 | 26.826087 | 131 | 0.729191 | false |
t123/ReadingTool.Python | lib/stringutil.py | 1 | 4798 | import re, time, datetime
class StringUtil:
@staticmethod
def isEmpty(x):
if x is None:
return True
x = x.strip()
if len(x)==0:
return True
return False
@staticmethod
def isTrue(x):
if x is None:
return False
if isinstance(x, bool) and x==True:
return True
x = str(x).lower().strip()
if x=="1" or x=="true" or x=="yes":
return True
return False
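# Illustrative examples (not part of the original module), based on the two
# helpers above:
#   StringUtil.isEmpty("   ")  -> True
#   StringUtil.isEmpty("abc")  -> False
#   StringUtil.isTrue("Yes")   -> True
#   StringUtil.isTrue("0")     -> False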
class FilterParser():
def __init__(self, languageNames=[]):
self.languageNames = [item.lower() for item in languageNames]
self.tags = []
self.normal = []
self.special = []
self.languages = []
self.source = []
self.current = ""
self.isTag = False
self.inQuote = False
self.limit = 0
self.createdSign = ""
self.modifiedSign = ""
self.created = None
self.modified = None
def parseSource(self, string):
string = string.replace("source:", "")
self.source.append(string)
def parseTime(self, string):
string = string.lower()
string = string.replace("created:", "")
string = string.replace("modified:", "")
sign1 = string[0:1]
sign2 = string[0:2]
if sign2==">=" or sign2=="<=":
date = string[2:]
sign = sign2
elif sign1==">" or sign1=="<" or sign1=="=":
date = string[1:]
sign = sign1
else:
date = string[0:]
sign = "="
try:
if date=="today":
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d")
elif date=="yesterday":
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
date = yesterday.strftime("%Y-%m-%d")
date = time.strptime(date, "%Y-%m-%d")
            # time.strptime returns a struct_time; convert it to a timestamp
            # before shifting "<" style comparisons forward by one day
            timestamp = time.mktime(date)
            if sign.startswith("<"):
                timestamp += 60*60*24
            return (sign, timestamp)
except:
pass
return None
def append(self):
if not StringUtil.isEmpty(self.current):
if self.isTag:
self.tags.append(self.current.lower())
self.current = ""
self.isTag = False
self.inQuote = False
else:
if self.current.lower() in self.languageNames:
self.languages.append(self.current)
else:
if self.current.lower().startswith("limit:"):
try:
self.limit = int(self.current[6:])
except:
self.limit = 0
elif self.current.lower().startswith("created:"):
result = self.parseTime(self.current)
if result is not None:
self.createdSign = result[0]
self.created = result[1]
elif self.current.lower().startswith("modified:"):
result = self.parseTime(self.current)
if result is not None:
self.modifiedSign = result[0]
self.modified = result[1]
elif self.current.lower().startswith("source:"):
self.source.append(self.current[7:])
else:
self.normal.append(self.current)
self.current = ""
self.isTag = False
self.inQuote = False
def filter(self, text):
if StringUtil.isEmpty(text):
return
text = text.strip()
for char in text:
if char=="#":
self.isTag = True
continue
if char=="\"":
if self.inQuote:
self.append()
self.inQuote = False
else:
self.inQuote = True
continue
if char==" ":
if self.inQuote:
self.current += char
continue
self.append()
continue
self.current += char
self.append()
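# Illustrative sketch (not part of the original module) of how the filter
# syntax above is parsed; the language list and query string are assumptions:
#   parser = FilterParser(languageNames=["French"])
#   parser.filter('french #verbs "la maison" limit:10 created:>=2014-01-01')
#   parser.languages -> ["french"], parser.tags -> ["verbs"]
#   parser.normal -> ["la maison"], parser.limit -> 10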
| agpl-3.0 | -973,658,565,560,457,300 | 28.9875 | 80 | 0.40892 | false |
2027205T/tango_with_django | tango_with_django_project/rango/bing_search.py | 1 | 2779 | import json
import urllib, urllib2
import keys
# Add your BING_API_KEY
BING_API_KEY = keys.BING_API_KEY
def main():
# The main function should ask a user for a query (from the command line)
query = raw_input("Please enter a search query: ")
# and then issue the query to the BING API via the run_query method
results = run_query(query)
# and print out the top ten results returned.
print "Your results: ", results
# Print out the rank, title and URL for each result.
def run_query(search_terms):
# Specify the base
root_url = 'https://api.datamarket.azure.com/Bing/Search/'
source = 'Web'
# Specify how many results we wish to be returned per page.
# Offset specifies where in the results list to start from.
# With results_per_page = 10 and offset = 11, this would start from page 2.
results_per_page = 10
offset = 0
# Wrap quotes around our query terms as required by the Bing API.
# The query we will then use is stored within variable query.
query = "'{0}'".format(search_terms)
query = urllib.quote(query)
# Construct the latter part of our request's URL.
# Sets the format of the response to JSON and sets other properties.
search_url = "{0}{1}?$format=json&$top={2}&$skip={3}&Query={4}".format(
root_url,
source,
results_per_page,
offset,
query)
# Setup authentication with the Bing servers.
# The username MUST be a blank string, and put in your API key!
username = ''
# Create a 'password manager' which handles authentication for us.
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, search_url, username, BING_API_KEY)
# Create our results list which we'll populate.
results = []
try:
# Prepare for connecting to Bing's servers.
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
# Connect to the server and read the response generated.
response = urllib2.urlopen(search_url).read()
# Convert the string response to a Python dictionary object.
json_response = json.loads(response)
# Loop through each page returned, populating out results list.
for result in json_response['d']['results']:
results.append({
'title': result['Title'],
'link': result['Url'],
'summary': result['Description']})
# Catch a URLError exception - something went wrong when connecting!
except urllib2.URLError, e:
print "Error when querying the Bing API: ", e
# Return the list of results to the calling function.
return results
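# Note (illustrative, not part of the original module): each entry in the list
# returned by run_query() is a dict with 'title', 'link' and 'summary' keys.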
if __name__ == '__main__':
main()
| mit | 7,681,770,095,719,579,000 | 31.694118 | 79 | 0.660669 | false |
deschler/django-modeltranslation | modeltranslation/tests/models.py | 1 | 13047 | # -*- coding: utf-8 -*-
import six
from django.conf import settings
from django.core import validators
from django.db import models
from django.utils.translation import gettext_lazy
from modeltranslation.manager import MultilingualManager
class TestModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
text = models.TextField(blank=True, null=True)
url = models.URLField(blank=True, null=True)
email = models.EmailField(blank=True, null=True)
class UniqueNullableModel(models.Model):
title = models.CharField(null=True, unique=True, max_length=255)
# ######### Proxy model testing
class ProxyTestModel(TestModel):
class Meta:
proxy = True
def get_title(self):
return self.title
# ######### Fallback values testing
class FallbackModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
text = models.TextField(blank=True, null=True)
url = models.URLField(blank=True, null=True)
email = models.EmailField(blank=True, null=True)
description = models.CharField(max_length=255, null=True)
class FallbackModel2(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
text = models.TextField(blank=True, null=True)
url = models.URLField(blank=True, null=True)
email = models.EmailField(blank=True, null=True)
# ######### File fields testing
class FileFieldsModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
file = models.FileField(upload_to='modeltranslation_tests', null=True, blank=True)
file2 = models.FileField(upload_to='modeltranslation_tests')
image = models.ImageField(upload_to='modeltranslation_tests', null=True, blank=True)
# ######### Foreign Key / OneToOneField testing
class NonTranslated(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
class ForeignKeyModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
test = models.ForeignKey(
TestModel, null=True, related_name="test_fks", on_delete=models.CASCADE,
)
optional = models.ForeignKey(TestModel, blank=True, null=True, on_delete=models.CASCADE)
hidden = models.ForeignKey(
TestModel, blank=True, null=True, related_name="+", on_delete=models.CASCADE,
)
non = models.ForeignKey(
NonTranslated, blank=True, null=True, related_name="test_fks", on_delete=models.CASCADE,
)
untrans = models.ForeignKey(
TestModel, blank=True, null=True, related_name="test_fks_un", on_delete=models.CASCADE,
)
class OneToOneFieldModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
test = models.OneToOneField(
TestModel, null=True, related_name="test_o2o", on_delete=models.CASCADE,
)
optional = models.OneToOneField(TestModel, blank=True, null=True, on_delete=models.CASCADE)
# No hidden option for OneToOne
non = models.OneToOneField(
NonTranslated, blank=True, null=True, related_name="test_o2o", on_delete=models.CASCADE,
)
# ######### Custom fields testing
class OtherFieldsModel(models.Model):
"""
This class is supposed to include other newly added fields types, so that
adding new supported field doesn't end in adding new test model.
"""
# That's rich! PositiveIntegerField is only validated in forms, not in models.
int = models.PositiveIntegerField(default=42, validators=[validators.MinValueValidator(0)])
boolean = models.BooleanField(default=False)
nullboolean = models.NullBooleanField()
csi = models.CommaSeparatedIntegerField(max_length=255)
ip = models.IPAddressField(blank=True, null=True)
float = models.FloatField(blank=True, null=True)
decimal = models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True)
date = models.DateField(blank=True, null=True)
datetime = models.DateTimeField(blank=True, null=True)
time = models.TimeField(blank=True, null=True)
genericip = models.GenericIPAddressField(blank=True, null=True)
class FancyDescriptor(object):
"""
    Stupid demo descriptor that stores an int in the database and returns a string of that length on get.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, owner):
length = instance.__dict__[self.field.name]
if length is None:
return ''
return 'a' * length
def __set__(self, obj, value):
if isinstance(value, six.integer_types):
obj.__dict__[self.field.name] = value
elif isinstance(value, six.string_types):
obj.__dict__[self.field.name] = len(value)
else:
obj.__dict__[self.field.name] = 0
class FancyField(models.PositiveIntegerField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', '')
super(FancyField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name):
super(FancyField, self).contribute_to_class(cls, name)
setattr(cls, self.name, FancyDescriptor(self))
def pre_save(self, model_instance, add):
value = super(FancyField, self).pre_save(model_instance, add)
# In this part value should be retrieved using descriptor and be a string
assert isinstance(value, six.string_types)
# We put an int to database
return len(value)
class DescriptorModel(models.Model):
normal = FancyField()
trans = FancyField()
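# Illustrative behavior (not part of the original module): assigning a string
# to a FancyField stores its length in the database, and reading it back
# returns a string of 'a' characters of that length, e.g.:
#   obj = DescriptorModel(normal='abc')   # 3 is stored for ``normal``
#   obj.normal                            # -> 'aaa'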
# ######### Multitable inheritance testing
class MultitableModelA(models.Model):
titlea = models.CharField(gettext_lazy('title a'), max_length=255)
class MultitableModelB(MultitableModelA):
titleb = models.CharField(gettext_lazy('title b'), max_length=255)
class MultitableModelC(MultitableModelB):
titlec = models.CharField(gettext_lazy('title c'), max_length=255)
class MultitableModelD(MultitableModelB):
titled = models.CharField(gettext_lazy('title d'), max_length=255)
# ######### Abstract inheritance testing
class AbstractModelA(models.Model):
titlea = models.CharField(gettext_lazy('title a'), max_length=255)
def __init__(self, *args, **kwargs):
super(AbstractModelA, self).__init__(*args, **kwargs)
self.titlea = 'title_a'
class Meta:
abstract = True
class AbstractModelB(AbstractModelA):
titleb = models.CharField(gettext_lazy('title b'), max_length=255)
def __init__(self, *args, **kwargs):
super(AbstractModelB, self).__init__(*args, **kwargs)
self.titleb = 'title_b'
# ######### Fields inheritance testing
class Slugged(models.Model):
slug = models.CharField(max_length=255)
class Meta:
abstract = True
class MetaData(models.Model):
keywords = models.CharField(max_length=255)
class Meta:
abstract = True
class Displayable(Slugged, MetaData):
class Meta:
abstract = True
class BasePage(Displayable):
class Meta:
abstract = True
class Page(BasePage):
title = models.CharField(max_length=255)
class RichText(models.Model):
content = models.CharField(max_length=255)
class Meta:
abstract = True
class RichTextPage(Page, RichText):
pass
# ######### Admin testing
class DataModel(models.Model):
data = models.TextField(blank=True, null=True)
class GroupFieldsetsModel(models.Model):
title = models.CharField(max_length=255)
text = models.TextField(blank=True, null=True)
email = models.EmailField(blank=True, null=True)
class NameModel(models.Model):
firstname = models.CharField(max_length=50)
lastname = models.CharField(max_length=50)
age = models.CharField(max_length=50)
slug = models.SlugField(max_length=100)
slug2 = models.SlugField(max_length=100)
# ######### Integration testing
class ThirdPartyModel(models.Model):
name = models.CharField(max_length=20)
class ThirdPartyRegisteredModel(models.Model):
name = models.CharField(max_length=20)
# ######### Manager testing
class FilteredManager(MultilingualManager):
def get_queryset(self):
# always return empty queryset
return super(FilteredManager, self).get_queryset().filter(pk=None)
class FilteredTestModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
objects = FilteredManager()
class ForeignKeyFilteredModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
test = models.ForeignKey(
FilteredTestModel, null=True, related_name="test_fks", on_delete=models.CASCADE,
)
class ManagerTestModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
visits = models.IntegerField(gettext_lazy('visits'), default=0)
description = models.CharField(max_length=255, null=True)
class Meta:
ordering = ('-visits',)
class CustomManager(models.Manager):
def get_queryset(self):
sup = super(CustomManager, self)
queryset = sup.get_queryset() if hasattr(sup, 'get_queryset') else sup.get_query_set()
return queryset.filter(title__contains='a').exclude(description__contains='x')
get_query_set = get_queryset
def custom_qs(self):
sup = super(CustomManager, self)
queryset = sup.get_queryset() if hasattr(sup, 'get_queryset') else sup.get_query_set()
return queryset
def foo(self):
return 'bar'
class CustomManagerTestModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
description = models.CharField(max_length=255, null=True, db_column='xyz')
objects = CustomManager()
another_mgr_name = CustomManager()
class CustomQuerySet(models.query.QuerySet):
pass
class CustomManager2(models.Manager):
def get_queryset(self):
return CustomQuerySet(self.model, using=self._db)
get_query_set = get_queryset
class CustomManager2TestModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
objects = CustomManager2()
class CustomManagerAbstract(models.Manager):
pass
class CustomManagerBaseModel(models.Model):
needs_translation = models.BooleanField(default=False)
objects = models.Manager() # ensures objects is the default manager
translations = CustomManagerAbstract()
class Meta:
abstract = True
class CustomManagerChildTestModel(CustomManagerBaseModel):
title = models.CharField(gettext_lazy('title'), max_length=255)
objects = CustomManager2()
class PlainChildTestModel(CustomManagerBaseModel):
title = models.CharField(gettext_lazy('title'), max_length=255)
# ######### Required fields testing
class RequiredModel(models.Model):
non_req = models.CharField(max_length=10, blank=True)
req = models.CharField(max_length=10)
req_reg = models.CharField(max_length=10)
req_en_reg = models.CharField(max_length=10)
# ######### Name collision registration testing
class ConflictModel(models.Model):
title = models.CharField(gettext_lazy('title'), max_length=255)
title_de = models.IntegerField()
class AbstractConflictModelA(models.Model):
title_de = models.IntegerField()
class Meta:
abstract = True
class AbstractConflictModelB(AbstractConflictModelA):
title = models.CharField(gettext_lazy('title'), max_length=255)
class MultitableConflictModelA(models.Model):
title_de = models.IntegerField()
class MultitableConflictModelB(MultitableConflictModelA):
title = models.CharField(gettext_lazy('title'), max_length=255)
# ######### Complex M2M with abstract classes and custom managers
class CustomQuerySetX(models.query.QuerySet):
pass
class CustomManagerX(models.Manager):
def get_queryset(self):
return CustomQuerySetX(self.model, using=self._db)
get_query_set = get_queryset
class AbstractBaseModelX(models.Model):
name = models.CharField(max_length=255)
objects = CustomManagerX()
class Meta:
abstract = True
class AbstractModelX(AbstractBaseModelX):
class Meta:
abstract = True
class ModelX(AbstractModelX):
pass
class AbstractModelXY(models.Model):
model_x = models.ForeignKey('ModelX', on_delete=models.CASCADE)
model_y = models.ForeignKey('ModelY', on_delete=models.CASCADE)
class Meta:
abstract = True
class ModelXY(AbstractModelXY):
pass
class CustomManagerY(models.Manager):
pass
class AbstractModelY(models.Model):
title = models.CharField(max_length=255)
xs = models.ManyToManyField('ModelX', through='ModelXY')
objects = CustomManagerY()
class Meta:
abstract = True
class ModelY(AbstractModelY):
pass
# Non-abstract base models whos Manager is not allowed to be overwritten
if "django.contrib.auth" in settings.INSTALLED_APPS:
from django.contrib.auth.models import Permission
class InheritedPermission(Permission):
translated_var = models.CharField(max_length=255)
| bsd-3-clause | -8,254,027,629,270,377,000 | 27.4869 | 96 | 0.696405 | false |
vahndi/scitwi | scitwi/trends/trend.py | 1 | 1155 | from datetime import datetime
from typing import List
from scitwi.places.location import Location
from scitwi.utils.strs import list_obj_string, obj_string
class Trend(object):
def __init__(self, trend_dict: dict, as_of: datetime, created_at: datetime, locations: List[Location]):
self.as_of = as_of
self.created_at = created_at
self.locations = locations
self.name = trend_dict['name']
self.promoted_content = trend_dict['promoted_content']
self.query = trend_dict['query']
self.tweet_volume = trend_dict['tweet_volume']
self.url = trend_dict['url']
def __str__(self):
str_out = ''
str_out += obj_string('Name', self.name)
str_out += obj_string('Promoted Content', self.promoted_content)
str_out += obj_string('Query', self.query)
str_out += obj_string('Tweet Volume', self.tweet_volume)
str_out += obj_string('Url', self.url)
        str_out += obj_string('As Of', self.as_of)
str_out += obj_string('Created At', self.created_at)
str_out += list_obj_string('Locations', self.locations)
return str_out
| mit | -5,805,980,990,255,305,000 | 32 | 107 | 0.624242 | false |
akmiller01/di-quick-vis | qv/core/models.py | 1 | 1829 | from django.db import models
from redactor.fields import RedactorField
from jsonfield import JSONField
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.text import slugify
import datetime
from os.path import basename, splitext
class Dataset(models.Model):
name = models.CharField(max_length=255, null=True, blank=True)
slug = models.SlugField(unique=True,max_length=255, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
file_field = models.FileField(upload_to=settings.MEDIA_ROOT+'/%Y/%m/%d')
json = JSONField(null=True,blank=True)
sheet = models.IntegerField(null=True,blank=True,default=0)
starting_row = models.IntegerField(null=True,blank=True,default=0)
xVar = models.CharField(max_length=255, null=True, blank=True, default='id')
yVar = models.CharField(max_length=255, null=True, blank=True, default='value')
timeVar = models.CharField(max_length=255, null=True, blank=True, default='year')
class Meta:
ordering = ['-created']
def __unicode__(self):
return u'%s' % self.name
def get_absolute_url(self):
return reverse('core.views.data',args=[self.slug])
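    # Illustrative note (not part of the original module): save() below derives
    # the slug from the (possibly file-derived) name plus today's date, e.g.
    # "my-data-201651" for 2016-05-01, with the record id appended when the
    # name was supplied explicitly.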
def save(self, *args, **kwargs):
super(Dataset, self).save(*args, **kwargs)
date = datetime.date.today()
if self.name is None or self.name == "":
self.name = splitext(basename(self.file_field.name))[0]
self.slug = '%s-%i%i%i' % (
slugify(self.name), date.year, date.month, date.day
)
elif self.slug is None or self.slug == "":
self.slug = '%s-%i%i%i%i' % (
slugify(self.name), date.year, date.month, date.day, self.id
)
super(Dataset, self).save(*args, **kwargs)
| gpl-2.0 | -582,374,485,033,654,700 | 41.534884 | 85 | 0.648442 | false |
EDITD/queue_util | queue_util/producer.py | 1 | 2789 | """
Allow the ability to connect and publish to a queue.
"""
import logging
import time
import kombu
import six
class Producer(object):
def __init__(self, dest_queue_name, rabbitmq_host, rabbitmq_port=None,
serializer=None, compression=None,
userid=None, password=None):
connect_kwargs = {}
if userid is not None:
connect_kwargs['userid'] = userid
if password is not None:
connect_kwargs['password'] = password
if rabbitmq_port is not None:
connect_kwargs['port'] = rabbitmq_port
broker = kombu.BrokerConnection(rabbitmq_host, **connect_kwargs)
self.dest_queue = broker.SimpleQueue(
dest_queue_name,
serializer=serializer,
compression=compression,
)
def put(self, item):
"""
Put one item onto the queue.
"""
self.dest_queue.put(item)
def buffered_put(self, input_iter, batch_size, resume_threshold=0.1, delay_in_seconds=5.0):
"""
Given an input iterator, keep adding batches of items to the
destination queue.
After each batch, wait for the queue size to drop to a certain level
until putting in the next batch.
(Wait until the queue size is batch_size * resume_threshold.)
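        For example (illustrative numbers only): with batch_size=1000 and the
        default resume_threshold=0.1, enqueueing resumes once the queue has
        drained to roughly 100 messages.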
Note that it isn't exact, but it will attempt to ensure that the queue
size never goes (much) beyond batch_size.
"""
num_enqueued = 0
while True:
try:
logging.debug('Starting batch (batch_size={0})'.format(batch_size))
for i in range(batch_size):
self.put(six.next(input_iter))
num_enqueued += 1
logging.debug('Batch done. {0} items enqueued so far'.format(num_enqueued))
except StopIteration:
# We're done!
#
logging.debug('Input exhausted. {0} items enqueued in total'.format(num_enqueued))
break
# After each batch, we need to pause briefly.
            # Otherwise qsize() won't include the messages that we
            # just enqueued.
#
time.sleep(delay_in_seconds)
# Now that we have completed one batch, we need to wait.
max_size = resume_threshold * batch_size
num_messages = self.dest_queue.qsize()
while num_messages >= max_size:
logging.debug(
'Current queue size = {0}, waiting until size <= {1}'.format(
num_messages, max_size,
),
)
time.sleep(delay_in_seconds)
num_messages = self.dest_queue.qsize()
| mit | -2,356,338,687,290,004,000 | 35.220779 | 98 | 0.556472 | false |