Column types and ranges: repo_name (string, length 5-92), path (string, length 4-232), copies (string, 19 classes), size (string, length 4-7), content (string, length 721-1.04M), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51-99.9), line_max (int64, 15-997), alpha_frac (float64, 0.25-0.97), autogenerated (bool, 1 class).

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
ShaguptaS/moviepy | moviepy/video/fx/resize.py | 1 | 3276 |
resize_possible = True
try:
import cv2
resizer = lambda pic, newsize : cv2.resize(pic.astype('uint8'),
tuple(map(int, newsize)),
interpolation=cv2.INTER_AREA)
except ImportError:
try:
import Image
import numpy as np
def resizer(pic, newsize):
newsize = map(int, newsize)[::-1]
shape = pic.shape
newshape = (newsize[0],newsize[1],shape[2])
pilim = Image.fromarray(pic)
resized_pil = pilim.resize(newsize[::-1], Image.ANTIALIAS)
arr = np.fromstring(resized_pil.tostring(), dtype='uint8')
return arr.reshape(newshape)
except ImportError:
try:
            from scipy.misc import imresize
resizer = lambda pic, newsize : imresize(pic,
map(int, newsize[::-1]))
except ImportError:
resize_possible = False
from moviepy.decorators import apply_to_mask
@apply_to_mask
def resize(clip, newsize=None, height=None, width=None):
"""
Returns a video clip that is a resized version of the clip.
:param newsize: can be either ``(height,width)`` in pixels or
a float representing a scaling factor. Or a function of time
returning one of these.
    :param width: width of the new clip in pixels. The height is
    then computed so that the width/height ratio is conserved.
    :param height: height of the new clip in pixels. The width is
    then computed so that the width/height ratio is conserved.
    >>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.size
if newsize != None:
def trans_newsize(ns):
if isinstance(ns, (int, float)):
return [ns * w, ns * h]
else:
return ns
if hasattr(newsize, "__call__"):
newsize2 = lambda t : trans_newsize(newsize(t))
if clip.ismask:
fl = lambda gf,t: 1.0*resizer((255 * gf(t)).astype('uint8'),
newsize2(t))/255
else:
fl = lambda gf,t: resizer(gf(t).astype('uint8'),newsize2(t))
return clip.fl(fl)
else:
newsize = trans_newsize(newsize)
elif height != None:
newsize = [w * height / h, height]
elif width != None:
newsize = [width, h * width / w]
if clip.ismask:
fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'),
newsize)/255
else:
fl = lambda pic: resizer(pic.astype('uint8'), newsize)
return clip.fl_image(fl)
if not resize_possible:
doc = resize.__doc__
def resize(clip, newsize=None, height=None, width=None):
raise ImportError("fx resize needs OpenCV or Scipy or PIL")
resize.__doc__ = doc
| mit | -3,920,737,535,064,460,300 | 31.117647 | 76 | 0.53083 | false |
luzheqi1987/nova-annotation | nova/tests/unit/integrated/v3/test_flavor_access.py | 1 | 3181 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.unit.integrated.v3 import api_sample_base
class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
extension_name = 'flavor-access'
def _add_tenant(self):
subs = {
'tenant_id': 'fake_tenant',
'flavor_id': 10,
}
response = self._do_post('flavors/10/action',
'flavor-access-add-tenant-req',
subs)
self._verify_response('flavor-access-add-tenant-resp',
subs, response, 200)
def _create_flavor(self):
subs = {
'flavor_id': 10,
'flavor_name': 'test_flavor'
}
response = self._do_post("flavors",
"flavor-access-create-req",
subs)
subs.update(self._get_regexes())
self._verify_response("flavor-access-create-resp", subs, response, 200)
def test_flavor_access_create(self):
self._create_flavor()
def test_flavor_access_detail(self):
response = self._do_get('flavors/detail')
subs = self._get_regexes()
self._verify_response('flavor-access-detail-resp', subs, response, 200)
def test_flavor_access_list(self):
self._create_flavor()
self._add_tenant()
flavor_id = 10
response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
subs = {
'flavor_id': flavor_id,
'tenant_id': 'fake_tenant',
}
self._verify_response('flavor-access-list-resp', subs, response, 200)
def test_flavor_access_show(self):
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
subs = {
'flavor_id': flavor_id
}
subs.update(self._get_regexes())
self._verify_response('flavor-access-show-resp', subs, response, 200)
def test_flavor_access_add_tenant(self):
self._create_flavor()
self._add_tenant()
def test_flavor_access_remove_tenant(self):
self._create_flavor()
self._add_tenant()
subs = {
'tenant_id': 'fake_tenant',
}
response = self._do_post('flavors/10/action',
"flavor-access-remove-tenant-req",
subs)
exp_subs = {
"tenant_id": self.api.project_id,
"flavor_id": "10"
}
self._verify_response('flavor-access-remove-tenant-resp',
exp_subs, response, 200)
| apache-2.0 | 7,465,998,557,256,841,000 | 34.741573 | 79 | 0.561144 | false |
BrendanLeber/adventofcode | 2016/20-firewall_rules/firewall_rules.py | 1 | 1586 | # -*- coding: utf-8 -*-
import argparse
import pdb
import traceback
from typing import List, Tuple
def test_ip(ip: int, rules: List[Tuple[int, int]], max_addr: int) -> bool:
for (start, end) in rules:
if start <= ip <= end:
break
else:
if ip < max_addr:
return True
return False
def solve(rules: List[Tuple[int, int]], max_addr: int) -> Tuple[int, int]:
candidates = [rule[1] + 1 for rule in rules]
valids = [candidate for candidate in candidates if test_ip(candidate, rules, max_addr)]
one: int = valids[0]
two: int = 0
for ip in valids:
while test_ip(ip, rules, max_addr):
two += 1
ip += 1
return (one, two)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Advent of Code - 2016 - Day 20 - Firewall Rules.")
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"max_addr",
type=int,
default=4294967296,
nargs="?",
help="The largest address. (Default %(default)s)",
)
args = parser.parse_args()
rules: List[Tuple[int, int]] = []
with open(args.input, "rt") as inf:
for line in inf:
parts = line.strip().split("-")
rules.append((int(parts[0]), int(parts[1])))
rules.sort()
try:
print(solve(rules, args.max_addr))
except Exception:
traceback.print_exc()
pdb.post_mortem()
| mit | 2,803,982,701,570,625,000 | 24.580645 | 100 | 0.552333 | false |
uwosh/UWOshOIE | tests/testTransitionApproveForFA.py | 1 | 4225 | import os, sys
if __name__ == '__main__':
execfile(os.path.join(sys.path[0], 'framework.py'))
from Products.UWOshOIE.tests.uwoshoietestcase import UWOshOIETestCase
from Products.CMFCore.WorkflowCore import WorkflowException
class TestTransitionApproveForFA(UWOshOIETestCase):
"""Ensure product is properly installed"""
def createApplication(self):
self.login(self._default_user)
self.portal.invokeFactory(type_name="OIEStudentApplication", id="testapplication")
app = self.portal['testapplication']
self.fill_out_application(app)
app.setHoldApplication('HOLD')
self.portal_workflow.doActionFor(app, 'submit')
return app
def test_program_manager_should_be_able_to_do_action(self):
app = self.createApplication()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'approveForFA')
self.assertEquals('waitingForPrintMaterials', self.getState(app))
def test_front_line_advisor_should_be_able_to_do_action(self):
app = self.createApplication()
self.login('front_line_advisor')
self.portal_workflow.doActionFor(app, 'approveForFA')
self.assertEquals('waitingForPrintMaterials', self.getState(app))
def test_all_other_roles_should_not_be_able_able_to_perform_action(self):
        app = self.createApplication()
        for user in self._all_users:
            if user != 'program_manager' and user != 'front_line_advisor':
self.login(user)
self.assertRaises(WorkflowException, self.portal_workflow.doActionFor, app, 'approveForFA')
self.logout()
def test_should_send_email_when_fired(self):
app = self.createApplication()
self.portal.MailHost.clearEmails()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'approveForFA')
self.assertEquals(1, self.portal.MailHost.getEmailCount())
def test_should_send_correct_email_program_maanger(self):
app = self.createApplication()
self.portal.MailHost.clearEmails()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'approveForFA')
to = self.portal.MailHost.getTo()
f = self.portal.MailHost.getFrom()
subject = self.portal.MailHost.getSubject()
message = self.portal.MailHost.getMessage()
self.assertEquals(['[email protected]', '[email protected]'], to)
self.assertEquals('[email protected]', f)
self.assertEquals('Your study abroad application update (UW Oshkosh Office of International Education)', subject)
self.assertEquals("\n\nYour UW Oshkosh Office of International Education study abroad application has been updated.\n\nName: John Doe\nProgram Name: test\nProgram Year: 2009\n\nTransition\n\n\n\nYou can view your application here: http://nohost/plone/testapplication\n\nComment: \n\n\n", message)
def test_should_send_correct_email_front_line_advisor(self):
app = self.createApplication()
self.portal.MailHost.clearEmails()
self.login('front_line_advisor')
self.portal_workflow.doActionFor(app, 'approveForFA')
to = self.portal.MailHost.getTo()
f = self.portal.MailHost.getFrom()
subject = self.portal.MailHost.getSubject()
message = self.portal.MailHost.getMessage()
self.assertEquals(['[email protected]', '[email protected]'], to)
self.assertEquals('[email protected]', f)
self.assertEquals('Your study abroad application update (UW Oshkosh Office of International Education)', subject)
self.assertEquals("\n\nYour UW Oshkosh Office of International Education study abroad application has been updated.\n\nName: John Doe\nProgram Name: test\nProgram Year: 2009\n\nTransition\n\n\n\nYou can view your application here: http://nohost/plone/testapplication\n\nComment: \n\n\n", message)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestTransitionApproveForFA))
return suite
if __name__ == '__main__':
framework()
| gpl-2.0 | 7,559,430,899,610,069,000 | 41.676768 | 304 | 0.68213 | false |
dpshelio/sunpy | sunpy/util/net.py | 2 | 6983 | """
This module provides general net utility functions.
"""
import os
import re
import sys
import shutil
from unicodedata import normalize
from email.parser import FeedParser
from urllib.parse import urljoin, urlparse
from urllib.request import urlopen
from sunpy.util import replacement_filename
__all__ = ['slugify', 'get_content_disposition', 'get_filename', 'get_system_filename',
'download_file', 'download_fileobj', 'check_download_file']
# Characters not allowed in slugified version.
_punct_re = re.compile(r'[:\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim='_'):
"""
Slugify given unicode text.
Parameters
----------
text : `str`
A `str` to slugify.
delim : `str`, optional
The delimiter for the input ``text``. Default is "_".
Returns
-------
`str` :
The slugify `str` name.
"""
text = normalize('NFKD', text)
period = '.'
name_and_extension = text.rsplit(period, 1)
name = name_and_extension[0]
name = str(delim).join(
filter(None, (word for word in _punct_re.split(name.lower()))))
if len(name_and_extension) == 2:
extension = name_and_extension[1]
return str(period).join([name, extension])
else:
return name
def get_content_disposition(content_disposition):
"""
Get the content disposition filename from given header.
**Do not include "Content-Disposition:".**
Parameters
----------
content_disposition : `str`
The content disposition header.
Returns
-------
`str` :
The content disposition filename.
"""
parser = FeedParser()
parser.feed('Content-Disposition: ' + content_disposition)
name = parser.close().get_filename()
if name and not isinstance(name, str):
name = name.decode('latin1', 'ignore')
return name
def get_filename(sock, url):
"""
Get filename from given `~urllib.request.urlopen` object and URL.
First, tries the "Content-Disposition", if unavailable, extracts name from the URL.
Parameters
----------
sock : `~urllib.request.urlopen`
The `~urllib.request.urlopen` to parse for the filename.
url : `str`
The URL to parse for the filename.
Returns
-------
`str`:
The filename.
"""
name = None
cd = sock.headers.get('Content-Disposition', None)
if cd is not None:
try:
name = get_content_disposition(cd)
except IndexError:
pass
if not name:
parsed = urlparse(url)
name = parsed.path.rstrip('/').rsplit('/', 1)[-1]
return str(name)
def get_system_filename(sock, url, default="file"):
"""
Get filename from given `~urllib.request.urlopen` object and URL.
First, tries the "Content-Disposition", if unavailable, extracts name from the URL.
If this fails, the ``default`` keyword will be used.
Parameters
----------
sock : `~urllib.request.urlopen`
The `~urllib.request.urlopen` to parse for the filename.
url : `str`
The URL to parse for the filename.
default : `str`, optional
The name to use if the first two methods fail. Defaults to "file".
Returns
-------
`bytes`:
The filename in file system encoding.
"""
name = get_filename(sock, url)
if not name:
name = str(default)
return name.encode(sys.getfilesystemencoding(), 'ignore')
def download_fileobj(opn, directory, url='', default="file", overwrite=False):
"""
Download a file from a url into a directory.
Tries the "Content-Disposition", if unavailable, extracts name from the URL.
If this fails, the ``default`` keyword will be used.
Parameters
----------
opn : `~urllib.request.urlopen`
The `~urllib.request.urlopen` to download.
directory : `str`
The directory path to download the file in to.
url : `str`
The URL to parse for the filename.
default : `str`, optional
The name to use if the first two methods fail. Defaults to "file".
overwrite: `bool`, optional
If `True` will overwrite a file of the same name. Defaults to `False`.
Returns
-------
`str`:
The file path for the downloaded file.
"""
filename = get_system_filename(opn, url, default)
path = os.path.join(directory, filename.decode('utf-8'))
if overwrite and os.path.exists(path):
path = replacement_filename(path)
with open(path, 'wb') as fd:
shutil.copyfileobj(opn, fd)
return path
def download_file(url, directory, default="file", overwrite=False):
"""
Download a file from a url into a directory.
Tries the "Content-Disposition", if unavailable, extracts name from the URL.
If this fails, the ``default`` keyword will be used.
Parameters
----------
url : `str`
The file URL download.
directory : `str`
The directory path to download the file in to.
default : `str`, optional
The name to use if the first two methods fail. Defaults to "file".
overwrite: `bool`, optional
If `True` will overwrite a file of the same name. Defaults to `False`.
Returns
-------
`str`:
The file path for the downloaded file.
"""
opn = urlopen(url)
try:
path = download_fileobj(opn, directory, url, default, overwrite)
finally:
opn.close()
return path
def check_download_file(filename, remotepath, download_dir, remotename=None, replace=False):
"""
Downloads a file from a remotepath to a localpath if it isn't there.
This function checks whether a file with name ``filename`` exists in the user's local machine.
If it doesn't, it downloads the file from ``remotepath``.
Parameters
----------
filename : `str`
Name of file.
remotepath : `str`
URL of the remote location from which filename can be downloaded.
download_dir : `str`
The files directory.
remotename : `str`, optional
Filename under which the file is stored remotely.
Default is same as filename.
replace : `bool`, optional
If `True`, file will be downloaded whether or not file already exists locally.
Examples
--------
>>> from sunpy.util.net import check_download_file
>>> remotepath = "https://www.download_repository.com/downloads/"
>>> check_download_file("filename.txt", remotepath, download_dir='.') # doctest: +SKIP
"""
# Check if file already exists locally. If not, try downloading it.
if replace or not os.path.isfile(os.path.join(download_dir, filename)):
# set local and remote file names be the same unless specified
# by user.
if not isinstance(remotename, str):
remotename = filename
download_file(urljoin(remotepath, remotename),
download_dir, default=filename, overwrite=replace)
| bsd-2-clause | 134,006,119,759,047,330 | 28.340336 | 98 | 0.622798 | false |
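The helpers above come from `sunpy/util/net.py`; as a quick illustration of the documented API, here is a minimal, hedged usage sketch. The URL and target directory are placeholders, not values from the source, and network access is assumed.

```python
# Minimal usage sketch for the sunpy.util.net helpers shown above.
# The URL below is a placeholder, not a value taken from the source.
import tempfile

from sunpy.util.net import download_file, slugify

target_dir = tempfile.mkdtemp()
# Downloads the file, names it from Content-Disposition or the URL, and returns the local path.
local_path = download_file("https://example.com/data/sample.fits", target_dir, overwrite=True)
print(local_path)

# slugify() normalises an arbitrary file name into a filesystem-friendly slug.
print(slugify("My Observation (2011) final.fits"))
```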
msherry/PyXB-1.1.4 | tests/bugs/test-200908271556.py | 1 | 1990 | import pyxb_114.binding.generate
import pyxb_114.binding.datatypes as xs
import pyxb_114.binding.basis
import pyxb_114.utils.domutils
import gc
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="instance">
<xs:complexType>
<xs:all>
<xs:element name="inner" maxOccurs="unbounded">
<xs:complexType>
<xs:all>
<xs:element name="text" type="xs:string"/>
<xs:element name="number" type="xs:integer"/>
</xs:all>
</xs:complexType>
</xs:element>
</xs:all>
</xs:complexType>
</xs:element>
</xs:schema>
'''
#file('schema.xsd', 'w').write(xsd)
code = pyxb_114.binding.generate.GeneratePython(schema_text=xsd)
#file('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb_114.exceptions_ import *
import unittest
import os
class TestBug_200908271556(unittest.TestCase):
# No, this isn't portable. No, I don't care.
__statm = file('/proc/%d/statm' % (os.getpid(),))
def __getMem (self):
self.__statm.seek(0)
return int(self.__statm.read().split()[0])
def testMemory (self):
xmls = '<instance><inner><text>text</text><number>45</number></inner></instance>'
base_at = 10
check_at = 20
growth_limit = 1.10
iter = 0
gc.collect()
while True:
iter += 1
if base_at == iter:
gc.collect()
base_mem = self.__getMem()
elif check_at == iter:
gc.collect()
check_mem = self.__getMem()
growth = check_mem - base_mem
self.assertTrue(0 == growth, 'growth %s' % (growth,))
break
instance = CreateFromDocument(xmls)
xmls = instance.toxml("utf-8", root_only=True)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 223,691,252,362,142,340 | 27.028169 | 89 | 0.554271 | false |
deuscoin-org/deuscoin-core | qa/rpc-tests/test_framework/authproxy.py | 1 | 6097 |
"""
Copyright 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import decimal
import json
import logging
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
log = logging.getLogger("DeuscoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
Exception.__init__(self)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return round(o, 8)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy(object):
__id_count = 0
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None):
self.__service_url = service_url
self._service_name = service_name
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
(user, passwd) = (self.__url.username, self.__url.password)
try:
user = user.encode('utf8')
except AttributeError:
pass
try:
passwd = passwd.encode('utf8')
except AttributeError:
pass
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
None, None, False,
timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
False, timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except httplib.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
def __call__(self, *args):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal)))
postdata = json.dumps({'version': '1.1',
'method': self._service_name,
'params': args,
'id': AuthServiceProxy.__id_count}, default=EncodeDecimal)
response = self._request('POST', self.__url.path, postdata)
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal)
log.debug("--> "+postdata)
return self._request('POST', self.__url.path, postdata)
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
if "error" in response and response["error"] is None:
log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal)))
else:
log.debug("<-- "+responsedata)
return response
| mit | -2,653,445,764,162,595,300 | 36.176829 | 105 | 0.599803 | false |
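To illustrate the behaviour described in the module docstring above (attribute access turning into JSON-RPC calls over a persistent, authenticated HTTP connection), here is a hedged usage sketch; the URL, credentials and RPC method names are placeholders, not values from the source.

```python
# Hypothetical usage of AuthServiceProxy; URL, credentials and method names are placeholders.
service = AuthServiceProxy("http://rpcuser:rpcpassword@127.0.0.1:8332")

# Any attribute access is turned into a JSON-RPC call by __getattr__/__call__.
print(service.getblockcount())

# Batched requests go through _batch() with a list of JSON-RPC request dicts.
requests = [{"version": "1.1", "method": "getblockcount", "params": [], "id": i}
            for i in range(3)]
print(service._batch(requests))
```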
JasonLai256/plumbca | plumbca/cache.py | 1 | 2429 | # -*- coding:utf-8 -*-
"""
plumbca.cache
~~~~~~~~~~~~~
CacheHandler for the collections control.
:copyright: (c) 2015 by Jason Lai.
:license: BSD, see LICENSE for more details.
"""
import asyncio
import logging
import re
import os
from .config import DefaultConf
from .collection import IncreaseCollection
from .backend import BackendFactory
actlog = logging.getLogger('activity')
err_logger = logging.getLogger('errors')
class CacheCtl(object):
def __init__(self):
self.collmap = {}
self.info = {}
self.bk = BackendFactory(DefaultConf['backend'])
loop = asyncio.get_event_loop()
loop.run_until_complete(self.bk.init_connection())
def get_collection(self, name):
if name not in self.collmap:
actlog.info("Collection %s not exists.", name)
return
return self.collmap[name]
async def ensure_collection(self, name, ctype, expire, **kwargs):
rv = await self.bk.get_collection_index(name)
if name not in self.collmap and not rv:
actlog.info("Ensure collection - not exists in plumbca and redis")
self.collmap[name] = globals()[ctype](name, expire=expire, **kwargs)
await self.bk.set_collection_index(name, self.collmap[name])
actlog.info("Ensure collection - not exists in plumbca and redis, "
"create it, `%s`.", self.collmap[name])
elif name not in self.collmap and rv:
actlog.info("Ensure collection - not exists in plumbca")
rv_name, rv_instance_name = rv
assert name == rv_name
assert rv_instance_name == globals()[ctype].__class__.__name__
self.collmap[name] = globals()[ctype](name, expire=expire, **kwargs)
actlog.info("Ensure collection - not exists in plumbca, "
"create it, `%s`.", self.collmap[name])
elif name in self.collmap and not rv:
actlog.info("Ensure collection - not exists in redis")
await self.bk.set_collection_index(name, self.collmap[name])
actlog.info("Ensure collection - not exists in redis, "
"create it, `%s`.", self.collmap[name])
else:
actlog.info("Ensure collection already exists, `%s`.",
self.collmap[name])
def info(self):
pass
CacheCtl = CacheCtl()
| bsd-3-clause | -7,146,317,080,230,869,000 | 31.386667 | 80 | 0.599012 | false |
sdrogers/lda | code/lda_utilities.py | 1 | 14058 | import numpy as np
import pickle
import jsonpickle
from scipy.special import psi  # psi() is used by decompose() below but was not imported
from lda import VariationalLDA  # assumption: the repo's lda module defines VariationalLDA, used by decompose_from_dict()
def match_topics_across_dictionaries(lda1 = None,lda2 = None,file1 = None,file2 = None,
same_corpus = True,copy_annotations = False,copy_threshold = 0.5,summary_file = None,
new_file2 = None,mass_tol = 5.0):
# finds the closest topic matches from lda2 to lda1
if lda1 == None:
if file1 == None:
print "Must specify either an lda dictionary object or a dictionary file for lda1"
return
else:
with open(file1,'r') as f:
lda1 = pickle.load(f)
print "Loaded lda1 from {}".format(file1)
if lda2 == None:
if file2 == None:
            print "Must specify either an lda dictionary object or a dictionary file for lda2"
return
else:
with open(file2,'r') as f:
lda2 = pickle.load(f)
print "Loaded lda2 from {}".format(file2)
word_index = lda1['word_index']
n_words = len(word_index)
n_topics1 = lda1['K']
n_topics2 = lda2['K']
# Put lda1's topics into a nice matrix
beta = np.zeros((n_topics1,n_words),np.float)
topic_pos = 0
topic_index1 = {}
for topic in lda1['beta']:
topic_index1[topic] = topic_pos
for word in lda1['beta'][topic]:
word_pos = word_index[word]
beta[topic_pos,word_pos] = lda1['beta'][topic][word]
topic_pos += 1
# Make the reverse index
ti = [(topic,topic_index1[topic]) for topic in topic_index1]
ti = sorted(ti,key = lambda x: x[1])
reverse1,_ = zip(*ti)
if not same_corpus:
fragment_masses = np.array([float(f.split('_')[1]) for f in word_index if f.startswith('fragment')])
fragment_names = [f for f in word_index if f.startswith('fragment')]
loss_masses = np.array([float(f.split('_')[1]) for f in word_index if f.startswith('loss')])
loss_names = [f for f in word_index if f.startswith('loss')]
beta /= beta.sum(axis=1)[:,None]
best_match = {}
temp_topics2 = {}
for topic2 in lda2['beta']:
temp_topics2[topic2] = {}
temp_beta = np.zeros((1,n_words))
if same_corpus:
total_probability = 0.0
for word in lda2['beta'][topic2]:
word_pos = word_index[word]
temp_beta[0,word_pos] = lda2['beta'][topic2][word]
temp_topics2[topic2][word] = lda2['beta'][topic2][word]
total_probability += temp_topics2[topic2][word]
for word in temp_topics2[topic2]:
temp_topics2[topic2][word] /= total_probability
temp_beta /= temp_beta.sum(axis=1)[:,None]
else:
# we need to match across corpus
total_probability = 0.0
for word in lda2['beta'][topic2]:
# try and match to a word in word_index
split_word = word.split('_')
word_mass = float(split_word[1])
if split_word[0].startswith('fragment'):
ppm_errors = 1e6*np.abs((fragment_masses - word_mass)/fragment_masses)
smallest_pos = ppm_errors.argmin()
if ppm_errors[smallest_pos] < mass_tol:
word1 = fragment_names[smallest_pos]
temp_topics2[topic2][word1] = lda2['beta'][topic2][word]
temp_beta[0,word_index[word1]] = lda2['beta'][topic2][word]
if split_word[0].startswith('loss'):
ppm_errors = 1e6*np.abs((loss_masses - word_mass)/loss_masses)
smallest_pos = ppm_errors.argmin()
if ppm_errors[smallest_pos] < 2*mass_tol:
word1 = loss_names[smallest_pos]
temp_topics2[topic2][word1] = lda2['beta'][topic2][word]
temp_beta[0,word_index[word1]] = lda2['beta'][topic2][word]
total_probability += lda2['beta'][topic2][word]
for word in temp_topics2[topic2]:
temp_topics2[topic2][word] /= total_probability
temp_beta /= total_probability
match_scores = np.dot(beta,temp_beta.T)
best_score = match_scores.max()
best_pos = match_scores.argmax()
topic1 = reverse1[best_pos]
w1 = lda1['beta'][topic1].keys()
if same_corpus:
w2 = lda2['beta'][topic2].keys()
else:
w2 = temp_topics2[topic2].keys()
union = set(w1) | set(w2)
intersect = set(w1) & set(w2)
p1 = 0.0
p2 = 0.0
for word in intersect:
word_pos = word_index[word]
p1 += beta[topic_index1[topic1],word_pos]
p2 += temp_topics2[topic2][word]
annotation = ""
if 'topic_metadata' in lda1:
if topic1 in lda1['topic_metadata']:
if type(lda1['topic_metadata'][topic1]) == str:
annotation = lda1['topic_metadata'][topic1]
else:
annotation = lda1['topic_metadata'][topic1].get('annotation',"")
best_match[topic2] = (topic1,best_score,len(union),len(intersect),p2,p1,annotation)
if summary_file:
with open(summary_file,'w') as f:
f.write('lda2_topic,lda1_topic,match_score,unique_words,shared_words,shared_p_lda2,shared_p_lda1,lda1_annotation\n')
for topic2 in best_match:
topic1 = best_match[topic2][0]
line = "{},{},{}".format(topic2,topic1,best_match[topic2][1])
line += ",{},{}".format(best_match[topic2][2],best_match[topic2][3])
line += ",{},{}".format(best_match[topic2][4],best_match[topic2][5])
line += ",{}".format(best_match[topic2][6])
f.write(line+'\n')
if copy_annotations and 'topic_metadata' in lda1:
print "Copying annotations"
if not 'topic_metadata' in lda2:
lda2['topic_metadata'] = {}
for topic2 in best_match:
lda2['topic_metadata'][topic2] = {'name':topic2}
topic1 = best_match[topic2][0]
p2 = best_match[topic2][4]
p1 = best_match[topic2][5]
if p1 >= copy_threshold and p2 >= copy_threshold:
annotation = best_match[topic2][6]
if len(annotation) > 0:
lda2['topic_metadata'][topic2]['annotation'] = annotation
if new_file2 == None:
with open(file2,'w') as f:
pickle.dump(lda2,f)
print "Dictionary with copied annotations saved to {}".format(file2)
else:
with open(new_file2,'w') as f:
pickle.dump(lda2,f)
print "Dictionary with copied annotations saved to {}".format(new_file2)
return best_match,lda2
def find_standards_in_dict(standards_file,lda_dict=None,lda_dict_file=None,mode='pos',mass_tol = 3,rt_tol = 12,new_lda_file = None):
if lda_dict == None:
if lda_dict_file == None:
print "Must provide either an lda dictionary or an lda dictionary file"
return
else:
with open(lda_dict_file,'r') as f:
lda_dict = pickle.load(f)
print "Loaded lda dictionary from {}".format(lda_dict_file)
# Load the standards
standard_molecules = []
found_heads = False
with open(standards_file,'r') as f:
for line in f:
if found_heads == False and line.startswith('Peak Num'):
found_heads = True
continue
elif found_heads == False:
continue
else:
split_line = line.rstrip().split(',')
if (mode == 'pos' and split_line[4] == '+') or (mode == 'neg' and split_line[3] == '-'):
# It's a keeper
name = split_line[2]
mz = split_line[6]
if mz == 'N':
continue
mz = float(mz)
rt = split_line[9]
if rt == '-':
continue
rt = float(rt)*60.0 # converted to seconds
formula = split_line[3]
standard_molecules.append((name,mz,rt,formula))
# mol = ()
print "Loaded {} molecules".format(len(standard_molecules))
doc_masses = np.array([float(d.split('_')[0]) for d in lda_dict['corpus']])
doc_names = [d for d in lda_dict['corpus']]
doc_rt = np.array([float(d.split('_')[1]) for d in lda_dict['corpus']])
hits = {}
for mol in standard_molecules:
mass_delta = mol[1]*mass_tol*1e-6
mass_hit = (doc_masses < mol[1] + mass_delta) & (doc_masses > mol[1] - mass_delta)
rt_hit = (doc_rt < mol[2] + rt_tol) & (doc_rt > mol[2] - rt_tol)
match = np.where(mass_hit & rt_hit)[0]
if len(match) > 0:
if len(match) == 1:
hits[mol] = doc_names[match[0]]
else:
# Multiple hits
min_dist = 1e6
best_match = match[0]
for individual_match in match:
match_mass = doc_masses[individual_match]
match_rt = doc_rt[individual_match]
dist = np.sqrt((match_rt - mol[2])**2 + (match_mass - mol[1])**2)
if dist < min_dist:
best_match = individual_match
hits[mol] = doc_names[best_match]
print "Found hits for {} standard molecules".format(len(hits))
# Add the hits to the lda_dict as document metadata
for mol in hits:
doc_name = hits[mol]
lda_dict['doc_metadata'][doc_name]['standard_mol'] = mol[0]
lda_dict['doc_metadata'][doc_name]['annotation'] = mol[0]
if new_lda_file:
with open(new_lda_file,'w') as f:
pickle.dump(lda_dict,f)
print "Wrote annotated dictionary to {}".format(new_lda_file)
return lda_dict
def alpha_report(vlda,overlap_scores = None,overlap_thresh = 0.3):
ta = []
for topic,ti in vlda.topic_index.items():
ta.append((topic,vlda.alpha[ti]))
ta = sorted(ta,key = lambda x: x[1],reverse = True)
for t,a in ta:
to = []
if overlap_scores:
for doc in overlap_scores:
if t in overlap_scores[doc]:
if overlap_scores[doc][t]>=overlap_thresh:
to.append((doc,overlap_scores[doc][t]))
print t,vlda.topic_metadata[t].get('SHORT_ANNOTATION',None),a
to = sorted(to,key = lambda x: x[1],reverse = True)
for t,o in to:
print '\t',t,o
def decompose(vlda,corpus):
# decompose the documents in corpus
# CHECK THE INTENSITY NORMALISATION
K = vlda.K
phi = {}
gamma_mat = {}
n_done = 0
n_total = len(corpus)
p_in = {}
for doc,spectrum in corpus.items():
intensity_in = 0.0
intensity_out = 0.0
max_i = 0.0
for word in spectrum:
if spectrum[word] > max_i:
max_i = spectrum[word]
if word in vlda.word_index:
intensity_in += spectrum[word]
else:
intensity_out += spectrum[word]
p_in[doc] = (1.0*intensity_in)/(intensity_in + intensity_out)
# print max_i
print "Decomposing document {} ({}/{})".format(doc,n_done,n_total)
phi[doc] = {}
# gamma_mat[doc] = np.zeros(K) + vlda.alpha
gamma_mat[doc] = np.ones(K)
for it in range(20):
# temp_gamma = np.zeros(K) + vlda.alpha
temp_gamma = np.ones(K)
for word in spectrum:
if word in vlda.word_index:
w = vlda.word_index[word]
log_phi_matrix = np.log(vlda.beta_matrix[:,w]) + psi(gamma_mat[doc])
log_phi_matrix = np.exp(log_phi_matrix - log_phi_matrix.max())
phi[doc][word] = log_phi_matrix/log_phi_matrix.sum()
temp_gamma += phi[doc][word]*spectrum[word]
gamma_mat[doc] = temp_gamma
n_done += 1
return gamma_mat,phi,p_in
def decompose_overlap(vlda,decomp_phi):
# computes the overlap score for a decomposition phi
o = {}
K = vlda.K
for doc in decomp_phi:
o[doc] = {}
os = np.zeros(K)
for word,phi_vec in decomp_phi[doc].items():
word_pos = vlda.word_index[word]
os += phi_vec*vlda.beta_matrix[:,word_pos]
for topic,pos in vlda.topic_index.items():
o[doc][topic] = os[pos]
return o
def decompose_from_dict(vlda_dict,corpus):
# step 1, get the betas into a matrix
K = vlda_dict['K']
skeleton = VariationalLDA({},K)
skeleton.word_index = vlda_dict['word_index']
skeleton.topic_index = vlda_dict['topic_index']
n_words = len(skeleton.word_index)
skeleton.beta_matrix = np.zeros((K,n_words),np.double) + 1e-6
beta_dict = vlda_dict['beta']
for topic in beta_dict:
topic_pos = skeleton.topic_index[topic]
for word,prob in beta_dict[topic].items():
word_pos = skeleton.word_index[word]
skeleton.beta_matrix[topic_pos,word_pos] = prob
# normalise
skeleton.beta_matrix /= skeleton.beta_matrix.sum(axis=1)[:,None]
g,phi,p_in = decompose(skeleton,corpus)
return g,phi,p_in,skeleton
def doc_feature_counts(vlda_dict,p_thresh = 0.01,o_thresh = 0.3):
theta = vlda_dict['theta']
decomp_gamma,decomp_phi,decomp_p_in,skeleton = decompose_from_dict(vlda_dict,vlda_dict['corpus'])
overlap_scores = decompose_overlap(skeleton,vlda_dict['corpus'])
phi_thresh = 0.5
motif_doc_counts = {}
motif_word_counts = {}
for doc in theta:
for motif in theta[doc]:
if theta[doc][motif] >= p_thresh and overlap_scores[doc][motif] >= o_thresh:
if not motif in motif_doc_counts:
motif_doc_counts[motif] = 0
motif_doc_counts[motif] += 1
for word,phi_vec in decomp_phi[doc].items():
motif_pos = vlda_dict['topic_index'][motif]
if phi_vec[motif_pos] >= phi_thresh:
if not motif in motif_word_counts:
motif_word_counts[motif] = {}
if not word in motif_word_counts[motif]:
motif_word_counts[motif][word] = 0
motif_word_counts[motif][word] += 1
word_total_counts = {}
for doc,spectrum in vlda_dict['corpus'].items():
for word,intensity in spectrum.items():
if not word in word_total_counts:
word_total_counts[word] = 0
word_total_counts[word] += 1
return motif_doc_counts,motif_word_counts,word_total_counts
def compute_overlap_scores(vlda):
import numpy as np
K = len(vlda.topic_index)
overlap_scores = {}
for doc in vlda.doc_index:
overlap_scores[doc] = {}
os = np.zeros(K)
pm = vlda.phi_matrix[doc]
for word,probs in pm.items():
word_index = vlda.word_index[word]
os += probs*vlda.beta_matrix[:,word_index]
for motif,m_pos in vlda.topic_index.items():
overlap_scores[doc][motif] = os[m_pos]
return overlap_scores
def write_csv(vlda,overlap_scores,filename,metadata,p_thresh=0.01,o_thresh=0.3):
import csv
probs = vlda.get_expect_theta()
motif_dict = {}
with open(filename,'w') as f:
writer = csv.writer(f)
heads = ['Document','Motif','Probability','Overlap Score','Precursor Mass','Retention Time','Document Annotation']
writer.writerow(heads)
all_rows = []
for doc,doc_pos in vlda.doc_index.items():
for motif,motif_pos in vlda.topic_index.items():
if probs[doc_pos,motif_pos] >= p_thresh and overlap_scores[doc][motif] >= o_thresh:
new_row = []
new_row.append(doc)
new_row.append(motif)
new_row.append(probs[doc_pos,motif_pos])
new_row.append(overlap_scores[doc][motif])
new_row.append(metadata[doc]['parentmass'])
new_row.append("None")
new_row.append(metadata[doc]['featid'])
all_rows.append(new_row)
motif_dict[motif] = True
all_rows = sorted(all_rows,key = lambda x:x[0])
for new_row in all_rows:
writer.writerow(new_row)
return motif_dict | gpl-3.0 | 7,522,224,093,427,851,000 | 33.374083 | 132 | 0.636862 | false |
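Since the module above never shows its entry points being called, here is a hedged driver sketch for the topic-matching helper; all file names are hypothetical.

```python
# Hypothetical driver for match_topics_across_dictionaries(); all file names are placeholders.
best_match, annotated_lda2 = match_topics_across_dictionaries(
    file1='reference_lda.dict',
    file2='new_experiment_lda.dict',
    same_corpus=False,
    copy_annotations=True,
    summary_file='topic_matches.csv',
    new_file2='new_experiment_lda_annotated.dict')

# Each value is (topic1, match_score, n_union_words, n_shared_words, shared_p_lda2, shared_p_lda1, annotation).
for topic2, match in best_match.items():
    print("{} -> {} (score {:.3f}) {}".format(topic2, match[0], match[1], match[6]))
```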
our-city-app/oca-backend | src/rogerthat/models/auth/acm.py | 1 | 2211 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from rogerthat.models.common import NdbModel
from google.appengine.ext import ndb
# DOCS https://authenticatie.vlaanderen.be/docs/beveiligen-van-toepassingen/integratie-methoden/oidc/
# T&I https://authenticatie-ti.vlaanderen.be/op/.well-known/openid-configuration
# PROD https://authenticatie.vlaanderen.be/op/.well-known/openid-configuration
class ACMSettings(NdbModel):
client_id = ndb.TextProperty()
client_secret = ndb.TextProperty()
openid_config_uri = ndb.TextProperty()
auth_redirect_uri = ndb.TextProperty()
logout_redirect_uri = ndb.TextProperty()
@classmethod
def create_key(cls, app_id):
return ndb.Key(cls, app_id)
class ACMLoginState(NdbModel):
creation_time = ndb.DateTimeProperty(auto_now_add=True)
app_id = ndb.TextProperty()
scope = ndb.TextProperty()
code_challenge = ndb.TextProperty()
token = ndb.JsonProperty()
id_token = ndb.JsonProperty()
@property
def state(self):
return self.key.id()
@classmethod
def create_key(cls, state):
return ndb.Key(cls, state)
@classmethod
def list_before_date(cls, date):
return cls.query(cls.creation_time < date)
class ACMLogoutState(NdbModel):
creation_time = ndb.DateTimeProperty(auto_now_add=True)
app_id = ndb.TextProperty()
@property
def state(self):
return self.key.id()
@classmethod
def create_key(cls, state):
return ndb.Key(cls, state)
@classmethod
def list_before_date(cls, date):
return cls.query(cls.creation_time < date) | apache-2.0 | 6,988,458,327,065,646,000 | 28.891892 | 101 | 0.703754 | false |
cartertech/odoo-hr-ng | hr_report_payroll_attendance_summary/report/attendance_summary.py | 1 | 14914 | #-*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 One Click Software (http://oneclick.solutions)
# and Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
from report import report_sxw
class Parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_employee_data': self.get_employee_data,
'get_worked_days': self.get_worked_days,
'get_daily_ot': self.get_daily_ot,
'get_nightly_ot': self.get_nightly_ot,
'get_restday_ot': self.get_restday_ot,
'get_holiday_ot': self.get_holiday_ot,
'get_bunch_no': self.get_bunch_no,
'get_awol': self.get_awol,
'get_sickleave': self.get_sickleave,
'get_no': self.get_no,
'get_start': self.get_start,
'get_end': self.get_end,
'lose_bonus': self.lose_bonus,
'get_paid_leave': self.get_paid_leave,
'get_employee_list': self.get_employee_list,
'get_lu': self.get_lu,
'get_wage': self.get_adjusted_wage,
})
self.start_date = False
self.end_date = False
self.ee_lines = {}
self.no = 0
self.department_id = False
self.regular_hours = 8.0
self.get_employee_data_ids = []
self.get_employee_list_ids = []
def set_context(self, objects, data, ids, report_type=None):
if data.get('form', False) and data['form'].get('start_date', False):
self.start_date = data['form']['start_date']
if data.get('form', False) and data['form'].get('end_date', False):
self.end_date = data['form']['end_date']
return super(Parser, self).set_context(objects, data, ids, report_type=report_type)
def calculate_wage_by_ppf(self, dFullStart, dFullEnd, contracts_list):
full_days = 0
d = dFullStart
while d <= dFullEnd:
full_days += 1
d += relativedelta(days= +1)
wage = 0.0
for line in contracts_list:
ppf = 0.0
dates = line[0]
contract_wage = line[1]
ppf = float(relativedelta(dates[1], dates[0]).days + 1) / float(full_days)
wage += (contract_wage * ppf)
return wage
def get_adjusted_wage(self, ee_id):
con_obj = self.pool.get('hr.contract')
dS = datetime.strptime(self.start_date, OE_DATEFORMAT).date()
dE = datetime.strptime(self.end_date, OE_DATEFORMAT).date()
wage = 0
# Get contract in effect on first day of period (or first contract date for new employees)
cons= []
d = dS
while d <= dE:
con_ids = con_obj.search(self.cr, self.uid,
[('employee_id', '=', ee_id),
('date_start', '<=', d.strftime(OE_DATEFORMAT)),
'|', ('date_end', '=', False),
('date_end', '>=', d.strftime(OE_DATEFORMAT))])
if len(con_ids) > 0:
con = con_obj.browse(self.cr, self.uid, con_ids[0])
_seen = False
for c in cons:
if con.id == c[2]:
_seen = True
break
if _seen:
d += relativedelta(days= +1)
continue
dTempStart = dS
dTempEnd = dE
dConStart = datetime.strptime(con.date_start, OE_DATEFORMAT).date()
if dConStart > dS:
dTempStart = dConStart
if con.date_end:
dConEnd = datetime.strptime(con.date_end, OE_DATEFORMAT).date()
if dConEnd < dE:
dTempEnd = dConEnd
cons.append([(dTempStart, dTempEnd), con.wage, con.id])
d += relativedelta(days= +1)
if len(cons) > 0:
wage = self.calculate_wage_by_ppf(dS, dE, cons)
return wage
def get_employee_ids(self, department_id, seen_ids):
c_obj = self.pool.get('hr.contract')
ee_obj = self.pool.get('hr.employee')
c_ids = c_obj.search(self.cr, self.uid, ['|', ('job_id.department_id', '=', department_id),
('end_job_id.department_id', '=', department_id),
('date_start', '<=', self.end_date),
'|', ('date_end', '=', False),
('date_end', '>=', self.start_date)])
ee_ids = []
cdata = c_obj.read(self.cr, self.uid, c_ids, ['employee_id'])
ee_ids = [data['employee_id'][0] for data in cdata if ((data['employee_id'][0] not in ee_ids) and (data['employee_id'][0] not in seen_ids))]
seen_ids += ee_ids
# re-order
return ee_obj.search(self.cr, self.uid, [('id', 'in', ee_ids),
'|', ('active', '=', True),
('active', '=', False)])
def get_employee_list(self, department_id):
ee_ids = self.get_employee_ids(department_id, self.get_employee_list_ids)
return self.pool.get('hr.employee').browse(self.cr, self.uid, ee_ids)
def get_employee_data(self, department_id):
payslip_obj = self.pool.get('hr.payslip')
ee_obj = self.pool.get('hr.employee')
dtStart = datetime.strptime(self.start_date, OE_DATEFORMAT).date()
dtEnd = datetime.strptime(self.end_date, OE_DATEFORMAT).date()
ee_ids = self.get_employee_ids(department_id, self.get_employee_data_ids)
for ee in ee_obj.browse(self.cr, self.uid, ee_ids):
datas = []
for c in ee.contract_ids:
dtCStart = False
dtCEnd = False
if c.date_start: dtCStart = datetime.strptime(c.date_start, OE_DATEFORMAT).date()
if c.date_end: dtCEnd = datetime.strptime(c.date_end, OE_DATEFORMAT).date()
if (dtCStart and dtCStart <= dtEnd) and ((dtCEnd and dtCEnd >= dtStart) or not dtCEnd):
datas.append({'contract_id': c.id,
'date_start': dtCStart > dtStart and dtCStart.strftime(OE_DATEFORMAT) or dtStart.strftime(OE_DATEFORMAT),
'date_end': (dtCEnd and dtCEnd < dtEnd) and dtCEnd.strftime(OE_DATEFORMAT) or dtEnd.strftime(OE_DATEFORMAT)})
wd_lines = []
for d in datas:
wd_lines += payslip_obj.get_worked_day_lines(self.cr, self.uid, [d['contract_id']],
d['date_start'], d['date_end'])
self.ee_lines.update({ee.id: wd_lines})
def get_start(self):
return datetime.strptime(self.start_date, OE_DATEFORMAT).strftime('%B %d, %Y')
def get_end(self):
return datetime.strptime(self.end_date, OE_DATEFORMAT).strftime('%B %d, %Y')
def get_no(self, department_id):
if not self.department_id or self.department_id != department_id:
self.department_id = department_id
self.no = 1
else:
self.no += 1
return self.no
def get_lu(self, employee_id):
data = self.pool.get('hr.employee').read(self.cr, self.uid, employee_id, ['is_labour_union'])
return data.get('is_labour_union', False) and 'Y' or 'N'
def get_employee_start_date(self, employee_id):
first_day = False
c_obj = self.pool.get('hr.contract')
c_ids = c_obj.search(self.cr, self.uid, [('employee_id', '=', employee_id)])
for contract in c_obj.browse(self.cr, self.uid, c_ids):
if not first_day or contract.date_start < first_day:
first_day = contract.date_start
return first_day
def get_worked_days(self, employee_id):
total = 0.0
hol = 0.0
maxw = 0.0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORK100']:
total += float(line['number_of_hours']) / self.regular_hours
            elif line['code'] in ['WORKHOL']:
                hol += float(line['number_of_hours']) / self.regular_hours
            elif line['code'] in ['MAX']:
maxw += float(line['number_of_hours']) / self.regular_hours
total += hol + self.get_paid_leave(employee_id)
awol = self.get_awol(employee_id)
# Take care to identify and handle employee's who didn't work the
# full month: newly hired and terminated employees
#
hire_date = self.get_employee_start_date(employee_id)
term_ids = self.pool.get('hr.employee.termination').search(self.cr, self.uid,
[('name', '<=', self.end_date),
('name', '>=', self.start_date),
('employee_id', '=', employee_id),
('employee_id.status', 'in', ['pending_inactive', 'inactive']),
('state', 'not in', ['cancel'])])
if hire_date <= self.start_date and len(term_ids) == 0:
if total >= maxw:
total = 26
total = total - awol
return total
def get_paid_leave(self, employee_id):
total = 0
paid_leaves = ['LVANNUAL', 'LVBEREAVEMENT', 'LVCIVIC', 'LVMATERNITY',
'LVMMEDICAL', 'LVPTO', 'LVWEDDING', 'LVSICK']
for line in self.ee_lines[employee_id]:
if line['code'] in paid_leaves:
total += float(line['number_of_hours']) / self.regular_hours
return total
def get_daily_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTD']:
total += line['number_of_hours']
return total
def get_nightly_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTN']:
total += line['number_of_hours']
return total
def get_restday_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTR', 'WORKRST']:
total += line['number_of_hours']
return total
def get_holiday_ot(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['WORKOTH', 'WORKHOL']:
total += line['number_of_hours']
return total
def get_bunch_no(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['BUNCH']:
total += int(line['number_of_hours'])
return total
def get_awol(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['AWOL']:
total += float(line['number_of_hours']) / self.regular_hours
return total
def get_sickleave(self, employee_id):
total = 0
for line in self.ee_lines[employee_id]:
if line['code'] in ['LVSICK']:
total += float(line['number_of_hours']) / self.regular_hours
elif line['code'] in ['LVSICK50']:
total += float(line['number_of_hours']) * 0.5
return total
def lose_bonus(self, employee_id):
loseit = False
for line in self.ee_lines[employee_id]:
if line['code'] in ['AWOL', 'TARDY', 'NFRA', 'WARNW'] and line['number_of_hours'] > 0.01:
loseit = True
# Check if the employee's contract spans the full month
if not loseit:
dStart = False
dEnd = None
con_obj = self.pool.get('hr.contract')
con_ids = con_obj.search(self.cr, self.uid, [('employee_id', '=', employee_id),
('state', '!=', 'draft'),
('date_start', '<=', self.end_date),
'|', ('date_end', '=', False),
('date_end', '>=', self.start_date)])
for con in con_obj.browse(self.cr, self.uid, con_ids):
dTempStart = datetime.strptime(con.date_start, OE_DATEFORMAT).date()
dTempEnd = False
if con.date_end:
dTempEnd = datetime.strptime(con.date_end, OE_DATEFORMAT).date()
if not dStart or dTempStart < dStart:
dStart = dTempStart
if (dEnd == None) or (not dTempEnd or (dEnd and dTempEnd > dEnd)):
dEnd = dTempEnd
if dStart and dStart > datetime.strptime(self.start_date, OE_DATEFORMAT).date():
loseit = True
elif (dEnd != None) and dEnd and (dEnd < datetime.strptime(self.end_date, OE_DATEFORMAT).date()):
loseit = True
return loseit
| agpl-3.0 | -5,935,333,586,226,009,000 | 42.48105 | 148 | 0.495575 | false |
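To make the wage pro-rating in `calculate_wage_by_ppf` concrete, here is a small worked example with made-up dates and wages (none of these numbers come from the source).

```python
# Hypothetical worked example of the pro-rating performed by calculate_wage_by_ppf().
from datetime import date

period_start, period_end = date(2015, 1, 1), date(2015, 1, 30)      # 30 days inclusive
contracts = [((date(2015, 1, 1), date(2015, 1, 10)), 900.0),        # covers 10/30 of the period
             ((date(2015, 1, 11), date(2015, 1, 30)), 1200.0)]      # covers 20/30 of the period

full_days = (period_end - period_start).days + 1
wage = sum(w * (((d_end - d_start).days + 1) / float(full_days))
           for (d_start, d_end), w in contracts)
print(wage)  # 900*(10/30) + 1200*(20/30) = 1100.0
```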
adlius/osf.io | api/providers/urls.py | 1 | 5275 | from django.conf.urls import include, url
from api.providers import views
app_name = 'osf'
urlpatterns = [
url(
r'^preprints/', include(
(
[
url(r'^$', views.PreprintProviderList.as_view(), name=views.PreprintProviderList.view_name),
url(r'^(?P<provider_id>\w+)/$', views.PreprintProviderDetail.as_view(), name=views.PreprintProviderDetail.view_name),
url(r'^(?P<provider_id>\w+)/licenses/$', views.PreprintProviderLicenseList.as_view(), name=views.PreprintProviderLicenseList.view_name),
url(r'^(?P<provider_id>\w+)/preprints/$', views.PreprintProviderPreprintList.as_view(), name=views.PreprintProviderPreprintList.view_name),
url(r'^(?P<provider_id>\w+)/subjects/$', views.PreprintProviderSubjects.as_view(), name=views.PreprintProviderSubjects.view_name),
url(r'^(?P<provider_id>\w+)/subjects/highlighted/$', views.PreprintProviderHighlightedSubjectList.as_view(), name=views.PreprintProviderHighlightedSubjectList.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/$', views.PreprintProviderTaxonomies.as_view(), name=views.PreprintProviderTaxonomies.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/highlighted/$', views.PreprintProviderHighlightedTaxonomyList.as_view(), name=views.PreprintProviderHighlightedTaxonomyList.view_name),
url(r'^(?P<provider_id>\w+)/withdraw_requests/$', views.PreprintProviderWithdrawRequestList.as_view(), name=views.PreprintProviderWithdrawRequestList.view_name),
url(r'^(?P<provider_id>\w+)/moderators/$', views.PreprintProviderModeratorsList.as_view(), name=views.PreprintProviderModeratorsList.view_name),
url(r'^(?P<provider_id>\w+)/moderators/(?P<moderator_id>\w+)/$', views.PreprintProviderModeratorsDetail.as_view(), name=views.PreprintProviderModeratorsDetail.view_name),
], 'preprints',
),
namespace='preprint-providers',
),
),
url(
r'^collections/', include(
(
[
url(r'^$', views.CollectionProviderList.as_view(), name=views.CollectionProviderList.view_name),
url(r'^(?P<provider_id>\w+)/$', views.CollectionProviderDetail.as_view(), name=views.CollectionProviderDetail.view_name),
url(r'^(?P<provider_id>\w+)/licenses/$', views.CollectionProviderLicenseList.as_view(), name=views.CollectionProviderLicenseList.view_name),
url(r'^(?P<provider_id>\w+)/submissions/$', views.CollectionProviderSubmissionList.as_view(), name=views.CollectionProviderSubmissionList.view_name),
url(r'^(?P<provider_id>\w+)/subjects/$', views.CollectionProviderSubjects.as_view(), name=views.CollectionProviderSubjects.view_name),
url(r'^(?P<provider_id>\w+)/subjects/highlighted/$', views.CollectionProviderHighlightedSubjectList.as_view(), name=views.CollectionProviderHighlightedSubjectList.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/$', views.CollectionProviderTaxonomies.as_view(), name=views.CollectionProviderTaxonomies.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/highlighted/$', views.CollectionProviderHighlightedTaxonomyList.as_view(), name=views.CollectionProviderHighlightedTaxonomyList.view_name),
], 'collections',
),
namespace='collection-providers',
),
),
url(
r'^registrations/', include(
(
[
url(r'^$', views.RegistrationProviderList.as_view(), name=views.RegistrationProviderList.view_name),
url(r'^(?P<provider_id>\w+)/$', views.RegistrationProviderDetail.as_view(), name=views.RegistrationProviderDetail.view_name),
url(r'^(?P<provider_id>\w+)/licenses/$', views.RegistrationProviderLicenseList.as_view(), name=views.RegistrationProviderLicenseList.view_name),
url(r'^(?P<provider_id>\w+)/schemas/$', views.RegistrationProviderSchemaList.as_view(), name=views.RegistrationProviderSchemaList.view_name),
url(r'^(?P<provider_id>\w+)/submissions/$', views.RegistrationProviderSubmissionList.as_view(), name=views.RegistrationProviderSubmissionList.view_name),
url(r'^(?P<provider_id>\w+)/subjects/$', views.RegistrationProviderSubjects.as_view(), name=views.RegistrationProviderSubjects.view_name),
url(r'^(?P<provider_id>\w+)/subjects/highlighted/$', views.RegistrationProviderHighlightedSubjectList.as_view(), name=views.RegistrationProviderHighlightedSubjectList.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/$', views.RegistrationProviderTaxonomies.as_view(), name=views.RegistrationProviderTaxonomies.view_name),
url(r'^(?P<provider_id>\w+)/taxonomies/highlighted/$', views.RegistrationProviderHighlightedTaxonomyList.as_view(), name=views.RegistrationProviderHighlightedTaxonomyList.view_name),
], 'registrations',
),
namespace='registration-providers',
),
),
]
| apache-2.0 | 7,185,396,797,768,840,000 | 80.153846 | 202 | 0.653649 | false |
tensorflow/lingvo | lingvo/tasks/punctuator/input_generator_test.py | 1 | 3305 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input generator."""
import string
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import py_utils
from lingvo.core import test_helper
from lingvo.core import test_utils
from lingvo.tasks.punctuator import input_generator
class InputGeneratorTest(test_utils.TestCase):
def _CreatePunctuatorInputParams(self):
p = input_generator.PunctuatorInput.Params()
input_file = 'text:' + test_helper.test_src_dir_path(
'tasks/lm/testdata/lm1b_100.txt')
p.tokenizer.vocab_filepath = test_helper.test_src_dir_path(
'tasks/punctuator/params/brown_corpus_wpm.16000.vocab')
p.tokenizer.vocab_size = 16000
p.file_pattern = input_file
p.file_random_seed = 314
p.file_parallelism = 1
p.source_max_length = 200
p.target_max_length = 200
p.bucket_upper_bound = [20, 40]
p.bucket_batch_limit = [1, 1]
return p
def testBasic(self):
p = self._CreatePunctuatorInputParams()
with self.session(use_gpu=False):
inp = input_generator.PunctuatorInput(p)
# Runs a few steps.
for _ in range(10):
self.evaluate(inp.GetPreprocessedInputBatch())
def testSourceTargetValues(self):
max_length = 50
p = self._CreatePunctuatorInputParams()
with self.session(use_gpu=False):
inp = input_generator.PunctuatorInput(p)
tokenizer = inp.tokenizer_dict[base_input_generator.DEFAULT_TOKENIZER_KEY]
fetched = py_utils.NestedMap(
self.evaluate(inp.GetPreprocessedInputBatch()))
source_ids = fetched.src.ids
tgt_ids = fetched.tgt.ids
tgt_labels = fetched.tgt.labels
expected_ref = (
b'Elk calling -- a skill that hunters perfected long ago to lure '
b'game with the promise of a little romance -- is now its own sport .'
)
normalized_ref = expected_ref.lower().translate(
None, string.punctuation.encode('utf-8'))
normalized_ref = b' '.join(normalized_ref.split())
_, expected_src_ids, _ = self.evaluate(
tokenizer.StringsToIds(
tf.convert_to_tensor([normalized_ref]), max_length=max_length))
expected_tgt_ids, expected_tgt_labels, _ = self.evaluate(
tokenizer.StringsToIds(
tf.convert_to_tensor([expected_ref]), max_length=max_length))
self.assertAllEqual(expected_src_ids[0], source_ids[0, :max_length])
self.assertAllEqual(expected_tgt_ids[0], tgt_ids[0, :max_length])
self.assertAllEqual(expected_tgt_labels[0], tgt_labels[0, :max_length])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -7,914,446,482,851,049,000 | 36.988506 | 80 | 0.673222 | false |
ocadotechnology/aimmo | aimmo-game/tests/test_interactables/mock_world.py | 1 | 1445 | import random
from simulation import map_generator
from simulation.simulation_runner import SequentialSimulationRunner
from tests.test_simulation.mock_communicator import MockCommunicator
from tests.test_simulation.dummy_avatar import DummyAvatarManager, MoveEastDummy
from tests.test_simulation.mock_turn_collector import MockTurnCollector
SETTINGS = {"START_HEIGHT": 5, "START_WIDTH": 5, "OBSTACLE_RATIO": 0}
class MockWorld(object):
"""
Creates an object that mocks the whole game and can be used in various testing.
    It holds the map generator, avatar manager, game state, simulation runner and turn collector. Takes settings as a parameter,
if defaults are unsuitable.
By default, the first avatar added to the world will be a MoveEastDummy.
"""
def __init__(
self,
settings=SETTINGS,
dummies_list=None,
map_generator_class=map_generator.Main,
simulation_runner_class=SequentialSimulationRunner,
):
random.seed(0)
if dummies_list is None:
dummies_list = [MoveEastDummy]
self.generator = map_generator_class(settings)
self.avatar_manager = DummyAvatarManager(dummies_list)
self.game_state = self.generator.get_game_state(self.avatar_manager)
self.simulation_runner = simulation_runner_class(
game_state=self.game_state, communicator=MockCommunicator()
)
self.turn_collector = MockTurnCollector()
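# A minimal usage sketch (illustrative only, not part of the original test helpers):
#
#   world = MockWorld(dummies_list=[MoveEastDummy])
#   # the pieces a test would typically drive or assert against:
#   world.game_state, world.avatar_manager, world.simulation_runner, world.turn_collector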
| agpl-3.0 | 7,244,592,675,952,076,000 | 37.026316 | 107 | 0.715571 | false |
akraft196/pyASC | examples/mplot1.py | 1 | 7267 | #! /usr/bin/env python
#
# quick and dirty processing of the MD All Sky images
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.misc import imsave
import numpy as np
import aplpy
import argparse as ap
import os.path
import logging
import time
def d(ff,box=[]):
#very specific for 16 bit data, since we want to keep the data in uint16
h = fits.open(ff, do_not_scale_image_data=True)
if len(box)==0:
return h[0].header, h[0].data
else:
# figure out 0 vs. 1 based offsets; box is 1 based
return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]]
def dsum(i0,i1,step = 1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = d(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
    print(type(sum1), type(sum2))
return h,sum1,np.sqrt(sum2),c
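# Illustrative call (the frame range and box are assumptions): average IMG00001.FIT..IMG00010.FIT
# over the 100x100 pixel cut-out with corners (100,100) and (200,200):
#   header, mean, rms, cube = dsum(1, 10, box=[100, 100, 200, 200])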
def show(sum):
""" some native matplotlib display,
doesn't show pointsources well at all
"""
ip = plt.imshow(sum)
plt.show()
def show2(sum):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum)
#fig.show_grayscale()
fig.show_colorscale()
def show3(sum1,sum2):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum1,subplot=(2,2,1))
#fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1)
fig.show_grayscale()
# For some variations on this theme, e.g. time.time vs. time.clock, see
# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python
#
class Dtime(object):
""" Class to help measuring the wall clock time between tagged events
Typical usage:
dt = Dtime()
...
dt.tag('a')
...
dt.tag('b')
"""
def __init__(self, label=".", report=True):
self.start = self.time()
self.init = self.start
self.label = label
self.report = report
self.dtimes = []
dt = self.init - self.init
if self.report:
logging.info("Dtime: %s ADMIT " % self.label + str(self.start))
logging.info("Dtime: %s BEGIN " % self.label + str(dt))
def reset(self, report=True):
self.start = self.time()
self.report = report
self.dtimes = []
def tag(self, mytag):
t0 = self.start
t1 = self.time()
dt = t1 - t0
self.dtimes.append((mytag, dt))
self.start = t1
if self.report:
logging.info("Dtime: %s " % self.label + mytag + " " + str(dt))
return dt
def show(self):
if self.report:
for r in self.dtimes:
logging.info("Dtime: %s " % self.label + str(r[0]) + " " + str(r[1]))
return self.dtimes
def end(self):
t0 = self.init
t1 = self.time()
dt = t1 - t0
if self.report:
logging.info("Dtime: %s END " % self.label + str(dt))
return dt
def time(self):
""" pick the actual OS routine that returns some kind of timer
time.time : wall clock time (include I/O and multitasking overhead)
time.clock : cpu clock time
"""
return np.array([time.clock(), time.time()])
if __name__ == '__main__':
logging.basicConfig(level = logging.INFO)
dt = Dtime("mplot1")
    #--frame, -f start end [step]
    #--box, -b x1 y1 x2 y2
    #--graphics, -g 0|1|2
parser = ap.ArgumentParser(description='Plotting .fits files.')
parser.add_argument('-f', '--frame', nargs = '*', type = int, help = 'Starting and ending parameters for the frames analyzed')
parser.add_argument('-b', '--box', nargs = 4, type = int, help = 'Coordinates for the bottom left corner and top right corner of a rectangle of pixels to be analyzed from the data. In the structure x1, y1, x2, y2 (1 based numbers)')
    parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = [0], help = 'Controls whether to display or save graphics. 0: no graphics, 1: display graphics, 2: save graphics as .png')  # list default so args['graphics'][0] also works when -g is omitted
args = vars(parser.parse_args())
if args['frame'] == None:
count = 0
start = None
end = None
step = 1
#while we have yet to find an end
while end == None:
filename = 'IMG%05d.FIT' % count
#if start has not been found yet, and this file exists
if start == None and os.path.isfile(filename):
start = count
#if start has been found and we finally found a file that doesn't exist, set end to the last file that existed (count - 1.FIT)
elif start != None and not os.path.isfile(filename):
end = count - 1
count += 1
elif len(args['frame']) >= 2 and len(args['frame']) <= 3:
start = args['frame'][0] # starting frame (IMGnnnnn.FIT)
end = args['frame'][1] # ending frame
if len(args['frame']) == 3:
            step = args['frame'][2]
else:
step = 1
else:
        raise Exception("-f needs 0, 2, or 3 arguments.")
box = args['box'] # BLC and TRC
if box == None:
box = []
dt.tag("start")
# compute the average and dispersion of the series
h1,sum1,sum2,cube = dsum(start,end,step,box=box) # end can be uninitialized here might throw an error?
dt.tag("dsum")
nz = cube.shape[0]
# delta X and Y images
dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis
dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis
# write them to FITS
fits.writeto('dsumx.fits', dsumx, h1, clobber=True)
fits.writeto('dsumy.fits', dsumy, h1, clobber=True)
fits.writeto('sum1.fits', sum1, h1, clobber=True)
fits.writeto('sum2.fits', sum2, h1, clobber=True)
dt.tag("write2d")
# 3D cube to
h1['NAXIS'] = 3
h1['NAXIS3'] = nz
fits.writeto('cube.fits', cube, h1, clobber=True)
dt.tag("write3d")
if args['graphics'][0] == 1:
# plot the sum1 and sum2 correllation (glueviz should do this)
s1 = sum1.flatten()
s2 = sum2.flatten()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(s1,s2)
plt.show()
show2(sum1)
show2(sum2)
if args['graphics'][0] == 2:
imsave('sum1.png', sum1)
imsave('sum2.png', sum2)
dt.tag("done")
dt.end()
| mit | 8,728,276,588,416,666,000 | 31.734234 | 236 | 0.551534 | false |
Daeinar/dfa-aes | simulator/inject.py | 1 | 1300 | #!/usr/bin/env python
#
# Licensed by "The MIT License". See file LICENSE.
#
# Script to simulate fault injections on AES-128.
# Prints correct and corresponding faulty ciphertext.
#
# Usage: ./inject nr_of_example fault_location
#
# fault_location must be in {0,...,15}.
#
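# Example (hypothetical invocation): ./inject.py 3 7
#   -> encrypts test vector 3 and injects the fault at location 7.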
import sys
from aes import *
# (plaintext,key,fault_value)
vectors = [
(0x00000000000000000000000000000000,0x00000000000000000000000000000000,0x01), # 0
(0xffffffffffffffffffffffffffffffff,0xffffffffffffffffffffffffffffffff,0xff), # 1
(0x1234567890abcdef1234567890abcdef,0x1234567890abcdef1234567890abcdef,0x02), # 2
(0xfedcba0987654321fedcba0987654321,0xfedcba0987654321fedcba0987654321,0x02), # 3
(0x636f64656d696c6573636f64656d696c,0x6a6d736a75657475736a6d7374756574,0x02), # 4
(0xbaddecafbaddecafbaddecafbaddecaf,0xbaddecafbaddecafbaddecafbaddecaf,0xba), # 5
(0xdeadc0dedeadc0dedeadc0dedeadc0de,0xdeadc0dedeadc0dedeadc0dedeadc0de,0xde), # 6
(0x1234567890abcdef1234567890abcdef,0x10000005200000063000000740000008,0x02)] # 7
if __name__ == '__main__':
assert int(sys.argv[1]) < len(vectors)
assert int(sys.argv[1]) >= 0
v = vectors[int(sys.argv[1])]
p = v[0]
key = v[1]
c = encrypt(p,key)
ct = encrypt(p,key,fault=v[2],floc=int(sys.argv[2]))
print "{:032x} {:032x}".format(c,ct)
| mit | -8,040,455,792,532,726,000 | 34.135135 | 84 | 0.754615 | false |
wooga/airflow | airflow/providers/google/cloud/operators/natural_language.py | 1 | 10959 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Cloud Language operators.
"""
from typing import Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.language_v1 import enums
from google.cloud.language_v1.types import Document
from google.protobuf.json_format import MessageToDict
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.natural_language import CloudNaturalLanguageHook
MetaData = Sequence[Tuple[str, str]]
class CloudNaturalLanguageAnalyzeEntitiesOperator(BaseOperator):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeEntitiesOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.enums.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START natural_language_analyze_entities_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_analyze_entities_template_fields]
def __init__(
self,
document: Union[dict, Document],
encoding_type: Optional[enums.EncodingType] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start analyzing entities")
response = hook.analyze_entities(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished analyzing entities")
return MessageToDict(response)
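# A minimal usage sketch for the operator above (illustrative only; the task id and the
# document dict are assumptions, not part of this module):
#
#   analyze_entities = CloudNaturalLanguageAnalyzeEntitiesOperator(
#       document={"content": "Airflow is a workflow platform.", "type": "PLAIN_TEXT"},
#       task_id="analyze_entities",
#   )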
class CloudNaturalLanguageAnalyzeEntitySentimentOperator(BaseOperator):
"""
Finds entities, similar to AnalyzeEntities in the text and analyzes sentiment associated with each
entity and its mentions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeEntitySentimentOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.enums.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
    :rtype: google.cloud.language_v1.types.AnalyzeEntitySentimentResponse
"""
# [START natural_language_analyze_entity_sentiment_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_analyze_entity_sentiment_template_fields]
def __init__(
self,
document: Union[dict, Document],
encoding_type: Optional[enums.EncodingType] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start entity sentiment analyze")
response = hook.analyze_entity_sentiment(
document=self.document,
encoding_type=self.encoding_type,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Finished entity sentiment analyze")
return MessageToDict(response)
class CloudNaturalLanguageAnalyzeSentimentOperator(BaseOperator):
"""
Analyzes the sentiment of the provided text.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageAnalyzeSentimentOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.enums.EncodingType
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
    :rtype: google.cloud.language_v1.types.AnalyzeSentimentResponse
"""
# [START natural_language_analyze_sentiment_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_analyze_sentiment_template_fields]
def __init__(
self,
document: Union[dict, Document],
encoding_type: Optional[enums.EncodingType] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.encoding_type = encoding_type
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start sentiment analyze")
response = hook.analyze_sentiment(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished sentiment analyze")
return MessageToDict(response)
class CloudNaturalLanguageClassifyTextOperator(BaseOperator):
"""
Classifies a document into categories.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudNaturalLanguageClassifyTextOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param retry: A retry object used to retry requests. If None is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
retry is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: str
"""
# [START natural_language_classify_text_template_fields]
template_fields = ("document", "gcp_conn_id")
# [END natural_language_classify_text_template_fields]
def __init__(
self,
document: Union[dict, Document],
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[MetaData] = None,
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.document = document
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudNaturalLanguageHook(gcp_conn_id=self.gcp_conn_id)
self.log.info("Start text classify")
response = hook.classify_text(
document=self.document, retry=self.retry, timeout=self.timeout, metadata=self.metadata
)
self.log.info("Finished text classify")
return MessageToDict(response)
| apache-2.0 | 3,930,899,505,662,823,400 | 40.044944 | 102 | 0.686559 | false |
goinnn/django-multiselectfield | example/app/urls.py | 1 | 1723 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 by Pablo Martín <[email protected]>
#
# This software is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from django import VERSION
try:
from django.conf.urls import url
# Compatibility for Django > 1.8
def patterns(prefix, *args):
if VERSION < (1, 9):
from django.conf.urls import patterns as django_patterns
return django_patterns(prefix, *args)
elif prefix != '':
raise NotImplementedError("You need to update your URLConf for "
"Django 1.10, or tweak it to remove the "
"prefix parameter")
else:
return list(args)
except ImportError:  # Django < 1.4, or Django >= 4.0 where `url` was removed
if VERSION < (4, 0):
from django.conf.urls.defaults import patterns, url
else:
from django.urls import re_path as url
from .views import app_index
if VERSION < (1, 11):
urlpatterns = patterns(
'',
url(r'^$', app_index, name='app_index'),
)
else:
urlpatterns = [
url(r'^$', app_index, name='app_index'),
]
| lgpl-3.0 | 1,116,952,307,818,030,800 | 34.875 | 79 | 0.639373 | false |
bunnylin/supersakura | translate.py | 1 | 4235 | #!/usr/bin/python
# CC0, 2017 :: Kirinn Bunnylin / Mooncore
# https://creativecommons.org/publicdomain/zero/1.0/
import sys, re, time, subprocess
#from subprocess import check_output
if len(sys.argv) < 2:
print("Usage: python translate.py inputfile.tsv >outputfile.tsv")
print("The input file should be a tsv. The leftmost column is preserved in")
print("the output as unique string IDs, and the rightmost column is taken")
print("as the text to be translated.")
print("The translated output is printed in stdout in tsv format. You should")
print('pipe it into a suitable file, for example "outputfile.tsv".')
sys.exit(1)
def GetTranslation(com):
# Handy retry loop with a timeout for easily invoking translate-shell.
tries = 8
while tries != 0:
tries -= 1
try:
transres = subprocess.check_output(com, timeout = 16)
transres = transres.decode(sys.stdout.encoding).split("\n")
except:
transres = [""]
    if transres != [""]: tries = 0  # only stop retrying once we actually got some output
else: time.sleep(16)
return transres
# Read the constant substitutions list into memory. The file trans-subs.txt
# should contain one substitution per line, in the form "source/new text".
# Lines starting with a # are treated as comments.
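# A hypothetical trans-subs.txt might look like:
#   # game-specific names
#   さくら/Sakura
#   先生/sensei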
sublist = []
with open("trans-subs.txt") as subfile:
for line in subfile:
if line[0] != "#":
line = line.rstrip()
if line != "":
splitline = line.split("/")
sublist.append({"from": splitline[0], "to": splitline[-1]})
# Print the output header.
print("String IDs\tOriginal\tPhonetic\tGoogle\tBing\tYandex")
sys.stdout.flush()
with open(sys.argv[1]) as infile:
for line in infile:
delaytime = time.time() + 1.024
# If this line has no tabs, the line as a whole is used as translatable
# input. Otherwise everything before the first tab is saved as the
# string ID, and everything after the last tab is used as the
# translatable input.
stringid = ""
splitline = line.rstrip().split("\t")
if len(splitline) > 1:
stringid = splitline[0]
line = splitline[-1]
# Output the string ID and translatable input.
linepart = stringid + "\t" + line + "\t"
sys.stdout.buffer.write(linepart.encode("utf-8"))
# Apply pre-translation substitutions.
for subitem in sublist:
line = line.replace(subitem["from"], subitem["to"])
# Replace backslashes with a double backslash. At least Bing sometimes
# drops backslashes if not doubled.
line = line.replace("\\", "\\\\")
# Google translate, wrapped in a retry loop.
transgoo = GetTranslation(["translate-shell","ja:en","-e","google",
"-no-ansi","-no-autocorrect","-show-alternatives","n",
"-show-languages","n","-show-prompt-message","n","--",line])
# transgoo is now expected to have the original on line 1, the phonetic
# on line 2 in brackets, and the translation on line 4.
trans0 = transgoo[1][1:-1]
trans1 = transgoo[3]
# Get the other translations.
trans2 = GetTranslation(["translate-shell","-b","ja:en","-e","bing",
"-no-ansi","--",line])[0]
trans3 = GetTranslation(["translate-shell","-b","ja:en","-e","yandex",
"-no-ansi","--",line])[0]
# A brief wait between requests is polite to the translation servers.
delaylen = delaytime - time.time()
if delaylen > 0: time.sleep(delaylen)
# Pack the translated strings in a single variable for post-processing.
# Delimit with tab characters.
transall = trans0 + "\t" + trans1 + "\t" + trans2 + "\t" + trans3 + "\n"
# If the output contains ": ", but the input doesn't, then the space was
# added unnecessarily and should be removed.
if transall.find(": ") != -1 and line.find(": ") == -1:
transall = transall.replace(": ", ":")
# The translators tend to add spaces after some backslashes, remove.
transall = transall.replace("\\ ", "\\")
# Change double-backslashes back to normal.
transall = transall.replace("\\\\", "\\")
# Some translators also add spaces after dollars, remove them.
transall = transall.replace("\\$ ", "\\$")
# Output the translated, processed strings.
sys.stdout.buffer.write(transall.encode("utf-8"))
sys.stdout.flush()
# end.
| gpl-3.0 | 9,078,087,450,414,523,000 | 35.508621 | 79 | 0.654782 | false |
marcusmchale/breedcafs | app/cypher.py | 1 | 76573 | class Cypher:
def __init__(self):
pass
# user procedures
allowed_emails = (
' MATCH '
' (e: Emails) '
' RETURN '
' e.allowed '
)
user_allowed_emails = (
' MATCH '
' (u:User) '
' WITH '
' COLLECT (DISTINCT u.email) as registered_emails '
' MATCH '
' (user:User {'
' username_lower : toLower(trim($username)) '
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(e: Emails) '
' RETURN '
' FILTER (n in e.allowed WHERE NOT n in registered_emails) as user_allowed '
)
email_find = (
' MATCH '
' (user: User { '
' email: toLower(trim($email)) '
' }) '
' RETURN '
' user '
)
confirm_email = (
' MATCH '
' (user: User { '
' email: toLower(trim($email)) '
' }) '
' SET '
' user.confirmed = true '
)
user_find = (
' MATCH '
' (user: User) '
' WHERE '
' user.username_lower = toLower($username) '
' OR '
' user.email = toLower(trim($email)) '
' RETURN '
' user '
)
username_find = (
' MATCH '
' (user: User { '
' username_lower: toLower($username)'
' }) '
' RETURN '
' user '
)
user_affiliations = (
' MATCH '
' (u: User { '
' username_lower: toLower($username) '
' }) '
' -[a: AFFILIATED]->(p: Partner) '
' OPTIONAL MATCH '
' (p)<-[: AFFILIATED {admin: true}]-(admin: User) '
' RETURN '
' p.name , '
' p.fullname , '
' a.confirmed as confirmed, '
' a.data_shared as data_shared , '
' admin.email as admin_email'
)
add_affiliations = (
' UNWIND '
' $partners as partner '
' MATCH '
' (u:User { '
' username_lower: toLower(trim($username)) '
' }), '
' (p:Partner { '
' name_lower: toLower(trim(partner)) '
' }) '
' MERGE '
' (u)-[a: AFFILIATED { '
' data_shared: false, '
' admin: false, '
' confirm_timestamp: [], '
' confirmed: false '
' }]->(p) '
' ON CREATE SET '
' a.add_timestamp = datetime.transaction().epochMillis '
' RETURN '
' p.name '
)
remove_affiliations = (
' UNWIND '
' $partners as partner '
' MATCH '
' (u:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[a:AFFILIATED { '
' data_shared: false '
' }]->(p: Partner {'
' name_lower: toLower(trim(partner)) '
' }) '
' WHERE '
		' size(a.confirm_timestamp) = 0 '
' DELETE '
' a '
' RETURN p.name '
)
password_reset = (
' MATCH '
' (user: User { '
' email : toLower(trim($email)) '
' }) '
' SET user.password = $password '
)
user_register = (
# This is a little cautious using merge to prevent overwriting a user profile if it is called in error
' MATCH '
' (partner:Partner {'
' name_lower: toLower(trim($partner)) '
' }) '
' MERGE '
' (user:User { '
' username_lower: toLower(trim($username)) '
' }) '
' ON CREATE SET '
' user.username = trim($username), '
' user.password = $password, '
' user.email = toLower(trim($email)), '
' user.name = $name, '
' user.time = datetime.transaction().epochMillis, '
' user.access = ["user"], '
' user.confirmed = false, '
' user.found = false '
' ON MATCH SET '
' user.found = TRUE '
' WITH '
' user, partner '
' WHERE '
' user.found = false '
' CREATE '
' (user)-[r: AFFILIATED { '
' data_shared: true, '
' confirmed: false, '
' confirm_timestamp: [], '
' admin: false '
' }]->(partner), '
' (user)-[: SUBMITTED]->(sub: Submissions), '
' (sub)-[: SUBMITTED]->(: Emails {allowed :[]}),'
' (sub)-[: SUBMITTED]->(locations: Locations), '
' (locations)-[: SUBMITTED]->(: Countries), '
' (locations)-[: SUBMITTED]->(: Regions), '
' (locations)-[: SUBMITTED]->(: Farms), '
' (sub)-[:SUBMITTED]->(items: Items), '
' (items)-[: SUBMITTED]->(: Fields), '
' (items)-[: SUBMITTED]->(: Blocks), '
' (items)-[: SUBMITTED]->(: Trees), '
' (items)-[: SUBMITTED]->(: Samples), '
' (sub)-[:SUBMITTED]->(: Records) '
)
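	# The "found" flag idiom above: MERGE either matches or creates the User, ON CREATE
	# initialises the profile with found = false while ON MATCH sets found = TRUE, and the
	# "WHERE user.found = false" guard means the affiliation and submission containers are
	# only created for genuinely new users. A stripped-down sketch of the same pattern
	# (labels and properties here are illustrative, not a query used by the app):
	#   MERGE (n:Thing {key: $key})
	#   ON CREATE SET n.found = false
	#   ON MATCH SET n.found = true
	#   WITH n WHERE n.found = false
	#   CREATE (n)-[:HAS]->(:Detail)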
add_allowed_email = (
' MATCH '
' (all: Emails) '
' WITH '
' all.allowed as allowed_emails '
' UNWIND '
' allowed_emails as email '
' WITH '
' COLLECT(DISTINCT email) as set '
' WHERE '
' NOT toLower(trim($email)) IN set '
' MATCH '
' (:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[:SUBMITTED]->(: Submissions) '
' -[:SUBMITTED]->(e: Emails) '
' SET e.allowed = e.allowed + [toLower(trim($email))] '
' RETURN toLower(trim($email)) '
)
remove_allowed_email = (
' MATCH '
' (:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[:SUBMITTED]->(: Submissions) '
' -[:SUBMITTED]->(e: Emails) '
' WITH e, extract(x in $email | toLower(trim(x))) as emails'
' SET e.allowed = FILTER (n in e.allowed WHERE NOT n IN emails) '
' RETURN emails '
)
user_del = (
' MATCH '
' (u:User { '
' email: toLower(trim($email)), '
' confirmed: false '
' }) '
' OPTIONAL MATCH '
' (u)-[:SUBMITTED*..3]->(n) '
' DETACH DELETE '
' u,n '
)
partner_admin_users = (
' MATCH '
' (:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[: AFFILIATED { '
' admin: true '
' }]->(p:Partner) '
' WITH p '
' MATCH '
' (p)<-[a:AFFILIATED]-(u:User) '
' RETURN { '
' Username: u.username, '
' Email: u.email, '
' Name: u.name, '
' Partner: p.name, '
' PartnerFullName: p.fullname, '
' Confirmed: a.confirmed '
' } '
)
global_admin_users = (
' MATCH '
' (u:User)-[a:AFFILIATED]->(p:Partner) '
' RETURN { '
' Username : u.username, '
' Email : u.email, '
' Name : u.name, '
' Partner : p.name, '
' PartnerFullName : p.fullname, '
' Confirmed : a.confirmed '
' } '
)
# these functions toggle the confirmed status so do both confirm/un-confirm operations
partner_confirm_users = (
' MATCH '
' (user:User { '
' username_lower: toLower(trim($username)) '
' }) '
' -[:AFFILIATED {admin : true}]->(p:Partner) '
' WHERE '
' "partner_admin" in user.access'
' MATCH '
' (p)<-[a:AFFILIATED]-(u:User) '
' UNWIND '
' $confirm_list as confirm '
' WITH '
' p,a,u '
' WHERE '
' p.name_lower = toLower(trim(confirm["partner"])) '
' AND '
' u.username_lower = toLower(trim(confirm["username"])) '
' SET '
' a.confirmed = NOT a.confirmed, '
' a.confirm_timestamp = a.confirm_timestamp + datetime.transaction().epochMillis '
' RETURN u.name '
)
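	# This query and the global variant below expect $confirm_list shaped like
	# (values are illustrative):
	#   [{"partner": "Partner A", "username": "some_user"}, ...]
	# and toggle a.confirmed for each matching affiliation, appending the time of the
	# change to a.confirm_timestamp.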
global_confirm_users = (
' MATCH '
' (p:Partner)<-[a:AFFILIATED]-(u:User) '
' UNWIND '
' $confirm_list as confirm '
' WITH '
' p,a,u '
' WHERE '
' p.name_lower = toLower(trim(confirm["partner"])) '
' AND '
' u.username_lower = toLower(trim(confirm["username"])) '
' SET '
' a.confirmed = NOT a.confirmed, '
' a.confirm_timestamp = a.confirm_timestamp + datetime.transaction().epochMillis '
' RETURN u.name '
)
partner_admins = (
' MATCH '
' (u:User)-[a:AFFILIATED]->(p:Partner) '
' RETURN { '
' Username : u.username, '
' Email : u.email, '
' Name : u.name, '
' Partner : p.name, '
' PartnerFullName : p.fullname, '
' Confirmed : a.admin '
' } '
)
confirm_admins = (
' MATCH '
' (p:Partner)<-[a:AFFILIATED]-(u:User) '
' UNWIND $admins as admin '
' WITH '
' p,a,u '
' WHERE '
' p.name_lower = toLower(trim(admin["partner"])) '
' AND '
' u.username_lower = toLower(trim(admin["username"])) '
' SET '
' a.admin = NOT a.admin '
' WITH u '
' MATCH (u)-[a:AFFILIATED]->(:Partner) '
' WITH u, collect(a.admin) as admin_rights '
' set u.access = CASE '
' WHEN true IN admin_rights '
' THEN ["user","partner_admin"] '
' ELSE ["user"] '
' END '
' RETURN '
' u.name '
)
# Upload procedures
upload_check_value = (
# make sure that all the entries match accepted entries
# handles empty items and white space
# forces strings to lower case and float/integer types
# removes % symbols
# ! ensure to declare input (as node) and value (from file) before including
' CASE '
' WHEN input.format = "multicat" '
' THEN CASE '
' WHEN size(FILTER (n in split(value, ":") WHERE size(n) > 0)) '
' = size(FILTER (n in split(value, ":") WHERE toLower(trim(n)) in '
' EXTRACT(item in input.category_list | toLower(item)))) '
' THEN trim(value) '
' ELSE Null '
' END '
' WHEN input.format = "categorical" '
' THEN [category IN input.category_list WHERE toLower(category) = toLower(trim(value)) | category][0] '
' WHEN input.format = "text" '
' THEN CASE '
' WHEN input.name_lower IN [ '
' "assign field sample to sample(s) by id", '
' "assign field sample to tree(s) by id", '
' "assign field sample to block(s) by id" '
' ] THEN CASE '
' WHEN size(split(value, "," )) = size( '
' filter(x in split(value, ",") WHERE '
' toInteger(trim(x)) IS NOT NULL '
' OR ( '
' size(split(x, "-")) = 2'
' AND toInteger(split(x, "-")[0]) IS NOT NULL '
' AND toInteger(split(x, "-")[1]) IS NOT NULL'
' ) '
' ) '
' ) '
' THEN value '
' ELSE Null '
' END '
' WHEN input.name_lower IN [ '
' "assign field sample to block by name", '
' "assign tree to block by name" '
' ] '
' THEN trim(value) '
' WHEN input.name contains "time" '
' THEN CASE '
' WHEN size(split(value, ":")) = 2 '
' AND size(split(value, ":")[0]) <= 2 '
' AND toInteger(trim(split(value, ":")[0])) <=24 '
' AND toInteger(trim(split(value, ":")[0])) >= 0 '
' AND size(split(value, ":")[1]) <= 2 '
' AND toInteger(trim(split(value, ":")[1])) < 60 '
' AND toInteger(trim(split(value, ":")[1])) >= 0 '
' THEN trim(value) '
' ELSE Null '
' END '
' ELSE '
' toString(value) '
' END '
' WHEN input.format = "percent" '
' THEN CASE '
' WHEN toFloat(replace(value, "%", "")) IS NOT NULL '
' THEN toFloat(replace(value, "%", "")) '
' ELSE Null '
' END '
' WHEN input.format = "counter" '
' THEN CASE '
' WHEN toInteger(value) IS NOT NULL '
' THEN toInteger(value) '
' ELSE '
' Null '
' END '
' WHEN input.format = "numeric" '
' THEN CASE '
' WHEN toFloat(value) IS NOT NULL '
' THEN toFloat(value) '
' ELSE Null '
' END '
' WHEN input.format = "boolean" '
' THEN CASE '
' WHEN toLower(value) in ["yes","y"] '
' THEN True '
' WHEN toLower(value) in ["no","n"] '
' THEN False '
' WHEN toBoolean(value) IS NOT NULL '
' THEN toBoolean(value) '
' ELSE Null '
' END '
' WHEN input.format = "location" '
' THEN CASE '
' WHEN size(split(value, ";")) = 2 '
' AND toFloat(trim(split(value, ";")[0])) IS NOT NULL '
' AND toFloat(trim(split(value, ";")[1])) IS NOT NULL '
' THEN trim(value) '
' ELSE Null '
' END '
' WHEN input.format = "date" '
' THEN CASE '
' WHEN size(split(value, "-")) = 3 '
' AND size(trim(split(value, "-")[0])) = 4 '
' AND size(trim(split(value, "-")[1])) <= 2 '
' AND size(trim(split(value, "-")[1])) >= 1 '
' AND toInteger(trim(split(value, "-")[1])) >= 1 '
' AND toInteger(trim(split(value, "-")[1])) <= 12 '
' AND size(trim(split(value, "-")[2])) <= 2 '
' AND size(trim(split(value, "-")[2])) >= 1 '
' AND toInteger(trim(split(value, "-")[2])) >= 1 '
' AND toInteger(trim(split(value, "-")[2])) <= 31 '
' THEN '
' trim(value) '
' ELSE '
' Null '
' END '
' ELSE Null '
' END '
)
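	# upload_check_value is a fragment spliced into the upload statements below by string
	# concatenation (e.g. "+ upload_check_value + ' AS value '"), so "input" and "value"
	# must already be bound where it is included. Illustrative outcomes of the CASE
	# (example values are assumptions, not test data):
	#   format "percent", value "85%"      -> 85.0
	#   format "counter", value "12"       -> 12
	#   format "boolean", value "y"        -> true
	#   format "date",    value "2018-5-3" -> "2018-5-3" (malformed input yields null)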
upload_fb_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 1) as replicate, '
' toLower(trim(csvLine.trait)) as input_name, '
' trim(csvLine.value) as value, '
' apoc.date.parse(csvLine.timestamp, "ms", "yyyy-MM-dd HH:mm:sszzz") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' WHERE trim(csvLine.value) <> "" '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' OPTIONAL MATCH '
' (:RecordType { '
' name_lower: "trait" '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input, '
' input_name, '
' item, replicate, '
' time, '
' value '
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate, '
' time: time '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' time, '
+ upload_check_value +
' AS value, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' OR '
' a.confirmed <> True '
' OR'
' r.value <> value '
' ) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' value, '
' COLLECT(DISTINCT({ '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
upload_fb = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' trim(csvLine.timestamp) as text_time, '
' trim(csvLine.person) as person, '
' trim(csvLine.location) as location, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 1) as replicate, '
' toLower(trim(csvLine.trait)) as input_name, '
' trim(csvLine.value) as value, '
' apoc.date.parse(csvLine.timestamp, "ms", "yyyy-MM-dd HH:mm:sszzz") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' WHERE trim(csvLine.value) <> "" '
# And identify the fields and inputs assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }), '
' (:RecordType {'
' name_lower: "trait" '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' location, '
' time, '
' replicate, '
' text_time, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' time : time, '
' replicate: replicate '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.location = location, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
# additional statements to occur when new data point
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' value, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Replicate: r.replicate, '
' Time: r.time, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id, r.replicate '
)
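	# Sketch of how these upload statements are expected to be driven from Python with the
	# neo4j driver (session handling and parameter values are illustrative assumptions):
	#   with driver.session() as session:
	#       result = session.run(
	#           Cypher.upload_fb,
	#           filename="file:///fieldbook_export.csv",
	#           username="some_user"
	#       )
	# $filename must be a location LOAD CSV can reach and $username an existing User.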
upload_table_property_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toUpper(csvLine.uid) '
' END as uid, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' OPTIONAL MATCH '
' (:RecordType {'
' name_lower: "property" '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input_name, '
' item, '
' input, '
' csvLine[input_name] as value '
' WHERE trim(csvLine[input_name]) <> ""'
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, '
' input, '
+ upload_check_value +
' AS value, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' ) OR ( '
' a.confirmed <> True '
' OR'
' r.value <> value '
' ) '
' WITH '
' row_index, '
' item, '
' input, '
' input_name, '
' value, '
' COLLECT(DISTINCT({ '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
upload_table_trait_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' OPTIONAL MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' time, '
' csvLine[input_name] as value '
' WHERE trim(csvLine[input_name]) <> "" '
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate, '
' time: time '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' time, '
+ upload_check_value +
' AS value, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' OR '
' a.confirmed <> True '
' OR'
' r.value <> value '
' ) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' value, '
' COLLECT(DISTINCT({ '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
upload_table_condition_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level, '
# start time from start date and start time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`start date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`start date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`start time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`start time`, " ", "") '
' ELSE '
' "00:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as start, '
# end time from end date and end time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`end date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`end date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`end time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`end time`, " ", "") '
' ELSE '
' "24:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as end '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' OPTIONAL MATCH '
' (:RecordType { '
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' start, end, '
' csvLine[input_name] as value '
' WHERE trim(csvLine[input_name]) <> "" '
' OPTIONAL MATCH '
' (item) '
' <-[:FOR_ITEM]-(if: ItemInput) '
' -[:FOR_INPUT*..2]->(input), '
' (if) '
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED { '
' data_shared: true'
' }]->(p:Partner) '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' start, end, '
+ upload_check_value +
' AS value, '
' CASE WHEN r.start <> False THEN r.start ELSE Null END AS r_start, '
' CASE WHEN r.end <> False THEN r.end ELSE Null END AS r_end, '
' CASE '
' WHEN a.confirmed '
' THEN r.value '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_value, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' value IS NULL '
' ) OR ( '
# condition conflicts
' ( '
' a.confirmed <> True '
' OR '
' r.value <> value '
' ) AND ( '
# handle fully bound records
# - any overlapping records
' ( '
' r_start < end '
' AND '
' r_end > start '
' ) OR ( '
# - a record that has a lower bound in the bound period
' r_start >= start '
' AND '
' r_start < end '
' ) OR ( '
# - a record that has an upper bound in the bound period
' r_end > start '
' AND '
' r_end <= end '
' ) OR ( '
# now handle lower bound only records
' end IS NULL '
' AND ( '
' ( '
# - existing bound period includes start
' r_end > start '
' AND '
' r_start <= start '
# - record with same lower bound
' ) OR ( '
' r_start = start '
# - record with upper bound only greater than this lower bound
' ) OR ( '
' r_start IS NULL '
' AND '
' r_end > start '
' )'
' ) '
' ) OR ( '
# now handle upper bound only records
' start IS NULL '
' AND ( '
' ( '
# - existing bound period includes end
' r_end >= end '
' AND '
' r_start < end '
# - record with same upper bound
' ) OR ( '
' r_end = end '
# - record with lower bound only less than this upper bound
' ) OR ( '
' r_end IS NULL '
' AND '
' r_start < end '
' ) '
' )'
' ) OR ( '
# always conflict with unbound records
' r_end IS NULL '
' AND '
' r_start IS NULL '
' ) '
' ) '
' ) '
' WITH '
' row_index, '
' input_name, '
' item, replicate, '
' input, '
' value, '
' COLLECT(DISTINCT({ '
' start: r_start, '
' end: r_end, '
' existing_value: toString(r_value), '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: value, '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
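    # Pre-submission check for curve (x/y series) data: compare the uploaded values
    # against existing records for the same item, replicate and time, and report
    # conflicting or access-restricted entries for up to 50 rows.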
upload_table_curve_check = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' ['
' key in keys(csvLine) WHERE toFloat(key) in $x_values AND toFloat(csvLine[key]) <> "" '
' | [toFloat(key), toFloat(csvLine[key])]'
' ] as x_y_list '
' UNWIND x_y_list as x_y '
' WITH '
' csvLine, '
' x_y '
' ORDER BY x_y '
' WITH '
' csvLine, '
' collect(x_y[0]) as x_values, '
' collect(x_y[1]) as y_values '
' WITH '
' csvLine, '
' x_values, '
' y_values, '
' toInteger(csvLine.row_index) as row_index, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' OPTIONAL MATCH '
' (item: Item { '
' uid: uid '
' }) '
' OPTIONAL MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower($input_name) '
' })-[:AT_LEVEL]->(:ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' row_index, '
' item, replicate, '
' input, '
' time, '
' x_values,'
' y_values '
' WHERE size(y_values) > 0 '
' OPTIONAL MATCH '
' (item)'
' <-[:FOR_ITEM]-(if: ItemInput)'
' -[:FOR_INPUT*..2]->(input), '
' (if)'
' <-[:RECORD_FOR]-(r: Record { '
' replicate: replicate, '
' time: time '
' }) '
' <-[s: SUBMITTED]-(: UserFieldInput) '
' <-[: SUBMITTED]-(: Records) '
' <-[: SUBMITTED]-(: Submissions) '
' <-[: SUBMITTED]-(u: User) '
' -[:AFFILIATED {data_shared: true}]->(p:Partner) '
' WHERE '
# compare r.y_values with y_values
# list relevant [x_value, y_value] pairs
' [i IN range(0, size(x_values) - 1) WHERE x_values[i] in r.x_values | [x_values[i], y_values[i]]] <> '
# list of relevant [r.x_value, r.y_value] pairs
' [i IN range(0, size(r.x_values) - 1) WHERE r.x_values[i] in x_values | [r.x_values[i], r.y_values[i]]] '
' OPTIONAL MATCH '
' (p)<-[a: AFFILIATED]-(: User {username_lower: toLower($username)}) '
' WITH '
' row_index, '
' item, replicate, '
' input, '
' time, '
' x_values, '
' y_values, '
' CASE '
' WHEN a.confirmed '
' THEN r.y_values '
' ELSE CASE '
' WHEN r IS NOT NULL '
' THEN "ACCESS DENIED" '
' ELSE null '
' END '
' END AS r_y_values, '
' r.x_values as r_x_values, '
' s.time AS `submitted at`, '
' CASE WHEN a.confirmed THEN u.name ELSE p.name END AS user, '
' a.confirmed AS access '
' WHERE '
' ( '
' item IS NULL '
' OR '
' input IS NULL '
' OR '
' a.confirmed <> True '
' OR '
' r.y_values <> y_values '
' ) '
' WITH '
' row_index, '
' item, replicate, '
' input, '
' x_values, '
' y_values, '
' COLLECT(DISTINCT({ '
' existing_value: [i in range(0, size(r_x_values) - 1) | [r_x_values[i], r_y_values[i]]], '
' `submitted at`: `submitted at`, '
' user: user, '
' access: access '
' })) as conflicts '
' RETURN { '
' row_index: row_index, '
' `Supplied input name`: $input_name, '
' UID: item.uid, '
' Replicate: replicate, '
' `Input variable`: input.name, '
' Format: input.format, '
' `Category list`: input.category_list, '
' Value: [i in range(0, size(x_values) - 1) | [x_values[i], y_values[i]]], '
' Conflicts: conflicts '
' } '
' ORDER BY row_index '
' LIMIT 50 '
)
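    # Merge property records (no time component) from an uploaded table, one Record
    # per item and input variable, and return per-row feedback that masks existing
    # values the submitting user is not permitted to see.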
upload_table_property = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toUpper(csvLine.uid) '
' END as uid, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
# Check for data in table
' WHERE trim(csvLine[input_name]) <> "" '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' csvLine[input_name] as value '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' person, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput) '
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record) '
' -[: RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
# additional statements to occur when new data point
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' value, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id '
)
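    # Merge trait records (timestamped, per replicate) from an uploaded table;
    # rows without a parseable date/time are dropped. Returns per-row feedback
    # with access masking as above.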
upload_table_trait = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
# time from date and time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
# And identify the fields and input variables assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
# Check for data in table
' WHERE trim(csvLine[input_name]) <> "" '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' csvLine[input_name] as value, '
# to allow differentiation of defaulted time and set time
' csvLine.time as text_time '
# for trait data if no time is set then drop the row
' WHERE '
' time IS NOT NULL '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' text_time, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' time : time, '
' replicate: replicate '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.text_time = text_time, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
# additional statements to occur when new data point
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' value, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Replicate: r.replicate, '
' Time: r.time, '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id, r.replicate '
)
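    # Merge curve records (x/y series keyed on time, replicate and x-values)
    # from an uploaded table and return per-row feedback with access masking.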
upload_table_curve = (
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' [ '
' key in keys(csvLine) WHERE toFloat(key) in $x_values AND toFloat(csvLine[key]) <> "" '
' | [toFloat(key), toFloat(csvLine[key])]'
' ] as x_y_list '
' UNWIND x_y_list as x_y '
' WITH '
' csvLine, '
' x_y '
' ORDER BY x_y '
' WITH '
' csvLine, '
' collect(x_y[0]) as x_values, '
' collect(x_y[1]) as y_values '
' WITH '
' csvLine, '
' x_values, '
' y_values, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
# time from date and time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.date, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.date, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.date, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.date, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.time, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.time, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.time, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.time, " ", "") '
' ELSE '
' "12:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as time, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level '
# And identify the fields and input variable assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower($input_name) '
' })-[:AT_LEVEL]->(item_level: ItemLevel { '
' name_lower: level '
' }) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' x_values, '
' y_values, '
# to allow differentiation of defaulted time and set time
' csvLine.time as text_time '
# for trait data if no time is set then drop the row
' WHERE '
' time IS NOT NULL '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' time, '
' replicate, '
' text_time, '
' x_values, '
' y_values '
' WHERE size(y_values) > 0 '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' time : time, '
' replicate: replicate, '
' x_values: x_values '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.text_time = text_time, '
' r.y_values = y_values '
# additional statements to occur when new data point
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' x_values, '
' y_values, '
' r '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN [i in range(0, size(r.x_values) - 1) | [r.x_values[i], r.y_values[i]]] '
' WHEN access IS NOT NULL '
' THEN [i in range(0, size(r.x_values) - 1) | [r.x_values[i], r.y_values[i]]] '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: [i in range(0, size(x_values) - 1) | [x_values[i], y_values[i]]], '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Replicate: r.replicate, '
' Time: r.time, '
' UID: item.uid, '
' Input: input.name, '
' Partner: partner.name '
' } '
' ORDER BY input.name_lower, field.uid, item.id, r.replicate '
)
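    # Merge condition records (bounded by start/end times) from an uploaded table,
    # then re-check for overlapping existing records so that conflicts, including
    # concurrent or in-table ones, can be reported back to the user.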
upload_table_condition = (
# load in the csv
' LOAD CSV WITH HEADERS FROM $filename as csvLine '
' WITH '
' csvLine, '
' trim(csvLine.person) as person, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN toInteger(csvLine.uid) '
' ELSE '
' toInteger(split(csvLine.uid, "_")[0]) '
' END as field_uid, '
' CASE '
' WHEN size(split(split(csvLine.uid, ".")[0], "_")) = 1 '
' THEN toInteger(split(csvLine.uid, ".")[0]) '
' ELSE '
' toUpper(split(csvLine.uid, ".")[0]) '
' END as uid, '
' coalesce(toInteger(split(trim(toUpper(csvLine.uid)), ".")[1]), 0) as replicate, '
' CASE '
' WHEN size(split(csvLine.uid, "_")) = 1 '
' THEN "field" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "B" '
' THEN "block" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "T" '
' THEN "tree" '
' WHEN toUpper(left(split(csvLine.uid, "_")[1],1)) = "S" '
' THEN "sample" '
' END as level, '
# start time from start date and start time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`start date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`start date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`start date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`start time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`start time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`start time`, " ", "") '
' ELSE '
' "00:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as start, '
# end time from end date and end time
' apoc.date.parse( '
' CASE '
' WHEN size(split(replace(csvLine.`end date`, " ", ""), "-")) = 3 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[0]) = 4 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[1]) <= 12 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <=2 '
' AND size(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >=1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) >= 1 '
' AND toInteger(split(replace(csvLine.`end date`, " ", ""), "-")[2]) <= 31 '
' THEN '
' replace(csvLine.`end date`, " ", "") '
' ELSE '
' Null '
' END '
' + " " + '
' CASE '
' WHEN size(split(replace(csvLine.`end time`, " ", ""), ":")) = 2 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) <=24 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[0]) >= 0 '
' AND size(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <= 2 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) <=60 '
' AND toInteger(split(replace(csvLine.`end time`, " ", ""), ":")[1]) >=0 '
' THEN '
' replace(csvLine.`end time`, " ", "") '
' ELSE '
' "24:00" '
' END '
' , "ms", "yyyy-MM-dd HH:mm") as end '
# And identify the fields and inputs assessed
' MATCH '
' (field:Field { '
' uid: field_uid '
' }), '
' (item: Item { '
' uid: uid '
' }) '
' UNWIND $inputs as input_name '
' MATCH '
' (:RecordType {'
' name_lower: $record_type '
' }) '
' <-[:OF_TYPE]-(input: Input { '
' name_lower: toLower(input_name) '
' })-[:AT_LEVEL]->(item_level:ItemLevel { '
' name_lower: level '
' }) '
# Check for data in table
' WHERE trim(csvLine[input_name]) <> "" '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' start, end, '
' csvLine[input_name] as value, '
# to allow differentiation of defaulted time and set time
' csvLine.`start time` as text_start_time, '
' csvLine.`end time` as text_end_time '
' FOREACH (n in CASE '
' WHEN level = "field" '
' THEN [1] ELSE [] END | '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput :FieldInput)-[: FOR_INPUT]->(input) '
' ) '
' FOREACH (n in CASE '
' WHEN level in ["block", "tree", "sample"] '
' THEN [1] ELSE [] END | '
' MERGE '
' (field)<-[: FROM_FIELD]-(field_input: FieldInput)-[: FOR_INPUT]->(input) '
' MERGE '
' (item)<-[: FOR_ITEM]-(:ItemInput)-[: FOR_INPUT]->(field_input) '
' ) '
' WITH '
' field, '
' item, '
' input, '
' level, '
' person, '
' start, end, '
' text_start_time, text_end_time, '
+ upload_check_value +
' AS value '
' WHERE value IS NOT NULL '
# get the user submission tracking nodes
' MATCH '
' (:User { '
' username_lower : toLower(trim($username))'
' }) '
' -[: SUBMITTED]->(: Submissions) '
' -[: SUBMITTED]->(data_sub: Records) '
# and the item/input node
' MATCH '
' (item) '
' <-[: FOR_ITEM]-(item_input: ItemInput)'
' -[ :FOR_INPUT*..2]->(input) '
# and the field/input node
# todo consider the model here, this is an undirected match with two labels, not super happy with this one,
# todo would it be better to have a redundant ItemInput node for fields?
' MATCH (item) '
' -[:FOR_ITEM | FOR_INPUT*..2]-(field_input: FieldInput) '
' -[:FOR_INPUT]->(input) '
' MERGE '
' (r: Record { '
' start : CASE WHEN start IS NOT NULL THEN start ELSE False END, '
' end : CASE WHEN end IS NOT NULL THEN end ELSE False END '
' }) '
' -[:RECORD_FOR]->(item_input) '
' ON MATCH SET '
' r.found = True '
' ON CREATE SET '
' r.found = False, '
' r.person = person, '
' r.text_start_time = text_start_time, '
' r.text_end_time = text_end_time, '
' r.value = CASE '
' WHEN input.format <> "multicat" THEN value '
' ELSE extract(i in FILTER (n in split(value, ":") WHERE size(n) > 0 )| toLower(trim(i)))'
' END '
# additional statements to occur when new data point
' FOREACH (n IN CASE '
' WHEN r.found = False '
' THEN [1] ELSE [] END | '
# track user submissions through /User/FieldInput container
' MERGE '
' (data_sub)'
' -[:SUBMITTED]->(uff:UserFieldInput) '
' -[:CONTRIBUTED]->(field_input) '
# then finally the data with a timestamp
' MERGE '
' (uff)-[s1:SUBMITTED]->(r) '
' ON CREATE SET '
' s1.time = datetime.transaction().epochMillis '
' ) '
' WITH '
' field, '
' item, '
' input, '
' item_input, '
' value, '
' r, '
' start, end '
' MATCH '
' (partner:Partner) '
' <-[:AFFILIATED {data_shared: True}]-(user:User) '
' -[:SUBMITTED]->(:Submissions) '
' -[:SUBMITTED]->(:Records) '
' -[:SUBMITTED]->(:UserFieldInput) '
' -[submitted:SUBMITTED]->(r) '
# need to check for permissions for values that didn't merge to provide filtered feedback
# and optionally roll back if existing records overlap without access confirmed.
' OPTIONAL MATCH '
' (partner)<-[access: AFFILIATED {confirmed: True}]-(:User {username_lower:toLower(trim($username))}) '
# check again for conflicts - in case there have been concurrent submissions
# or there are conflicts within the uploaded table
' OPTIONAL MATCH '
' (r)'
' -[:RECORD_FOR]->(item_input) '
' <-[:RECORD_FOR]-(rr:Record) '
' <-[rr_sub:SUBMITTED]-(:UserFieldInput) '
' <-[:SUBMITTED]-(:Records) '
' <-[:SUBMITTED]-(:Submissions) '
' <-[:SUBMITTED]-(rr_user:User) '
' -[:AFFILIATED {data_shared: True}]->(rr_partner:Partner) '
' WHERE '
' ( '
# handle fully bound records
# - any overlapping records
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
' AND '
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' ) OR ( '
# - a record that has a lower bound in the bound period
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END >= start '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
' ) OR ( '
# - a record that has an upper bound in the bound period
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' AND '
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END <= end '
' ) OR ( '
# now handle lower bound only records
' end IS NULL '
' AND ( '
# - existing bound period includes start
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END <= start '
# - record with same lower bound
' ) OR ( '
' rr.start = start '
# - record with upper bound only greater than this lower bound
' ) OR ( '
' rr.start = False '
' AND '
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END > start '
' ) '
' ) OR ( '
# now handle upper bound only records
' start IS NULL '
' AND ( '
# - existing bound period includes end
' CASE WHEN rr.end <> False THEN rr.end ELSE Null END >= end '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
# - record with same upper bound
' ) OR ( '
' rr.end = end '
# - record with lower bound only less than this upper bound
' ) OR ( '
' rr.end = False '
' AND '
' CASE WHEN rr.start <> False THEN rr.start ELSE Null END < end '
' ) '
' ) OR ( '
# always conflict with unbound records
' rr.end = False '
' AND '
' rr.start = False '
' )'
' OPTIONAL MATCH '
' (rr_partner) '
' <-[rr_access: AFFILIATED {confirmed: True}]-(:User {username_lower: toLower(trim($username))}) '
    # If we don't have access, or we have access but the values don't match, then there is a potential conflict
# time parsing to allow various degrees of specificity in the relevant time range is below
' WITH '
' r, '
' access, '
' user, '
' partner, '
' value, '
' submitted, '
' item, '
' field, '
' input, '
' case WHEN rr IS NOT NULL AND (rr.value <> r.value OR rr_access IS NULL) THEN '
' collect(DISTINCT { '
' start: rr.start, '
' end: rr.end, '
' existing_value: CASE WHEN rr_access IS NOT NULL THEN toString(rr.value) ELSE "ACCESS DENIED" END, '
' `submitted at`: rr_sub.time, '
' user: CASE WHEN rr_access IS NOT NULL THEN rr_user.name ELSE rr_partner.name END, '
' access: CASE WHEN rr_access IS NOT NULL THEN True ELSE False END '
' }) '
' ELSE Null END as conflicts '
# And give the user feedback on their submission
' RETURN { '
' Found: r.found, '
' `Submitted by`: CASE WHEN access IS NOT NULL THEN user.name ELSE partner.name END, '
' `Submitted at`: submitted.time, '
' Value: CASE '
' WHEN NOT r.found '
' THEN r.value '
' WHEN access IS NOT NULL '
' THEN r.value '
' ELSE "ACCESS DENIED" '
' END, '
' `Uploaded value`: value, '
' Access: CASE WHEN access IS NULL THEN False ELSE True END, '
' Period: [r.start, r.end], '
' UID: item.uid, '
' `Input variable`: input.name, '
' Partner: partner.name, '
' Conflicts: conflicts '
' } '
' ORDER BY input.name_lower, field.uid, item.id '
)
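    # Build a nested country -> region -> farm -> field -> block structure with
    # tree counts; field-level counts exclude trees already counted within blocks.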
get_fields_treecount = (
' MATCH (country:Country)<-[:IS_IN]-(region: Region) '
' OPTIONAL MATCH (region)<-[:IS_IN]-(farm: Farm) '
' OPTIONAL MATCH (farm)<-[:IS_IN]-(field: Field) '
' OPTIONAL MATCH '
' (field)'
' <-[:IS_IN]-(:FieldTrees)'
' <-[:FOR]-(field_tree_counter:Counter {name:"tree"}) '
' OPTIONAL MATCH '
' (field)'
' <-[:IS_IN*2]-(block:Block)'
' <-[:IS_IN]-(:BlockTrees)'
' <-[:FOR]-(block_tree_counter:Counter {name:"tree"}) '
' WITH '
' country, '
' region, '
' farm, '
' field, '
' field_tree_counter.count as field_trees, '
' {'
' name: block.name, '
' label:"Block", '
' treecount: block_tree_counter.count '
' } as blocks, '
' block_tree_counter.count as block_trees '
' WITH '
' country, '
' region, '
' farm, '
' { '
' name: field.name, '
' label:"Field", '
' treecount: field_trees - sum(block_trees), '
' children: FILTER(block IN collect(blocks) WHERE block["name"] IS NOT NULL)'
' } as fields '
' WITH '
' country, '
' region, '
' {'
' name: farm.name, '
' label: "Farm", '
' children: FILTER(field IN collect(fields) WHERE field["name"] IS NOT NULL)'
' } as farms '
' WITH '
' country, '
' {'
' name: region.name, '
' label:"Region", '
' children: FILTER(farm IN collect(farms) WHERE farm["name"] IS NOT NULL)'
' } as regions '
' WITH '
' {'
' name: country.name, '
' label:"Country", '
' children: FILTER(region IN collect (regions) WHERE region["name"] IS NOT NULL)'
' } as countries '
' RETURN countries '
)
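    # Summarise a user's submissions between $starttime and $endtime as
    # node/relationship rows: record counts per input variable and field,
    # linked up through farm, region and country.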
get_submissions_range = (
# first get all the data collections and link to a base node formed from field
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(uff:UserFieldInput) '
' -[s:SUBMITTED]->(record: Record) '
' -[:RECORD_FOR]->(), '
' (uff)-[:CONTRIBUTED]->(ff:FieldInput) '
' -[:FROM_FIELD]->(field: Field), '
' (ff)-[:FOR_INPUT]->(input: Input) '
' WHERE s.time >= $starttime AND s.time <= $endtime '
' WITH '
' input, count(record) as record_count, field '
' RETURN '
' "Input" as d_label, '
' input.name + " (" + toString(record_count) + ")" as d_name, '
' id(field) + "_" + id(input) as d_id, '
' "Field" as n_label, '
' field.name as n_name,'
' id(field) as n_id, '
' "FROM" as r_type, '
' id(field) + "_" + id(input) + "_rel" as r_id, '
' id(field) + "_" + id(input) as r_start, '
' id(field) as r_end '
' UNION '
# get users farm context
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(:UserFieldInput) '
' -[:CONTRIBUTED]->(: FieldInput) '
' -[:FOR_ITEM | FROM_FIELD]->(field:Field) '
' -[:IS_IN]->(farm:Farm) '
' RETURN '
' "Field" as d_label, '
' field.name as d_name, '
' id(field) as d_id, '
' "Farm" as n_label, '
' farm.name as n_name, '
' id(farm) as n_id, '
' "IS_IN" as r_type, '
' (id(field) + "_" + id(farm)) as r_id, '
' id(field) as r_start, '
' id(farm) as r_end'
' UNION '
# link the above into region context
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(:UserFieldInput) '
' -[:CONTRIBUTED]->(: FieldInput) '
' -[:FOR_ITEM | FROM_FIELD]->(:Field) '
' -[:IS_IN]->(farm: Farm) '
' -[:IS_IN]->(region: Region) '
' RETURN '
' "Farm" as d_label, '
' farm.name as d_name, '
' id(farm) as d_id, '
' "Region" as n_label, '
' region.name as n_name, '
' id(region) as n_id, '
' "IS_IN" as r_type, '
' (id(farm) + "_" + id(region)) as r_id, '
' id(farm) as r_start, '
' id(region) as r_end'
' UNION '
# link the above into country context
' MATCH '
' (:User {username_lower: toLower($username)}) '
' -[:SUBMITTED*3]->(:UserFieldInput) '
' -[:CONTRIBUTED]->(: FieldInput) '
' -[:FOR_ITEM | FROM_FIELD]->(: Field) '
' -[:IS_IN]->(: Farm) '
' -[:IS_IN]->(region: Region) '
' -[:IS_IN]->(country: Country) '
' RETURN '
' "Region" as d_label, '
' region.name as d_name, '
' id(region) as d_id, '
' "Country" as n_label, '
' country.name as n_name, '
' id(country) as n_id, '
' "IS_IN" as r_type, '
' (id(region) + "_" + id(country)) as r_id, '
' id(region) as r_start, '
' id(country) as r_end'
)
| gpl-3.0 | 6,791,217,323,624,032,000 | 29.951091 | 110 | 0.525838 | false |
encukou/freeipa | ipaserver/install/dsinstance.py | 1 | 53454 | # Authors: Karl MacMillan <[email protected]>
# Simo Sorce <[email protected]>
#
# Copyright (C) 2007 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, absolute_import
import logging
import shutil
import pwd
import os
import time
import tempfile
import fnmatch
from lib389 import DirSrv
from lib389.idm.ipadomain import IpaDomain
from lib389.instance.options import General2Base, Slapd2Base
from lib389.instance.remove import remove_ds_instance as lib389_remove_ds
from lib389.instance.setup import SetupDs
from ipalib import x509
from ipalib.install import certmonger, certstore
from ipapython.certdb import (IPA_CA_TRUST_FLAGS,
EXTERNAL_CA_TRUST_FLAGS,
TrustFlags)
from ipapython import ipautil, ipaldap
from ipapython import dogtag
from ipaserver.install import service
from ipaserver.install import installutils
from ipaserver.install import certs
from ipaserver.install import replication
from ipaserver.install import sysupgrade
from ipaserver.install import upgradeinstance
from ipalib import api
from ipalib import errors
from ipalib import constants
from ipaplatform.constants import constants as platformconstants
from ipaplatform.tasks import tasks
from ipapython.dn import DN
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
DS_USER = platformconstants.DS_USER
DS_GROUP = platformconstants.DS_GROUP
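# Schema files shipped with IPA that are copied into the instance's schema
# directory during setup.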
IPA_SCHEMA_FILES = ("60kerberos.ldif",
"60samba.ldif",
"60ipaconfig.ldif",
"60basev2.ldif",
"60basev3.ldif",
"60ipapk11.ldif",
"60ipadns.ldif",
"60certificate-profiles.ldif",
"61kerberos-ipav3.ldif",
"65ipacertstore.ldif",
"65ipasudo.ldif",
"70ipaotp.ldif",
"70topology.ldif",
"71idviews.ldif",
"72domainlevels.ldif",
"73certmap.ldif",
"15rfc2307bis.ldif",
"15rfc4876.ldif")
ALL_SCHEMA_FILES = IPA_SCHEMA_FILES + ("05rfc2247.ldif", )
DS_INSTANCE_PREFIX = 'slapd-'
def find_server_root():
if os.path.isdir(paths.USR_LIB_DIRSRV_64):
return paths.USR_LIB_DIRSRV_64
else:
return paths.USR_LIB_DIRSRV
def config_dirname(serverid):
return (paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % serverid) + "/"
def schema_dirname(serverid):
return config_dirname(serverid) + "/schema/"
def remove_ds_instance(serverid):
"""Call the lib389 api to remove the instance. Because of the
design of the api, there is no "force" command. Provided a marker
file exists, it will attempt the removal, and the marker is the *last*
    file to be removed. I.e., just run this multiple times until it works
    (if you even need to run it multiple times).
"""
logger.debug("Attempting to remove instance %s", serverid)
# Alloc the local instance by name (no creds needed!)
ds = DirSrv(verbose=True, external_log=logger)
ds.local_simple_allocate(serverid)
# Remove it
lib389_remove_ds(ds)
logger.debug("Instance removed correctly.")
def get_ds_instances():
'''
Return a sorted list of all 389ds instances.
If the instance name ends with '.removed' it is ignored. This
matches 389ds behavior.
'''
dirsrv_instance_dir = paths.ETC_DIRSRV
instances = []
for basename in os.listdir(dirsrv_instance_dir):
pathname = os.path.join(dirsrv_instance_dir, basename)
# Must be a directory
if os.path.isdir(pathname):
# Must start with prefix and not end with .removed
if (basename.startswith(DS_INSTANCE_PREFIX) and
not basename.endswith('.removed')):
# Strip off prefix
instance = basename[len(DS_INSTANCE_PREFIX):]
# Must be non-empty
if instance:
instances.append(instance)
instances.sort()
return instances
def check_ports():
"""
    Check whether the Directory Server ports are open.

    Returns a tuple with two booleans, one for the insecure port 389 and one
    for the secure port 636. True means that the port is free, False means
    that the port is taken.
"""
ds_unsecure = not ipautil.host_port_open(None, 389)
ds_secure = not ipautil.host_port_open(None, 636)
return (ds_unsecure, ds_secure)
def is_ds_running(server_id=''):
return services.knownservices.dirsrv.is_running(instance_name=server_id)
def get_domain_level(api=api):
dn = DN(('cn', 'Domain Level'),
('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
with ipaldap.LDAPClient.from_realm(api.env.realm) as conn:
conn.external_bind()
try:
entry = conn.get_entry(dn, ['ipaDomainLevel'])
except errors.NotFound:
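            # Treat a missing Domain Level entry as domain level 0.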
return constants.DOMAIN_LEVEL_0
else:
return int(entry.single_value['ipaDomainLevel'])
def get_all_external_schema_files(root):
"""Get all schema files"""
f = []
for path, _subdirs, files in os.walk(root):
for name in files:
if fnmatch.fnmatch(name, "*.ldif"):
f.append(os.path.join(path, name))
return sorted(f)
class DsInstance(service.Service):
def __init__(self, realm_name=None, domain_name=None, fstore=None,
domainlevel=None, config_ldif=None):
super(DsInstance, self).__init__(
"dirsrv",
service_desc="directory server",
fstore=fstore,
service_prefix=u'ldap',
keytab=paths.DS_KEYTAB,
service_user=DS_USER,
realm_name=realm_name
)
self.nickname = 'Server-Cert'
self.sub_dict = None
self.domain = domain_name
self.master_fqdn = None
self.pkcs12_info = None
self.cacert_name = None
self.ca_is_configured = True
self.cert = None
self.idstart = None
self.idmax = None
self.ca_subject = None
self.subject_base = None
self.open_ports = []
self.run_init_memberof = True
self.config_ldif = config_ldif # updates for dse.ldif
self.domainlevel = domainlevel
if realm_name:
self.suffix = ipautil.realm_to_suffix(self.realm)
self.serverid = ipaldap.realm_to_serverid(self.realm)
self.__setup_sub_dict()
else:
self.suffix = DN()
self.serverid = None
subject_base = ipautil.dn_attribute_property('_subject_base')
def __common_setup(self):
self.step("creating directory server instance", self.__create_instance)
self.step("configure autobind for root", self.__root_autobind)
self.step("stopping directory server", self.__stop_instance)
self.step("updating configuration in dse.ldif", self.__update_dse_ldif)
self.step("starting directory server", self.__start_instance)
self.step("adding default schema", self.__add_default_schemas)
self.step("enabling memberof plugin", self.__add_memberof_module)
self.step("enabling winsync plugin", self.__add_winsync_module)
self.step("configure password logging", self.__password_logging)
self.step("configuring replication version plugin", self.__config_version_module)
self.step("enabling IPA enrollment plugin", self.__add_enrollment_module)
self.step("configuring uniqueness plugin", self.__set_unique_attrs)
self.step("configuring uuid plugin", self.__config_uuid_module)
self.step("configuring modrdn plugin", self.__config_modrdn_module)
self.step("configuring DNS plugin", self.__config_dns_module)
self.step("enabling entryUSN plugin", self.__enable_entryusn)
self.step("configuring lockout plugin", self.__config_lockout_module)
self.step("configuring topology plugin", self.__config_topology_module)
self.step("creating indices", self.__create_indices)
self.step("enabling referential integrity plugin", self.__add_referint_module)
self.step("configuring certmap.conf", self.__certmap_conf)
self.step("configure new location for managed entries", self.__repoint_managed_entries)
self.step("configure dirsrv ccache and keytab",
self.configure_systemd_ipa_env)
self.step("enabling SASL mapping fallback",
self.__enable_sasl_mapping_fallback)
def __common_post_setup(self):
self.step("initializing group membership", self.init_memberof)
self.step("adding master entry", self.__add_master_entry)
self.step("initializing domain level", self.__set_domain_level)
self.step("configuring Posix uid/gid generation",
self.__config_uidgid_gen)
self.step("adding replication acis", self.__add_replication_acis)
self.step("activating sidgen plugin", self._add_sidgen_plugin)
self.step("activating extdom plugin", self._add_extdom_plugin)
self.step("configuring directory to start on boot", self.__enable)
def init_info(self, realm_name, fqdn, domain_name, dm_password,
subject_base, ca_subject,
idstart, idmax, pkcs12_info, ca_file=None,
setup_pkinit=False):
self.realm = realm_name.upper()
self.serverid = ipaldap.realm_to_serverid(self.realm)
self.suffix = ipautil.realm_to_suffix(self.realm)
self.fqdn = fqdn
self.dm_password = dm_password
self.domain = domain_name
self.subject_base = subject_base
self.ca_subject = ca_subject
self.idstart = idstart
self.idmax = idmax
self.pkcs12_info = pkcs12_info
if pkcs12_info:
self.ca_is_configured = False
self.setup_pkinit = setup_pkinit
self.ca_file = ca_file
self.__setup_sub_dict()
def create_instance(self, realm_name, fqdn, domain_name,
dm_password, pkcs12_info=None,
idstart=1100, idmax=999999,
subject_base=None, ca_subject=None,
hbac_allow=True, ca_file=None, setup_pkinit=False):
self.init_info(
realm_name, fqdn, domain_name, dm_password,
subject_base, ca_subject,
idstart, idmax, pkcs12_info, ca_file=ca_file,
setup_pkinit=setup_pkinit)
self.__common_setup()
self.step("restarting directory server", self.__restart_instance)
self.step("adding sasl mappings to the directory", self.__configure_sasl_mappings)
self.step("adding default layout", self.__add_default_layout)
self.step("adding delegation layout", self.__add_delegation_layout)
self.step("creating container for managed entries", self.__managed_entries)
self.step("configuring user private groups", self.__user_private_groups)
self.step("configuring netgroups from hostgroups", self.__host_nis_groups)
self.step("creating default Sudo bind user", self.__add_sudo_binduser)
self.step("creating default Auto Member layout", self.__add_automember_config)
self.step("adding range check plugin", self.__add_range_check_plugin)
if hbac_allow:
self.step("creating default HBAC rule allow_all", self.add_hbac)
self.step("adding entries for topology management", self.__add_topology_entries)
self.__common_post_setup()
self.start_creation(runtime=30)
def enable_ssl(self):
self.steps = []
self.step("configuring TLS for DS instance", self.__enable_ssl)
if self.master_fqdn is None:
self.step("adding CA certificate entry", self.__upload_ca_cert)
else:
self.step("importing CA certificates from LDAP",
self.__import_ca_certs)
self.step("restarting directory server", self.__restart_instance)
self.start_creation()
def create_replica(self, realm_name, master_fqdn, fqdn,
domain_name, dm_password,
subject_base, ca_subject,
api, pkcs12_info=None, ca_file=None,
ca_is_configured=None,
setup_pkinit=False):
# idstart and idmax are configured so that the range is seen as
# depleted by the DNA plugin and the replica will go and get a
# new range from the master.
# This way all servers use the initially defined range by default.
idstart = 1101
idmax = 1100
self.init_info(
realm_name=realm_name,
fqdn=fqdn,
domain_name=domain_name,
dm_password=dm_password,
subject_base=subject_base,
ca_subject=ca_subject,
idstart=idstart,
idmax=idmax,
pkcs12_info=pkcs12_info,
ca_file=ca_file,
setup_pkinit=setup_pkinit,
)
self.master_fqdn = master_fqdn
if ca_is_configured is not None:
self.ca_is_configured = ca_is_configured
self.promote = True
self.api = api
self.__common_setup()
self.step("restarting directory server", self.__restart_instance)
self.step("creating DS keytab", self.request_service_keytab)
# 389-ds allows to ignore time skew during replication. It is disabled
# by default to avoid issues with non-contiguous CSN values which
# derived from a time stamp when the change occurs. However, there are
# cases when we are interested only in the changes coming from the
# other side and should therefore allow ignoring the time skew.
#
# This helps with initial replication or force-sync because
# the receiving side has no valuable changes itself yet.
self.step("ignore time skew for initial replication",
self.__replica_ignore_initial_time_skew)
self.step("setting up initial replication", self.__setup_replica)
self.step("prevent time skew after initial replication",
self.replica_manage_time_skew)
self.step("adding sasl mappings to the directory", self.__configure_sasl_mappings)
self.step("updating schema", self.__update_schema)
# See LDIFs for automember configuration during replica install
self.step("setting Auto Member configuration", self.__add_replica_automember_config)
self.step("enabling S4U2Proxy delegation", self.__setup_s4u2proxy)
self.__common_post_setup()
self.start_creation(runtime=30)
def _get_replication_manager(self):
# Always connect to self over ldapi
conn = ipaldap.LDAPClient.from_realm(self.realm)
conn.external_bind()
repl = replication.ReplicationManager(
self.realm, self.fqdn, self.dm_password, conn=conn
)
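        # Without a DM password, or when promoting a replica, leave the bind
        # credentials unset so that GSSAPI is used instead.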
if self.dm_password is not None and not self.promote:
bind_dn = DN(('cn', 'Directory Manager'))
bind_pw = self.dm_password
else:
bind_dn = bind_pw = None
return repl, bind_dn, bind_pw
def __setup_replica(self):
"""
        Set up initial replication between the replica and the remote master.

        GSSAPI is always used as the replication bind method. Note, however,
        that the bind method used to set up the replication differs between
        domain levels:
        * in domain level 0, Directory Manager credentials are used to bind
          to the remote master
        * in domain level 1, GSSAPI using admin/privileged host credentials
          is used (we do not have access to the master's DM password at this
          stage)
"""
replication.enable_replication_version_checking(
self.realm,
self.dm_password)
repl, bind_dn, bind_pw = self._get_replication_manager()
repl.setup_promote_replication(
self.master_fqdn,
r_binddn=bind_dn,
r_bindpw=bind_pw,
cacert=self.ca_file
)
self.run_init_memberof = repl.needs_memberof_fixup()
def finalize_replica_config(self):
repl, bind_dn, bind_pw = self._get_replication_manager()
repl.finalize_replica_config(
self.master_fqdn,
r_binddn=bind_dn,
r_bindpw=bind_pw,
cacert=self.ca_file
)
def __configure_sasl_mappings(self):
        # We need to remove any existing SASL mappings in the directory as
        # otherwise they may conflict.
try:
res = api.Backend.ldap2.get_entries(
DN(('cn', 'mapping'), ('cn', 'sasl'), ('cn', 'config')),
api.Backend.ldap2.SCOPE_ONELEVEL,
"(objectclass=nsSaslMapping)")
for r in res:
try:
api.Backend.ldap2.delete_entry(r)
except Exception as e:
logger.critical(
"Error during SASL mapping removal: %s", e)
raise
except Exception as e:
logger.critical("Error while enumerating SASL mappings %s", e)
raise
entry = api.Backend.ldap2.make_entry(
DN(
('cn', 'Full Principal'), ('cn', 'mapping'), ('cn', 'sasl'),
('cn', 'config')),
objectclass=["top", "nsSaslMapping"],
cn=["Full Principal"],
nsSaslMapRegexString=[r'\(.*\)@\(.*\)'],
nsSaslMapBaseDNTemplate=[self.suffix],
nsSaslMapFilterTemplate=['(krbPrincipalName=\\1@\\2)'],
nsSaslMapPriority=['10'],
)
api.Backend.ldap2.add_entry(entry)
entry = api.Backend.ldap2.make_entry(
DN(
('cn', 'Name Only'), ('cn', 'mapping'), ('cn', 'sasl'),
('cn', 'config')),
objectclass=["top", "nsSaslMapping"],
cn=["Name Only"],
nsSaslMapRegexString=['^[^:@]+$'],
nsSaslMapBaseDNTemplate=[self.suffix],
nsSaslMapFilterTemplate=['(krbPrincipalName=&@%s)' % self.realm],
nsSaslMapPriority=['10'],
)
api.Backend.ldap2.add_entry(entry)
def __update_schema(self):
# FIXME: https://fedorahosted.org/389/ticket/47490
self._ldap_mod("schema-update.ldif")
def __enable(self):
self.backup_state("enabled", self.is_enabled())
# At the end of the installation ipa-server-install will enable the
        # 'ipa' service which takes care of starting/stopping dirsrv
self.disable()
def __setup_sub_dict(self):
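        # Substitution values applied to the LDIF templates used during setup.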
server_root = find_server_root()
try:
idrange_size = self.idmax - self.idstart + 1
except TypeError:
idrange_size = None
self.sub_dict = dict(
FQDN=self.fqdn, SERVERID=self.serverid,
PASSWORD=self.dm_password,
RANDOM_PASSWORD=ipautil.ipa_generate_password(),
SUFFIX=self.suffix,
REALM=self.realm, USER=DS_USER,
SERVER_ROOT=server_root, DOMAIN=self.domain,
TIME=int(time.time()), IDSTART=self.idstart,
IDMAX=self.idmax, HOST=self.fqdn,
ESCAPED_SUFFIX=str(self.suffix),
GROUP=DS_GROUP,
IDRANGE_SIZE=idrange_size,
DOMAIN_LEVEL=self.domainlevel,
MAX_DOMAIN_LEVEL=constants.MAX_DOMAIN_LEVEL,
MIN_DOMAIN_LEVEL=constants.MIN_DOMAIN_LEVEL,
STRIP_ATTRS=" ".join(replication.STRIP_ATTRS),
EXCLUDES='(objectclass=*) $ EXCLUDE ' +
' '.join(replication.EXCLUDES),
TOTAL_EXCLUDES='(objectclass=*) $ EXCLUDE ' +
' '.join(replication.TOTAL_EXCLUDES),
DEFAULT_SHELL=platformconstants.DEFAULT_SHELL,
DEFAULT_ADMIN_SHELL=platformconstants.DEFAULT_ADMIN_SHELL,
SELINUX_USERMAP_DEFAULT=platformconstants.SELINUX_USERMAP_DEFAULT,
SELINUX_USERMAP_ORDER=platformconstants.SELINUX_USERMAP_ORDER,
)
def __create_instance(self):
self.backup_state("serverid", self.serverid)
# The new installer is api driven. We can pass it a log function
# and it will use it. Because of this, we can pass verbose true,
# and allow our logger to control the display based on level.
sds = SetupDs(verbose=True, dryrun=False, log=logger)
# General environmental options.
general_options = General2Base(logger)
general_options.set('strict_host_checking', False)
# Check that our requested configuration is actually valid ...
general_options.verify()
general = general_options.collect()
# Slapd options, ie instance name.
slapd_options = Slapd2Base(logger)
slapd_options.set('instance_name', self.serverid)
slapd_options.set('root_password', self.dm_password)
slapd_options.verify()
slapd = slapd_options.collect()
# Create userroot. Note that the new install does NOT
# create sample entries, so this is *empty*.
userroot = {
'cn': 'userRoot',
'nsslapd-suffix': self.suffix.ldap_text()
}
backends = [userroot]
sds.create_from_args(general, slapd, backends, None)
# Now create the new domain root object in the format that IPA expects.
# Get the instance ....
inst = DirSrv(verbose=True, external_log=logger)
inst.local_simple_allocate(
serverid=self.serverid,
ldapuri=ipaldap.get_ldap_uri(realm=self.realm, protocol='ldapi'),
password=self.dm_password
)
# local_simple_allocate() configures LDAPI but doesn't set up the
# DirSrv object to use LDAPI. Modify the DirSrv() object to use
# LDAPI with password bind. autobind is not available, yet.
inst.ldapi_enabled = 'on'
inst.ldapi_socket = paths.SLAPD_INSTANCE_SOCKET_TEMPLATE % (
self.serverid
)
inst.ldapi_autobind = 'off'
# This actually opens the conn and binds.
inst.open()
try:
ipadomain = IpaDomain(inst, dn=self.suffix.ldap_text())
ipadomain.create(properties={
'dc': self.realm.split('.')[0].lower(),
'info': 'IPA V2.0',
})
finally:
inst.close()
# Done!
logger.debug("completed creating DS instance")
def __update_dse_ldif(self):
"""
        This method updates dse.ldif right after instance creation. It is
        meant to let the admin modify the DS configuration that has to be
        in place before IPA is fully installed (for example: settings for
        replication on replicas).
DS must be turned off.
"""
dse_filename = os.path.join(
paths.ETC_DIRSRV_SLAPD_INSTANCE_TEMPLATE % self.serverid,
'dse.ldif'
)
with tempfile.NamedTemporaryFile(
mode='w', delete=False) as new_dse_ldif:
temp_filename = new_dse_ldif.name
with open(dse_filename, "r") as input_file:
parser = installutils.ModifyLDIF(input_file, new_dse_ldif)
parser.replace_value(
'cn=config,cn=ldbm database,cn=plugins,cn=config',
'nsslapd-db-locks',
[b'50000']
)
if self.config_ldif:
# parse modifications from ldif file supplied by the admin
with open(self.config_ldif, "r") as config_ldif:
parser.modifications_from_ldif(config_ldif)
parser.parse()
new_dse_ldif.flush()
shutil.copy2(temp_filename, dse_filename)
tasks.restore_context(dse_filename)
try:
os.remove(temp_filename)
except OSError as e:
logger.debug("Failed to clean temporary file: %s", e)
def __add_default_schemas(self):
pent = pwd.getpwnam(DS_USER)
for schema_fname in IPA_SCHEMA_FILES:
target_fname = schema_dirname(self.serverid) + schema_fname
shutil.copyfile(
os.path.join(paths.USR_SHARE_IPA_DIR, schema_fname),
target_fname)
os.chmod(target_fname, 0o440) # read access for dirsrv user/group
os.chown(target_fname, pent.pw_uid, pent.pw_gid)
try:
shutil.move(schema_dirname(self.serverid) + "05rfc2247.ldif",
schema_dirname(self.serverid) + "05rfc2247.ldif.old")
target_fname = schema_dirname(self.serverid) + "05rfc2247.ldif"
shutil.copyfile(
os.path.join(paths.USR_SHARE_IPA_DIR, "05rfc2247.ldif"),
target_fname)
os.chmod(target_fname, 0o440)
os.chown(target_fname, pent.pw_uid, pent.pw_gid)
except IOError:
# Does not apply with newer DS releases
pass
def start(self, instance_name="", capture_output=True, wait=True):
super(DsInstance, self).start(
instance_name, capture_output=capture_output, wait=wait
)
api.Backend.ldap2.connect()
def stop(self, instance_name="", capture_output=True):
if api.Backend.ldap2.isconnected():
api.Backend.ldap2.disconnect()
super(DsInstance, self).stop(
instance_name, capture_output=capture_output
)
def restart(self, instance_name="", capture_output=True, wait=True):
api.Backend.ldap2.disconnect()
try:
super(DsInstance, self).restart(
instance_name, capture_output=capture_output, wait=wait
)
if not is_ds_running(instance_name):
logger.critical("Failed to restart the directory server. "
"See the installation log for details.")
raise ScriptError()
except SystemExit as e:
raise e
except Exception as e:
# TODO: roll back here?
logger.critical("Failed to restart the directory server (%s). "
"See the installation log for details.", e)
api.Backend.ldap2.connect()
def __start_instance(self):
self.start(self.serverid)
def __stop_instance(self):
self.stop(self.serverid)
def __restart_instance(self):
self.restart(self.serverid)
def __enable_entryusn(self):
self._ldap_mod("entryusn.ldif")
def __add_memberof_module(self):
self._ldap_mod("memberof-conf.ldif")
def init_memberof(self):
if not self.run_init_memberof:
return
self._ldap_mod("memberof-task.ldif", self.sub_dict)
# Note, keep dn in sync with dn in install/share/memberof-task.ldif
dn = DN(('cn', 'IPA install %s' % self.sub_dict["TIME"]), ('cn', 'memberof task'),
('cn', 'tasks'), ('cn', 'config'))
logger.debug("Waiting for memberof task to complete.")
with ipaldap.LDAPClient.from_realm(self.realm) as conn:
conn.external_bind()
replication.wait_for_task(conn, dn)
def apply_updates(self):
schema_files = get_all_external_schema_files(paths.EXTERNAL_SCHEMA_DIR)
data_upgrade = upgradeinstance.IPAUpgrade(self.realm,
schema_files=schema_files)
try:
data_upgrade.create_instance()
except Exception as e:
# very fatal errors only will raise exception
raise RuntimeError("Update failed: %s" % e)
installutils.store_version()
def __add_referint_module(self):
self._ldap_mod("referint-conf.ldif")
def __set_unique_attrs(self):
self._ldap_mod("unique-attributes.ldif", self.sub_dict)
def __config_uidgid_gen(self):
self._ldap_mod("dna.ldif", self.sub_dict)
def __add_master_entry(self):
self._ldap_mod("master-entry.ldif", self.sub_dict)
def __add_topology_entries(self):
self._ldap_mod("topology-entries.ldif", self.sub_dict)
def __add_winsync_module(self):
self._ldap_mod("ipa-winsync-conf.ldif")
def __password_logging(self):
self._ldap_mod("pw-logging-conf.ldif")
def __config_version_module(self):
self._ldap_mod("version-conf.ldif")
def __config_uuid_module(self):
self._ldap_mod("uuid-conf.ldif")
self._ldap_mod("uuid.ldif", self.sub_dict)
def __config_modrdn_module(self):
self._ldap_mod("modrdn-conf.ldif")
self._ldap_mod("modrdn-krbprinc.ldif", self.sub_dict)
def __config_dns_module(self):
        # Configure the DNS plugin unconditionally, as we would otherwise run
        # into trouble if another replica has just configured DNS with
        # ipa-dns-install
self._ldap_mod("ipa-dns-conf.ldif")
def __config_lockout_module(self):
self._ldap_mod("lockout-conf.ldif")
def __config_topology_module(self):
self._ldap_mod("ipa-topology-conf.ldif", self.sub_dict)
def __repoint_managed_entries(self):
self._ldap_mod("repoint-managed-entries.ldif", self.sub_dict)
def configure_systemd_ipa_env(self):
pent = pwd.getpwnam(platformconstants.DS_USER)
template = os.path.join(
paths.USR_SHARE_IPA_DIR, "ds-ipa-env.conf.template"
)
sub_dict = dict(
KRB5_KTNAME=paths.DS_KEYTAB,
KRB5CCNAME=paths.TMP_KRB5CC % pent.pw_uid
)
conf = ipautil.template_file(template, sub_dict)
destfile = paths.SLAPD_INSTANCE_SYSTEMD_IPA_ENV_TEMPLATE % (
self.serverid
)
destdir = os.path.dirname(destfile)
if not os.path.isdir(destdir):
# create dirsrv-$SERVERID.service.d
os.mkdir(destdir, 0o755)
with open(destfile, 'w') as f:
os.fchmod(f.fileno(), 0o644)
f.write(conf)
tasks.restore_context(destfile)
# remove variables from old /etc/sysconfig/dirsrv file
if os.path.isfile(paths.SYSCONFIG_DIRSRV):
self.fstore.backup_file(paths.SYSCONFIG_DIRSRV)
ipautil.config_replace_variables(
paths.SYSCONFIG_DIRSRV,
removevars={'KRB5_KTNAME', 'KRB5CCNAME'}
)
# reload systemd to materialize new config file
tasks.systemd_daemon_reload()
def __managed_entries(self):
self._ldap_mod("managed-entries.ldif", self.sub_dict)
def __user_private_groups(self):
self._ldap_mod("user_private_groups.ldif", self.sub_dict)
def __host_nis_groups(self):
self._ldap_mod("host_nis_groups.ldif", self.sub_dict)
def __add_enrollment_module(self):
self._ldap_mod("enrollment-conf.ldif", self.sub_dict)
def __enable_ssl(self):
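        # Set up the NSS certificate database, obtain and (optionally) track
        # the server certificate, then enable LDAPS on port 636.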
dirname = config_dirname(self.serverid)
dsdb = certs.CertDB(
self.realm,
nssdir=dirname,
subject_base=self.subject_base,
ca_subject=self.ca_subject,
)
if self.pkcs12_info:
if self.ca_is_configured:
trust_flags = IPA_CA_TRUST_FLAGS
else:
trust_flags = EXTERNAL_CA_TRUST_FLAGS
dsdb.create_from_pkcs12(self.pkcs12_info[0], self.pkcs12_info[1],
ca_file=self.ca_file,
trust_flags=trust_flags)
# rewrite the pin file with current password
dsdb.create_pin_file()
server_certs = dsdb.find_server_certs()
if len(server_certs) == 0:
raise RuntimeError("Could not find a suitable server cert in import in %s" % self.pkcs12_info[0])
# We only handle one server cert
self.nickname = server_certs[0][0]
self.cert = dsdb.get_cert_from_db(self.nickname)
if self.ca_is_configured:
dsdb.track_server_cert(
self.nickname, self.principal, dsdb.passwd_fname,
'restart_dirsrv %s' % self.serverid)
self.add_cert_to_service()
else:
dsdb.create_from_cacert()
# rewrite the pin file with current password
dsdb.create_pin_file()
if self.master_fqdn is None:
ca_args = [
paths.CERTMONGER_DOGTAG_SUBMIT,
'--ee-url', 'https://%s:8443/ca/ee/ca' % self.fqdn,
'--certfile', paths.RA_AGENT_PEM,
'--keyfile', paths.RA_AGENT_KEY,
'--cafile', paths.IPA_CA_CRT,
'--agent-submit'
]
helper = " ".join(ca_args)
prev_helper = certmonger.modify_ca_helper('IPA', helper)
else:
prev_helper = None
try:
cmd = 'restart_dirsrv %s' % self.serverid
certmonger.request_and_wait_for_cert(
certpath=dirname,
storage='NSSDB',
nickname=self.nickname,
principal=self.principal,
passwd_fname=dsdb.passwd_fname,
subject=str(DN(('CN', self.fqdn), self.subject_base)),
ca='IPA',
profile=dogtag.DEFAULT_PROFILE,
dns=[self.fqdn],
post_command=cmd,
resubmit_timeout=api.env.certmonger_wait_timeout
)
finally:
if prev_helper is not None:
certmonger.modify_ca_helper('IPA', prev_helper)
# restart_dirsrv in the request above restarts DS, reconnect ldap2
api.Backend.ldap2.disconnect()
api.Backend.ldap2.connect()
self.cert = dsdb.get_cert_from_db(self.nickname)
if prev_helper is not None:
self.add_cert_to_service()
self.cacert_name = dsdb.cacert_name
# use LDAPI?
conn = ipaldap.LDAPClient.from_realm(self.realm)
conn.external_bind()
encrypt_entry = conn.make_entry(
DN(('cn', 'encryption'), ('cn', 'config')),
nsSSLClientAuth=b'allowed',
nsSSL3Ciphers=b'default',
allowWeakCipher=b'off'
)
try:
conn.update_entry(encrypt_entry)
except errors.EmptyModlist:
logger.debug(
"cn=encryption,cn=config is already properly configured")
conf_entry = conn.make_entry(
DN(('cn', 'config')),
            # one does not simply use '-' in a variable name
**{'nsslapd-security': b'on'}
)
try:
conn.update_entry(conf_entry)
except errors.EmptyModlist:
logger.debug("nsslapd-security is already on")
entry = conn.make_entry(
DN(('cn', 'RSA'), ('cn', 'encryption'), ('cn', 'config')),
objectclass=["top", "nsEncryptionModule"],
cn=["RSA"],
nsSSLPersonalitySSL=[self.nickname],
nsSSLToken=["internal (software)"],
nsSSLActivation=["on"],
)
try:
conn.add_entry(entry)
except errors.DuplicateEntry:
# 389-DS >= 1.4.0 has a default entry, update it.
conn.update_entry(entry)
conn.unbind()
# check for open secure port 636 from now on
self.open_ports.append(636)
def __upload_ca_cert(self):
"""
Upload the CA certificate from the NSS database to the LDAP directory.
"""
dirname = config_dirname(self.serverid)
dsdb = certs.CertDB(self.realm, nssdir=dirname,
subject_base=self.subject_base)
trust_flags = dict(reversed(dsdb.list_certs()))
conn = ipaldap.LDAPClient.from_realm(self.realm)
conn.external_bind()
nicknames = dsdb.find_root_cert(self.cacert_name)[:-1]
for nickname in nicknames:
cert = dsdb.get_cert_from_db(nickname)
certstore.put_ca_cert_nss(conn, self.suffix, cert, nickname,
trust_flags[nickname])
nickname = self.cacert_name
cert = dsdb.get_cert_from_db(nickname)
cacert_flags = trust_flags[nickname]
if self.setup_pkinit:
cacert_flags = TrustFlags(
cacert_flags.has_key,
cacert_flags.trusted,
cacert_flags.ca,
(cacert_flags.usages |
{x509.EKU_PKINIT_CLIENT_AUTH, x509.EKU_PKINIT_KDC}),
)
certstore.put_ca_cert_nss(conn, self.suffix, cert, nickname,
cacert_flags,
config_ipa=self.ca_is_configured,
config_compat=self.master_fqdn is None)
conn.unbind()
def __import_ca_certs(self):
dirname = config_dirname(self.serverid)
dsdb = certs.CertDB(self.realm, nssdir=dirname,
subject_base=self.subject_base)
with ipaldap.LDAPClient.from_realm(self.realm) as conn:
conn.external_bind()
self.export_ca_certs_nssdb(dsdb, self.ca_is_configured, conn)
def __add_default_layout(self):
self._ldap_mod("bootstrap-template.ldif", self.sub_dict)
def __add_delegation_layout(self):
self._ldap_mod("delegation.ldif", self.sub_dict)
def __add_replication_acis(self):
self._ldap_mod("replica-acis.ldif", self.sub_dict)
def __replica_ignore_initial_time_skew(self):
self.replica_manage_time_skew(prevent=False)
def replica_manage_time_skew(self, prevent=True):
if prevent:
self.sub_dict['SKEWVALUE'] = 'off'
else:
self.sub_dict['SKEWVALUE'] = 'on'
self._ldap_mod("replica-prevent-time-skew.ldif", self.sub_dict)
def __setup_s4u2proxy(self):
def __add_principal(last_cn, principal, self):
dn = DN(('cn', last_cn), ('cn', 's4u2proxy'),
('cn', 'etc'), self.suffix)
value = '{principal}/{fqdn}@{realm}'.format(fqdn=self.fqdn,
realm=self.realm,
principal=principal)
entry = api.Backend.ldap2.get_entry(dn, ['memberPrincipal'])
try:
entry['memberPrincipal'].append(value)
api.Backend.ldap2.update_entry(entry)
except errors.EmptyModlist:
pass
__add_principal('ipa-http-delegation', 'HTTP', self)
__add_principal('ipa-ldap-delegation-targets', 'ldap', self)
def __create_indices(self):
self._ldap_mod("indices.ldif")
def __certmap_conf(self):
write_certmap_conf(self.realm, self.ca_subject)
sysupgrade.set_upgrade_state(
'certmap.conf',
'subject_base',
str(self.subject_base)
)
def __enable_sasl_mapping_fallback(self):
self._ldap_mod("sasl-mapping-fallback.ldif", self.sub_dict)
def add_hbac(self):
self._ldap_mod("default-hbac.ldif", self.sub_dict)
def change_admin_password(self, password):
logger.debug("Changing admin password")
dir_ipa = paths.VAR_LIB_IPA
with tempfile.NamedTemporaryFile("w", dir=dir_ipa) as dmpwdfile, \
tempfile.NamedTemporaryFile("w", dir=dir_ipa) as admpwdfile:
dmpwdfile.write(self.dm_password)
dmpwdfile.flush()
admpwdfile.write(password)
admpwdfile.flush()
args = [paths.LDAPPASSWD, "-h", self.fqdn,
"-ZZ", "-x", "-D", str(DN(('cn', 'Directory Manager'))),
"-y", dmpwdfile.name, "-T", admpwdfile.name,
str(DN(('uid', 'admin'), ('cn', 'users'), ('cn', 'accounts'), self.suffix))]
try:
env = {'LDAPTLS_CACERTDIR': os.path.dirname(paths.IPA_CA_CRT),
'LDAPTLS_CACERT': paths.IPA_CA_CRT}
ipautil.run(args, env=env)
logger.debug("ldappasswd done")
except ipautil.CalledProcessError as e:
print("Unable to set admin password", e)
logger.debug("Unable to set admin password %s", e)
def uninstall(self):
if self.is_configured():
self.print_msg("Unconfiguring directory server")
enabled = self.restore_state("enabled")
# Just eat this state if it exists
self.restore_state("running")
try:
self.fstore.restore_file(paths.LIMITS_CONF)
except ValueError as error:
logger.debug("%s: %s", paths.LIMITS_CONF, error)
try:
self.fstore.restore_file(paths.SYSCONFIG_DIRSRV)
except ValueError as error:
logger.debug("%s: %s", paths.SYSCONFIG_DIRSRV, error)
# disabled during IPA installation
if enabled:
logger.debug("Re-enabling instance of Directory Server")
self.enable()
serverid = self.restore_state("serverid")
if serverid is not None:
# What if this fails? Then what?
self.stop_tracking_certificates(serverid)
logger.debug("Removing DS instance %s", serverid)
try:
remove_ds_instance(serverid)
except ipautil.CalledProcessError:
logger.error("Failed to remove DS instance. You may "
"need to remove instance data manually")
else:
logger.error("Failed to remove DS instance. No serverid present "
"in sysrestore file.")
ipautil.remove_keytab(paths.DS_KEYTAB)
ipautil.remove_ccache(run_as=DS_USER)
if serverid is None:
# Remove scripts dir
scripts = paths.VAR_LIB_DIRSRV_INSTANCE_SCRIPTS_TEMPLATE % (
serverid)
ipautil.rmtree(scripts)
# remove systemd unit file
unitfile = paths.SLAPD_INSTANCE_SYSTEMD_IPA_ENV_TEMPLATE % (
serverid
)
ipautil.remove_file(unitfile)
try:
os.rmdir(os.path.dirname(unitfile))
except OSError:
# not empty
pass
# Just eat this state
self.restore_state("user_exists")
# Make sure some upgrade-related state is removed. This could cause
# re-installation problems.
self.restore_state('nsslapd-port')
self.restore_state('nsslapd-security')
self.restore_state('nsslapd-ldapiautobind')
# If any dirsrv instances remain after we've removed ours then
# (re)start them.
for ds_instance in get_ds_instances():
try:
services.knownservices.dirsrv.restart(ds_instance, wait=False)
except Exception as e:
logger.error(
'Unable to restart DS instance %s: %s', ds_instance, e)
def get_server_cert_nickname(self, serverid=None):
"""
Retrieve the nickname of the server cert used by dirsrv.
The method directly reads the dse.ldif to find the attribute
nsSSLPersonalitySSL of cn=RSA,cn=encryption,cn=config because
LDAP is not always accessible when we need to get the nickname
(for instance during uninstall).
"""
if serverid is None:
serverid = self.get_state("serverid")
if serverid is not None:
dirname = config_dirname(serverid)
config_file = os.path.join(dirname, "dse.ldif")
rsa_dn = "cn=RSA,cn=encryption,cn=config"
with open(config_file, "r") as in_file:
parser = upgradeinstance.GetEntryFromLDIF(
in_file,
entries_dn=[rsa_dn])
parser.parse()
try:
config_entry = parser.get_results()[rsa_dn]
nickname = config_entry["nsSSLPersonalitySSL"][0]
return nickname.decode('utf-8')
except (KeyError, IndexError):
logger.error("Unable to find server cert nickname in %s",
config_file)
logger.debug("Falling back to nickname Server-Cert")
return 'Server-Cert'
def stop_tracking_certificates(self, serverid=None):
if serverid is None:
serverid = self.get_state("serverid")
if serverid is not None:
nickname = self.get_server_cert_nickname(serverid)
# drop the trailing / off the config_dirname so the directory
# will match what is in certmonger
dirname = config_dirname(serverid)[:-1]
dsdb = certs.CertDB(self.realm, nssdir=dirname)
dsdb.untrack_server_cert(nickname)
def start_tracking_certificates(self, serverid):
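        # Only ask certmonger to track the DS server certificate when it was
        # issued by IPA.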
nickname = self.get_server_cert_nickname(serverid)
dirname = config_dirname(serverid)[:-1]
dsdb = certs.CertDB(self.realm, nssdir=dirname)
if dsdb.is_ipa_issued_cert(api, nickname):
dsdb.track_server_cert(
nickname,
self.principal,
password_file=dsdb.passwd_fname,
command='restart_dirsrv %s' % serverid,
profile=dogtag.DEFAULT_PROFILE)
else:
logger.debug("Will not track DS server certificate %s as it is "
"not issued by IPA", nickname)
# we could probably move this function into the service.Service
# class - it's very generic - all we need is a way to get an
# instance of a particular Service
def add_ca_cert(self, cacert_fname, cacert_name=''):
"""Add a CA certificate to the directory server cert db. We
first have to shut down the directory server in case it has
opened the cert db read-only. Then we use the CertDB class
to add the CA cert. We have to provide a nickname, and we
do not use 'IPA CA' since that's the default, so
we use 'Imported CA' if none specified. Then we restart
the server."""
# first make sure we have a valid cacert_fname
try:
if not os.access(cacert_fname, os.R_OK):
logger.critical("The given CA cert file named [%s] could not "
"be read", cacert_fname)
return False
except OSError as e:
logger.critical("The given CA cert file named [%s] could not "
"be read: %s", cacert_fname, str(e))
return False
# ok - ca cert file can be read
# shutdown the server
self.stop()
dirname = config_dirname(
ipaldap.realm_to_serverid(self.realm))
certdb = certs.CertDB(
self.realm,
nssdir=dirname,
subject_base=self.subject_base,
ca_subject=self.ca_subject,
)
if not cacert_name or len(cacert_name) == 0:
cacert_name = "Imported CA"
# we can't pass in the nickname, so we set the instance variable
certdb.cacert_name = cacert_name
status = True
try:
certdb.load_cacert(cacert_fname, EXTERNAL_CA_TRUST_FLAGS)
except ipautil.CalledProcessError as e:
logger.critical("Error importing CA cert file named [%s]: %s",
cacert_fname, str(e))
status = False
# restart the directory server
self.start()
return status
def __root_autobind(self):
self._ldap_mod(
"root-autobind.ldif",
ldap_uri=ipaldap.get_ldap_uri(realm=self.realm, protocol='ldapi'),
# must simple bind until auto bind is configured
dm_password=self.dm_password
)
def __add_sudo_binduser(self):
self._ldap_mod("sudobind.ldif", self.sub_dict)
def __add_automember_config(self):
self._ldap_mod("automember.ldif", self.sub_dict)
def __add_replica_automember_config(self):
self._ldap_mod("replica-automember.ldif", self.sub_dict)
def __add_range_check_plugin(self):
self._ldap_mod("range-check-conf.ldif", self.sub_dict)
def _add_sidgen_plugin(self):
"""
Add sidgen directory server plugin configuration if it does not already exist.
"""
self.add_sidgen_plugin(self.sub_dict['SUFFIX'])
def add_sidgen_plugin(self, suffix):
"""
Add sidgen plugin configuration only if it does not already exist.
"""
dn = DN('cn=IPA SIDGEN,cn=plugins,cn=config')
try:
api.Backend.ldap2.get_entry(dn)
except errors.NotFound:
self._ldap_mod('ipa-sidgen-conf.ldif', dict(SUFFIX=suffix))
else:
logger.debug("sidgen plugin is already configured")
def _add_extdom_plugin(self):
"""
Add directory server configuration for the extdom extended operation.
"""
self.add_extdom_plugin(self.sub_dict['SUFFIX'])
def add_extdom_plugin(self, suffix):
"""
Add extdom configuration if it does not already exist.
"""
dn = DN('cn=ipa_extdom_extop,cn=plugins,cn=config')
try:
api.Backend.ldap2.get_entry(dn)
except errors.NotFound:
self._ldap_mod('ipa-extdom-extop-conf.ldif', dict(SUFFIX=suffix))
else:
logger.debug("extdom plugin is already configured")
def find_subject_base(self):
"""
Try to find the current value of certificate subject base.
1) Look in sysupgrade first
2) If no value is found there, look in DS (start DS if necessary)
3) If all fails, log loudly and return None
Note that this method can only be executed AFTER the ipa server
is configured, the api is initialized elsewhere and
        a ticket has already been acquired.
"""
logger.debug(
'Trying to find certificate subject base in sysupgrade')
subject_base = sysupgrade.get_upgrade_state(
'certmap.conf', 'subject_base')
if subject_base:
logger.debug(
'Found certificate subject base in sysupgrade: %s',
subject_base)
return subject_base
logger.debug(
'Unable to find certificate subject base in sysupgrade')
logger.debug(
'Trying to find certificate subject base in DS')
ds_is_running = is_ds_running()
if not ds_is_running:
try:
self.start()
ds_is_running = True
except ipautil.CalledProcessError as e:
logger.error('Cannot start DS to find certificate '
'subject base: %s', e)
if ds_is_running:
try:
ret = api.Command['config_show']()
subject_base = str(
ret['result']['ipacertificatesubjectbase'][0])
logger.debug(
'Found certificate subject base in DS: %s', subject_base)
except errors.PublicError as e:
logger.error('Cannot connect to DS to find certificate '
'subject base: %s', e)
if subject_base:
return subject_base
logger.debug('Unable to find certificate subject base in certmap.conf')
return None
def __set_domain_level(self):
# Create global domain level entry and set the domain level
if self.domainlevel is not None:
self._ldap_mod("domainlevel.ldif", self.sub_dict)
def write_certmap_conf(realm, ca_subject):
"""(Re)write certmap.conf with given CA subject DN."""
serverid = ipaldap.realm_to_serverid(realm)
ds_dirname = config_dirname(serverid)
certmap_filename = os.path.join(ds_dirname, "certmap.conf")
shutil.copyfile(
os.path.join(paths.USR_SHARE_IPA_DIR, "certmap.conf.template"),
certmap_filename)
installutils.update_file(
certmap_filename,
'$ISSUER_DN', # lgtm [py/regex/unmatchable-dollar]
str(ca_subject)
)
| gpl-3.0 | 5,848,009,297,824,252,000 | 37.734783 | 113 | 0.585644 | false |
SAPikachu/nyapass | nyapass-client.py | 1 | 1513 | #!/usr/bin/env python3
import sys
import logging
from common import nyapass_run_instances, Nyapass
from local import ClientHandlerManager
from local_socks5 import SocksClientHandler
from config import Config
def main():
config = Config("client")
logging.basicConfig(level=config.log_level)
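    # Create one Nyapass listener per enabled frontend (default local port,
    # SOCKS5, shadowsocks).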
instances = []
if config.port:
instances.append(Nyapass(
handler_factory=ClientHandlerManager(config),
config=config,
))
if config.socks5_port:
instances.append(Nyapass(
handler_factory=ClientHandlerManager(
config,
handler_cls=SocksClientHandler,
),
config=config,
port=config.socks5_port,
))
if config.shadowsocks_port:
try:
from local_shadowsocks import ShadowsocksClientHandler
except ImportError:
logging.warning(
"Shadowsocks is not installed, "
"can't enable shadowsocks handler"
)
else:
instances.append(Nyapass(
handler_factory=ClientHandlerManager(
config,
handler_cls=ShadowsocksClientHandler,
),
config=config,
port=config.shadowsocks_port,
))
if not instances:
logging.error("All handlers are disabled")
sys.exit(1)
nyapass_run_instances(config, *instances)
if __name__ == "__main__":
main()
| gpl-3.0 | -1,051,527,714,603,912,100 | 25.086207 | 66 | 0.57766 | false |
acgtun/acgtun.com | acgtun/leetcode/views.py | 1 | 1338 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import sys
from collections import OrderedDict
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.conf import settings
from . import db_table
from database.database import Database
sys.path.append(os.path.join(settings.BASE_DIR, 'common'))
sys.path.append(os.path.join(settings.BASE_DIR, 'database'))
db_path = os.path.join(settings.BASE_DIR, 'database')
def get_solution(response):
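    # Read all solutions from the SQLite database and group them by problem
    # name and language before rendering the template.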
db = Database(os.path.join(db_path, 'db.sqlite3'))
solutions = db.query("SELECT id,problem,cpptime,cppcode,javatime,javacode,pythontime,pythoncode FROM {}".format(
db_table.leetcode_solution_table))
problems = OrderedDict()
for r in solutions:
pn = r[1]
pn = pn.rstrip()
if pn not in problems.keys():
problems[pn] = OrderedDict()
problems[pn]['cpp'] = r[3]
problems[pn]['java'] = r[5]
problems[pn]['python'] = r[7]
problems = OrderedDict(sorted(problems.items(), key=lambda t: t[0]))
return response.write(render_to_string('leetcode/index.html', {'problems': problems}))
def index(request):
    response = HttpResponse()
get_solution(response)
response.close()
return response
| gpl-2.0 | -7,404,087,127,536,536,000 | 28.733333 | 116 | 0.683857 | false |
cvandeplas/plaso | plaso/parsers/olecf_plugins/summary_test.py | 1 | 4825 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the OLE Compound File summary and document summary plugins."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import olecf as olecf_formatter
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers.olecf_plugins import summary
from plaso.parsers.olecf_plugins import test_lib
class TestSummaryInfoOlecfPlugin(test_lib.OleCfPluginTestCase):
"""Tests for the OLECF summary information plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._summary_plugin = summary.SummaryInfoOlecfPlugin()
self._test_file = self._GetTestFilePath(['Document.doc'])
def testProcess(self):
"""Tests the Process function on a SummaryInformation stream."""
event_queue_consumer = self._ParseOleCfFileWithPlugin(
self._test_file, self._summary_plugin)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
# There is one summary info stream with three event objects.
self.assertEquals(len(event_objects), 3)
event_object = event_objects[0]
self.assertEquals(event_object.name, u'Summary Information')
self.assertEquals(event_object.title, u'Table of Context')
self.assertEquals(event_object.author, u'DAVID NIDES')
self.assertEquals(event_object.template, u'Normal.dotm')
self.assertEquals(event_object.last_saved_by, u'Nides')
self.assertEquals(event_object.revision_number, u'4')
self.assertEquals(event_object.number_of_characters, 18)
self.assertEquals(event_object.application, u'Microsoft Office Word')
self.assertEquals(event_object.security, 0)
self.assertEquals(event_object.timestamp_desc, u'Document Creation Time')
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2012-12-10 18:38:00')
self.assertEquals(event_object.timestamp, expected_timestamp)
expected_msg = (
u'Title: Table of Context '
u'Author: DAVID NIDES '
u'Template: Normal.dotm '
u'Revision number: 4 '
u'Last saved by: Nides '
u'Number of pages: 1 '
u'Number of words: 3 '
u'Number of characters: 18 '
u'Application: Microsoft Office Word '
u'Security: 0')
expected_msg_short = (
u'Title: Table of Context '
u'Author: DAVID NIDES '
u'Revision number: 4')
# TODO: add support for:
# u'Total edit time (secs): 0 '
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
class TestDocumentSummaryInfoOlecfPlugin(test_lib.OleCfPluginTestCase):
"""Tests for the OLECF document summary information plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._document_summary_plugin = summary.DocumentSummaryOlecfPlugin()
self._test_file = self._GetTestFilePath(['Document.doc'])
def testProcess(self):
"""Tests the Process function on a DocumentSummaryInformation stream."""
event_queue_consumer = self._ParseOleCfFileWithPlugin(
self._test_file, self._document_summary_plugin)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
# There should only be one summary info stream with one event.
self.assertEquals(len(event_objects), 1)
event_object = event_objects[0]
self.assertEquals(event_object.name, u'Document Summary Information')
self.assertEquals(event_object.number_of_lines, 1)
self.assertEquals(event_object.number_of_paragraphs, 1)
self.assertEquals(event_object.company, u'KPMG')
self.assertFalse(event_object.shared_document)
self.assertEquals(event_object.application_version, u'14.0')
# TODO: add support for:
# self.assertEquals(event_object.is_shared, False)
expected_msg = (
u'Number of lines: 1 '
u'Number of paragraphs: 1 '
u'Company: KPMG '
u'Shared document: False '
u'Application version: 14.0')
expected_msg_short = (
u'Company: KPMG')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -707,528,440,018,639,700 | 36.115385 | 79 | 0.712124 | false |
mancoast/CPythonPyc_test | cpython/241_regrtest.py | 1 | 35276 | #! /usr/bin/env python
"""Regression test.
This will find all modules whose name is "test_*" in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-q: quiet -- don't print anything except if a test fails
-g: generate -- write the output file for a test instead of comparing it
-x: exclude -- arguments are tests to *exclude*
-s: single -- run only a single test (see below)
-r: random -- randomize test execution order
-f: fromfile -- read names of tests to run from a file (see below)
-l: findleaks -- if GC is available detect tests that leak memory
-u: use -- specify which special resource intensive tests to run
-h: help -- print this text and exit
-t: threshold -- call gc.set_threshold(N)
-T: coverage -- turn on code coverage using the trace module
-D: coverdir -- Directory where coverage files are put
-N: nocoverdir -- Put coverage files alongside modules
-L: runleaks -- run the leaks(1) command just before exit
-R: huntrleaks -- search for reference leaks (needs debug build, v. slow)
If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.
-v is incompatible with -g and does not compare test output files.
-T turns on code coverage tracing with the trace module.
-D specifies the directory where coverage files are put.
-N Put coverage files alongside modules.
-s means to run only a single test and exit. This is useful when
doing memory analysis on the Python interpreter (which tend to consume
too many resources to run the full regression test non-stop). The
file /tmp/pynexttest is read to find the next test to run. If this
file is missing, the first test_*.py file in testdir or on the command
line is used. (actually tempfile.gettempdir() is used instead of
/tmp).
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), so the minimal invocation is '-R ::'.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
compiler - Test the compiler package by compiling all the source
in the standard library and test suite. This takes
a long time.
subprocess Run all tests for the subprocess module.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
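Example invocations (illustrative; the test names are placeholders):
    python regrtest.py -v test_grammar test_types
    python regrtest.py -uall,-bsddb -r
    python regrtest.py -R 5:4:reflog.txt test_gc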
"""
import os
import sys
import getopt
import random
import warnings
import sre
import cStringIO
import traceback
# I see no other way to suppress these warnings;
# putting them in test_grammar.py has no effect:
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
".*test.test_grammar$")
if sys.maxint > 0x7fffffff:
# Also suppress them in <string>, because for 64-bit platforms,
# that's where test_grammar.py hides them.
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
"<string>")
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
from test import test_support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'compiler', 'subprocess')
def usage(code, msg=''):
print __doc__
if msg: print msg
sys.exit(code)
def main(tests=None, testdir=None, verbose=0, quiet=False, generate=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, generate, exclude, single,
randomize, findleaks, use_resources, trace and coverdir) allow programmers
calling main() directly to set the values that would normally be set by
flags on the command line.
"""
test_support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsrf:lu:t:TD:NLR:',
['help', 'verbose', 'quiet', 'generate',
'exclude', 'single', 'random', 'fromfile',
'findleaks', 'use=', 'threshold=', 'trace',
'coverdir=', 'nocoverdir', 'runleaks',
'huntrleaks='
])
except getopt.error, msg:
usage(2, msg)
# Defaults
if use_resources is None:
use_resources = []
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-q', '--quiet'):
quiet = True;
verbose = 0
elif o in ('-g', '--generate'):
generate = True
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-s', '--single'):
single = True
elif o in ('-r', '--randomize'):
randomize = True
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
coverdir = os.path.join(os.getcwd(), a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) != 3:
print a, huntrleaks
usage(2, '-R takes three colon-separated arguments')
if len(huntrleaks[0]) == 0:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if len(huntrleaks[1]) == 0:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks[2]) == 0:
huntrleaks[2] = "reflog.txt"
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage(1, 'Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
if generate and verbose:
usage(2, "-g and -v don't go together!")
if single and fromfile:
usage(2, "-s and -f don't go together!")
good = []
bad = []
skipped = []
resource_denieds = []
if findleaks:
try:
import gc
except ImportError:
print 'No GC available, disabling findleaks.'
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
from tempfile import gettempdir
filename = os.path.join(gettempdir(), 'pynexttest')
try:
fp = open(filename, 'r')
next = fp.read().strip()
tests = [next]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(fromfile)
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
if args:
args = map(removepy, args)
if tests:
tests = map(removepy, tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS[:]
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests[:0] = args
args = []
tests = tests or args or findtests(testdir, stdtests, nottests)
if single:
tests = tests[:1]
if randomize:
random.shuffle(tests)
if trace:
import trace
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
trace=False, count=True)
test_support.verbose = verbose # Tell tests to be moderately quiet
test_support.use_resources = use_resources
save_modules = sys.modules.keys()
for test in tests:
if not quiet:
print test
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, generate, verbose, quiet, testdir)',
globals=globals(), locals=vars())
else:
ok = runtest(test, generate, verbose, quiet, testdir, huntrleaks)
if ok > 0:
good.append(test)
elif ok == 0:
bad.append(test)
else:
skipped.append(test)
if ok == -2:
resource_denieds.append(test)
if findleaks:
gc.collect()
if gc.garbage:
print "Warning: test created", len(gc.garbage),
print "uncollectable object(s)."
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
test_support.unload(module)
# The lists won't be sorted if running with -r
good.sort()
bad.sort()
skipped.sort()
if good and not quiet:
if not bad and not skipped and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if verbose:
print "CAUTION: stdout isn't compared in verbose mode:"
print "a test that passes in verbose mode may fail without it."
if bad:
print count(len(bad), "test"), "failed:"
printlist(bad)
if skipped and not quiet:
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
else:
print "Those skips are all expected on", plat + "."
else:
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
if single:
alltests = findtests(testdir, stdtests, nottests)
for i in range(len(alltests)):
if tests[0] == alltests[i]:
if i == len(alltests) - 1:
os.unlink(filename)
else:
fp = open(filename, 'w')
fp.write(alltests[i+1] + '\n')
fp.close()
break
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_operations',
'test_builtin',
'test_exceptions',
'test_types',
]
NOTTESTS = [
'test_support',
'test_future1',
'test_future2',
'test_future3',
]
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
if not testdir: testdir = findtestdir()
names = os.listdir(testdir)
tests = []
for name in names:
if name[:5] == "test_" and name[-3:] == os.extsep+"py":
modname = name[:-3]
if modname not in stdtests and modname not in nottests:
tests.append(modname)
tests.sort()
return stdtests + tests
def runtest(test, generate, verbose, quiet, testdir=None, huntrleaks=False):
"""Run a single test.
test -- the name of the test
generate -- if true, generate output, instead of running the test
and comparing it to a previously created output file
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
testdir -- test directory
"""
test_support.unload(test)
if not testdir:
testdir = findtestdir()
outputdir = os.path.join(testdir, "output")
outputfile = os.path.join(outputdir, test)
if verbose:
cfp = None
else:
cfp = cStringIO.StringIO()
if huntrleaks:
refrep = open(huntrleaks[2], "a")
try:
save_stdout = sys.stdout
try:
if cfp:
sys.stdout = cfp
print test # Output file starts with test name
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# Most tests run to completion simply as a side-effect of
# being imported. For the benefit of tests that can't run
# that way (like test_threaded_import), explicitly invoke
# their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
if huntrleaks:
# This code *is* hackish and inelegant, yes.
# But it seems to do the job.
import copy_reg
fs = warnings.filters[:]
ps = copy_reg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
import gc
def cleanup():
import _strptime, urlparse, warnings, dircache
from distutils.dir_util import _path_created
_path_created.clear()
warnings.filters[:] = fs
gc.collect()
sre.purge()
_strptime._regex_cache.clear()
urlparse.clear_cache()
copy_reg.dispatch_table.clear()
copy_reg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
dircache.reset()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
reload(the_module)
deltas = []
repcount = huntrleaks[0] + huntrleaks[1]
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, \
("1234567890"*(repcount//10 + 1))[:repcount]
for i in range(repcount):
rc = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
cleanup()
deltas.append(sys.gettotalrefcount() - rc - 2)
print >>sys.stderr
if max(map(abs, deltas[-huntrleaks[1]:])) > 0:
print >>sys.stderr, test, 'leaked', \
deltas[-huntrleaks[1]:], 'references'
print >>refrep, test, 'leaked', \
deltas[-huntrleaks[1]:], 'references'
# The end of the huntrleaks hackishness.
finally:
sys.stdout = save_stdout
except test_support.ResourceDenied, msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return -2
except (ImportError, test_support.TestSkipped), msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return -1
except KeyboardInterrupt:
raise
except test_support.TestFailed, msg:
print "test", test, "failed --", msg
sys.stdout.flush()
return 0
except:
type, value = sys.exc_info()[:2]
print "test", test, "crashed --", str(type) + ":", value
sys.stdout.flush()
if verbose:
traceback.print_exc(file=sys.stdout)
sys.stdout.flush()
return 0
else:
if not cfp:
return 1
output = cfp.getvalue()
if generate:
if output == test + "\n":
if os.path.exists(outputfile):
# Write it since it already exists (and the contents
# may have changed), but let the user know it isn't
# needed:
print "output file", outputfile, \
"is no longer needed; consider removing it"
else:
# We don't need it, so don't create it.
return 1
fp = open(outputfile, "w")
fp.write(output)
fp.close()
return 1
if os.path.exists(outputfile):
fp = open(outputfile, "r")
expected = fp.read()
fp.close()
else:
expected = test + "\n"
if output == expected or huntrleaks:
return 1
print "test", test, "produced unexpected output:"
sys.stdout.flush()
reportdiff(expected, output)
sys.stdout.flush()
return 0
def reportdiff(expected, output):
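    # Print a line-oriented diff between the expected and actual test output.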
import difflib
print "*" * 70
a = expected.splitlines(1)
b = output.splitlines(1)
sm = difflib.SequenceMatcher(a=a, b=b)
tuples = sm.get_opcodes()
def pair(x0, x1):
# x0:x1 are 0-based slice indices; convert to 1-based line indices.
x0 += 1
if x0 >= x1:
return "line " + str(x0)
else:
return "lines %d-%d" % (x0, x1)
for op, a0, a1, b0, b1 in tuples:
if op == 'equal':
pass
elif op == 'delete':
print "***", pair(a0, a1), "of expected output missing:"
for line in a[a0:a1]:
print "-", line,
elif op == 'replace':
print "*** mismatch between", pair(a0, a1), "of expected", \
"output and", pair(b0, b1), "of actual output:"
for line in difflib.ndiff(a[a0:a1], b[b0:b1]):
print line,
elif op == 'insert':
print "***", pair(b0, b1), "of actual output doesn't appear", \
"in expected output after line", str(a1)+":"
for line in b[b0:b1]:
print "+", line,
else:
print "get_opcodes() returned bad tuple?!?!", (op, a0, a1, b0, b1)
print "*" * 70
def findtestdir():
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
return testdir
def removepy(name):
if name.endswith(os.extsep + "py"):
name = name[:-3]
return name
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
print fill(' '.join(map(str, x)), width,
initial_indent=blanks, subsequent_indent=blanks)
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_normalization
# Whether a skip is expected here depends on whether a large test
# input file has been downloaded. test_normalization.skip_expected
# controls that.
# test_socket_ssl
# Controlled by test_socket_ssl.skip_expected. Requires the network
# resource, and a socket module with ssl support.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
# test_codecmaps_*
# Whether a skip is expected here depends on whether a large test
# input file has been downloaded. test_codecmaps_*.skip_expected
# controls that.
_expectations = {
'win32':
"""
test__locale
test_applesingle
test_al
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_gdbm
test_gl
test_grp
test_imgfile
test_ioctl
test_largefile
test_linuxaudiodev
test_mhlib
test_nis
test_openpty
test_ossaudiodev
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sunaudiodev
test_threadsignals
test_timing
""",
'linux2':
"""
test_al
test_applesingle
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_nis
test_ntpath
test_ossaudiodev
test_sunaudiodev
""",
'mac':
"""
test_al
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_cd
test_cl
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_gl
test_grp
test_ioctl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mmap
test_nis
test_ntpath
test_openpty
test_ossaudiodev
test_poll
test_popen
test_popen2
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sunaudiodev
test_sundry
test_tarfile
test_timing
""",
'unixware7':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_sundry
""",
'openunix8':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_sundry
""",
'sco_sv3':
"""
test_al
test_applesingle
test_asynchat
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_fork1
test_gettext
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_queue
test_sax
test_sunaudiodev
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'riscos':
"""
test_al
test_applesingle
test_asynchat
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_crypt
test_dbm
test_dl
test_fcntl
test_fork1
test_gdbm
test_gl
test_grp
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mmap
test_nis
test_ntpath
test_openpty
test_poll
test_popen2
test_pty
test_pwd
test_strop
test_sunaudiodev
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
test_timing
""",
'darwin':
"""
test__locale
test_al
test_bsddb
test_bsddb3
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_ossaudiodev
test_poll
test_sunaudiodev
""",
'sunos5':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_curses
test_dbm
test_gdbm
test_gl
test_gzip
test_imgfile
test_linuxaudiodev
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_gzip
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_zipfile
test_zlib
""",
'atheos':
"""
test_al
test_applesingle
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mhlib
test_mmap
test_nis
test_poll
test_popen2
test_resource
test_sunaudiodev
""",
'cygwin':
"""
test_al
test_applesingle
test_bsddb185
test_bsddb3
test_cd
test_cl
test_curses
test_dbm
test_gl
test_imgfile
test_ioctl
test_largefile
test_linuxaudiodev
test_locale
test_nis
test_ossaudiodev
test_socketserver
test_sunaudiodev
""",
'os2emx':
"""
test_al
test_applesingle
test_audioop
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_curses
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_mhlib
test_mmap
test_nis
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
test_sunaudiodev
""",
'freebsd4':
"""
test_aepack
test_al
test_applesingle
test_bsddb
test_bsddb3
test_cd
test_cl
test_gdbm
test_gl
test_imgfile
test_linuxaudiodev
test_locale
test_macfs
test_macostools
test_nis
test_normalization
test_ossaudiodev
test_pep277
test_plistlib
test_pty
test_scriptpackages
test_socket_ssl
test_socketserver
test_sunaudiodev
test_tcl
test_timeout
test_unicode_file
test_urllibnet
test_winreg
test_winsound
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_normalization
from test import test_socket_ssl
from test import test_timeout
from test import test_codecmaps_cn, test_codecmaps_jp
from test import test_codecmaps_kr, test_codecmaps_tw
from test import test_codecmaps_hk
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = set(s.split())
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
if test_normalization.skip_expected:
self.expected.add('test_normalization')
if test_socket_ssl.skip_expected:
self.expected.add('test_socket_ssl')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
for cc in ('cn', 'jp', 'kr', 'tw', 'hk'):
if eval('test_codecmaps_' + cc).skip_expected:
self.expected.add('test_codecmaps_' + cc)
if sys.maxint == 9223372036854775807L:
self.expected.add('test_rgbimg')
self.expected.add('test_imageop')
if not sys.platform in ("mac", "darwin"):
MAC_ONLY = ["test_macostools", "test_macfs", "test_aepack",
"test_plistlib", "test_scriptpackages"]
for skip in MAC_ONLY:
self.expected.add(skip)
if sys.platform != "win32":
WIN_ONLY = ["test_unicode_file", "test_winreg",
"test_winsound"]
for skip in WIN_ONLY:
self.expected.add(skip)
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. This
# prevents relative imports from working, and relative imports will screw
# up the testing framework. E.g. if both test.test_support and
# test_support are imported, they will not contain the same globals, and
# much of the testing framework relies on the globals in the
# test.test_support module.
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = pathlen = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
if len(sys.path) == pathlen:
print 'Could not find %r in sys.path to remove it' % mydir
main()
| gpl-3.0 | -5,719,543,437,383,639,000 | 29.674783 | 79 | 0.545498 | false |
jdilallo/jdilallo-test | examples/dfp/v201311/label_service/create_labels.py | 1 | 1668 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new labels.
To determine which labels exist, run get_all_labels.py. This feature is only
available to DFP premium solution networks."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import uuid
# Import appropriate classes from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
label_service = client.GetService('LabelService', version='v201311')
# Create label objects.
labels = []
for _ in xrange(5):
label = {
'name': 'Label #%s' % uuid.uuid4(),
'isActive': 'true',
'types': ['COMPETITIVE_EXCLUSION']
}
labels.append(label)
# Add Labels.
labels = label_service.createLabels(labels)
# Display results.
for label in labels:
print ('Label with id \'%s\', name \'%s\', and types {%s} was found.'
% (label['id'], label['name'], ','.join(label['types'])))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 | -6,635,565,559,352,071,000 | 28.785714 | 77 | 0.681655 | false |
south-coast-science/scs_mfr | src/scs_mfr/aws_group_setup.py | 1 | 4484 | #!/usr/bin/env python3
"""
Created on 21 Sep 2020
@author: Jade Page ([email protected])
DESCRIPTION
The aws_group_setup utility is designed to automate the creation of AWS Greengrass groups using South
Coast Science's configurations.
The group must already exist and the ML lambdas must be associated with the greengrass account for which the IAM auth
keys are given.
SYNOPSIS
aws_group_setup.py [-s [-m] [-a AWS_GROUP_NAME] [-f]] [-k] [-i INDENT] [-v]
EXAMPLES
./aws_group_setup.py -s -a scs-test-001-group -m
FILES
~/SCS/aws/aws_group_config.json
SEE ALSO
scs_mfr/aws_deployment.py
scs_mfr/aws_identity.py
RESOURCES
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/greengrass.html
"""
import sys
from botocore.exceptions import ClientError, NoCredentialsError
from scs_core.aws.client.access_key import AccessKey
from scs_core.aws.client.client import Client
from scs_core.aws.config.aws import AWS
from scs_core.aws.greengrass.aws_group import AWSGroup
from scs_core.aws.greengrass.aws_group_configuration import AWSGroupConfiguration
from scs_core.aws.greengrass.gg_errors import ProjectMissingError
from scs_core.data.datetime import LocalizedDatetime
from scs_core.data.json import JSONify
from scs_core.sys.logging import Logging
from scs_host.sys.host import Host
from scs_mfr.cmd.cmd_aws_group_setup import CmdAWSGroupSetup
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
key = None
# ----------------------------------------------------------------------------------------------------------------
# cmd...
cmd = CmdAWSGroupSetup()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
# logging...
Logging.config('aws_group_setup', verbose=cmd.verbose)
logger = Logging.getLogger()
logger.info(cmd)
# ----------------------------------------------------------------------------------------------------------------
# resources...
try:
key = AccessKey.from_stdin() if cmd.stdin else AccessKey.from_user()
except ValueError:
logger.error('invalid key.')
exit(1)
except KeyboardInterrupt:
print(file=sys.stderr)
exit(0)
client = Client.construct('greengrass', key)
# AWSGroupConfigurator...
conf = AWSGroupConfiguration.load(Host)
logger.info(conf)
# ----------------------------------------------------------------------------------------------------------------
# run...
try:
if cmd.set:
if conf and not cmd.force:
user_choice = input("Group configuration already exists. Type Yes to overwrite: ")
if not user_choice.lower() == "yes":
exit(0)
try:
now = LocalizedDatetime.now()
conf = AWSGroupConfiguration(AWS.group_name(), now, ml=cmd.use_ml)
configurator = conf.configurator(client)
configurator.collect_information(Host)
configurator.define_aws_group_resources(Host)
configurator.define_aws_group_functions()
configurator.define_aws_group_subscriptions()
# configurator.define_aws_logger()
configurator.create_aws_group_definition()
conf.save(Host)
print(JSONify.dumps(conf, indent=cmd.indent))
except ClientError as error:
if error.response['Error']['Code'] == 'BadRequestException':
logger.error("Invalid request.")
if error.response['Error']['Code'] == 'InternalServerErrorException':
logger.error("AWS server error.")
except ProjectMissingError:
logger.error("Project configuration not set.")
else:
try:
aws_group_info = AWSGroup(AWS.group_name(), client)
aws_group_info.get_group_info_from_name()
aws_group_info.get_group_arns()
aws_group_info.output_current_info()
print(JSONify.dumps(aws_group_info, indent=cmd.indent))
except KeyError:
logger.error("group may not have been configured.")
except KeyboardInterrupt:
print(file=sys.stderr)
except (EOFError, NoCredentialsError):
logger.error("credentials error.")
| mit | -6,137,328,642,779,929,000 | 29.503401 | 118 | 0.574487 | false |
cshinaver/cctools | umbrella/src/umbrella.py | 1 | 188703 | #!/usr/bin/env cctools_python
# CCTOOLS_PYTHON_VERSION 2.7 2.6
# All the vanilla python package dependencies of Umbrella can be satisfied by Python 2.6.
"""
Umbrella is a tool for specifying and materializing comprehensive execution environments, from the hardware all the way up to software and data. A user simply invokes Umbrella with the desired task, and Umbrella determines the minimum mechanism necessary to run the task, whether it be direct execution, a system container, a local virtual machine, or submission to a cloud or grid environment. We present the overall design of Umbrella and demonstrate its use to precisely execute a high energy physics application and a ray-tracing application across many platforms using a combination of Parrot, Chroot, Docker, VMware, Condor, and Amazon EC2.
Copyright (C) 2003-2004 Douglas Thain and the University of Wisconsin
Copyright (C) 2005- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
Implementation Logics of Different Execution Engines:
If the sandbox type is Parrot, create the mountlist file and set PARROT_MOUNT_FILE; set PATH; set PARROT_LDSO_PATH if a separate OS image is needed; parrotize the user's command into `parrot_run user_cmd`.
If the sandbox type is Docker, transfer the OS image into a Docker image; use volume to mount all the software and data dependencies into a container; set PATH; dockerize the user's command into `docker run user_cmd`. To use Docker, a separate OS image is needed.
If the sandbox type is chroot, create mountpoints for software and data dependencies inside the OS image directory and mount software and data into the OS image, set PATH, chrootize the user's command into `chroot user_cmd`.
Implementation Logic of Dependency Sources:
HTTP/HTTPS: Download the dependency into Umbrella local cache.
CVMFS: check whether the mountpoint already exists on the execution node, if yes, do not need to set mountpoint for this dependency and directly process the next dependency; if no, parrot will be used to deliver cvmfs for the application.
If Parrot is needed to access cvmfs, and the sandbox type is Parrot,
Do all the work mentioned above for Parrot execution engine + add SITEINFO into mountlist file.
If Parrot is needed to access cvmfs, and the sandbox type is Docker,
Do all the work mentioned above for Docker execution engine + add SITEINFO into mountlist file + parrotize the user's command. First parrotize the user's command, then dockerize the user's command.
If Parrot is needed to access cvmfs, and the sandbox type is chroot,
Do all the work mentioned above for chroot execution engine + add SITEINFO into mountlist file + parrotize the user's command. First parrotize the user's command, then chrootize the user's command.
ROOT: If the user expects the root file to be accessed at runtime without downloading, Umbrella does nothing when the ROOT file is delivered through the ROOT protocol, because ROOT supports data access during runtime without downloading first. Inside the umbrella specification file, the user only needs to specify the mount_env attribute.
If the user expects the root file to be downloaded first, then the user needs to specify both the mount_env and mountpoint attributes inside the umbrella specification.
Git: If the user's application needs git to do `git clone <repo_url>; git checkout <branch_name/commit_id>`, then the user does not need to specify mountpoint attribute inside the umbrella specification.
If the user's application does not explicitly require git, but umbrella tries to pull some dependencies from a remote git repository, then the user needs to specify both the mount_env and mountpoint attributes inside the umbrella specification.
mount_env and mountpoint:
If only mountpoint is set to A in a specification, the dependency will be downloaded into the umbrella local cache with the file path of D, and a new mountpoint will be added into mount_dict (mount_dict[A] = D).
If only mount_env is set to B in a specification, the dependency will not be downloaded, meta_search will be executed to get one remote storage location, C, of the dependency, a new environment variable will be set (env_para_dict[B] = C).
If mountpoint is set to A and mount_env is set to B in a specification, the dependency will be downloaded into the umbrella local cache with the file path of D, and a new mountpoint will be added into mount_dict (mount_dict[A] = D) and a new environment variable will also be set (env_para_dict[B] = A).
Local path inside the umbrella local cache:
Case 1: the dependency is delivered as a git repository through http/https/git protocol.
dest = os.path.dirname(sandbox_dir) + "/cache/" + git_commit + '/' + repo_name
Note: git_commit is optional in the metadata database. If git_commit is not specified in the metadata database, then:
dest = os.path.dirname(sandbox_dir) + "/cache/" + repo_name
Case 2: the dependency is delivered not as a git repository through http/https protocol.
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
Note: checksum is required to be specified in the metadata database. If it is not specified, umbrella will complain and exit.
Case 3: SITECONF info necessary for CVMFS cms repository access through Parrot. For this case, we use a hard-coded path.
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/SITECONF"
"""
import sys
from stat import *
from pprint import pprint
import subprocess
import platform
import re
import tarfile
import StringIO
from optparse import OptionParser
import os
import hashlib
import difflib
import sqlite3
import shutil
import datetime
import time
import getpass
import grp
import logging
import multiprocessing
import resource
import tempfile
import urllib
import gzip
import imp
found_requests = None
try:
imp.find_module('requests')
found_requests = True
import requests
import requests.packages.urllib3
except ImportError:
found_requests = False
found_boto3 = None
try:
imp.find_module('boto3')
found_boto3 = True
import boto3
except ImportError:
found_boto3 = False
found_botocore = None
try:
imp.find_module('botocore')
found_botocore = True
import botocore
except ImportError:
found_botocore = False
s3_url = 'https://s3.amazonaws.com'
if sys.version_info >= (3,):
import urllib.request as urllib2
import urllib.parse as urlparse
else:
import urllib2
import urlparse
if sys.version_info > (2,6,):
import json
else:
import simplejson as json #json module is introduce in python 2.4.3
#Replace the version of cctools inside umbrella is easy: set cctools_binary_version.
cctools_binary_version = "5.2.0"
cctools_dest = ""
#set cms_siteconf_url to be the url for the siteconf your application depends
#the url and format settings here should be consistent with the function set_cvmfs_cms_siteconf
cms_siteconf_url = "http://ccl.cse.nd.edu/research/data/hep-case-study/2efd5cbb3424fe6b4a74294c84d0fb43/SITECONF.tar.gz"
cms_siteconf_format = "tgz"
tempfile_list = [] #a list of temporary files created by umbrella that need to be removed before umbrella ends.
tempdir_list = [] #a list of temporary dirs created by umbrella that need to be removed before umbrella ends.
pac_manager = {
"yum": ("-y install", "info")
}
"""
ec2 metadata
the instance types provided by ec2 are undergoing changes as time goes by.
"""
ec2_json = {
"redhat-6.5-x86_64": {
"ami-2cf8901c": {
"ami": "ami-2cf8901c",
"root_device_type": "ebs",
"virtualization_type": "paravirtual",
"user": "ec2-user"
},
"ami-0b5f073b": {
"ami": "ami-0b5f073b",
"root_device_type": "ebs",
"virtualization_type": "paravirtual",
"user": "ec2-user"
}
},
"centos-6.6-x86_64": {
"ami-0b06483b": {
"ami": "ami-0b06483b",
"root_device_type": "ebs",
"virtualization_type": "paravirtual",
"user": "root"
}
},
"redhat-5.10-x86_64": {
"ami-d76a29e7": {
"ami": "ami-d76a29e7",
"root_device_type": "ebs",
"virtualization_type": "hvm",
"user": "root"
}
}
}
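# Illustrative sketch (not called by umbrella) of how the ec2_json table above can
# be consulted: pick the first AMI entry registered for a given distro key. The
# default distro key used here is one of the entries actually present in ec2_json.
def _example_ec2_lookup(distro="redhat-6.5-x86_64"):
	amis = ec2_json.get(distro, {})
	for ami_id in amis:
		return amis[ami_id]
	return None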
upload_count = 0
def subprocess_error(cmd, rc, stdout, stderr):
"""Print the command, return code, stdout, and stderr; and then directly exit.
Args:
cmd: the executed command.
rc: the return code.
stdout: the standard output of the command.
stderr: standard error of the command.
Returns:
directly exit the program.
"""
cleanup(tempfile_list, tempdir_list)
sys.exit("`%s` fails with the return code of %d, \nstdout: %s, \nstderr: %s\n" % (cmd, rc, stdout, stderr))
def func_call(cmd):
""" Execute a command and return the return code, stdout, stderr.
Args:
cmd: the command needs to execute using the subprocess module.
Returns:
a tuple including the return code, stdout, stderr.
"""
logging.debug("Start to execute command: %s", cmd)
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
return (rc, stdout, stderr)
def func_call_withenv(cmd, env_dict):
""" Execute a command with a special setting of the environment variables and return the return code, stdout, stderr.
Args:
cmd: the command needs to execute using the subprocess module.
env_dict: the environment setting.
Returns:
a tuple including the return code, stdout, stderr.
"""
logging.debug("Start to execute command: %s", cmd)
logging.debug("The environment variables for executing the command is:")
logging.debug(env_dict)
p = subprocess.Popen(cmd, env = env_dict, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
return (rc, stdout, stderr)
def which_exec(name):
"""The implementation of shell which command
Args:
name: the name of the executable to be found.
Returns:
If the executable is found, returns its fullpath.
If PATH is not set, directly exit.
Otherwise, returns None.
"""
if not os.environ.has_key("PATH"):
cleanup(tempfile_list, tempdir_list)
logging.critical("The environment variable PATH is not set!")
sys.exit("The environment variable PATH is not set!")
for path in os.environ["PATH"].split(":"):
fullpath = path + '/' + name
if os.path.exists(fullpath) and os.path.isfile(fullpath):
return fullpath
return None
def md5_cal(filename, block_size=2**20):
"""Calculate the md5sum of a file
Args:
filename: the name of the file
block_size: the size of each block
Returns:
If the calculation fails for any reason, directly exit.
Otherwise, return the md5 value of the content of the file
"""
try:
with open(filename, 'rb') as f:
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
except Exception as e:
cleanup(tempfile_list, tempdir_list)
logging.critical("Computing the checksum of %s fails: %s.", filename, e)
sys.exit("md5_cal(" + filename + ") failed.\n" + e)
def url_download(url, dest):
""" Download url into dest
Args:
url: the url needed to be downloaded.
dest: the path where the content from the url should be put.
Returns:
If the url is downloaded successfully, return None;
Otherwise, directly exit.
"""
logging.debug("Start to download %s to %s ...." % (url, dest))
urllib.urlretrieve(url, dest)
def dependency_download(name, url, checksum, checksum_tool, dest, format_remote_storage, action):
"""Download a dependency from the url and verify its integrity.
Args:
name: the file name of the dependency. If its format is plain text, then the filename is the same as the archived name. If its format is tgz, the filename should be the archived name with the trailing .tgz/.tar.gz removed.
url: the storage location of the dependency.
checksum: the checksum of the dependency.
checksum_tool: the tool used to calculate the checksum, such as md5sum.
dest: the destination of the dependency where the downloaded dependency will be put.
format_remote_storage: the file format of the dependency, such as .tgz.
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
Returns:
If the url is a broken link or the integrity of the downloaded data is bad, directly exit.
Otherwise, return None.
"""
print "Download software from %s into the umbrella local cache (%s)" % (url, dest)
logging.debug("Download software from %s into the umbrella local cache (%s)", url, dest)
dest_dir = os.path.dirname(dest)
dest_uncompress = dest #dest_uncompress is the path of the uncompressed-version dependency
if format_remote_storage == "plain":
filename = name
elif format_remote_storage == "tgz":
filename = "%s.tar.gz" % name
dest = os.path.join(dest_dir, filename) #dest is the path of the compressed-version dependency
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(dest):
#download the dependency from the url
#this method currently will fail when the data size is larger than the memory size; using subprocess + wget could solve it
url_download(url, dest)
#if it exists, the uncompressed-version directory will be deleted first
if action == "unpack" and format_remote_storage != 'plain' and os.path.exists(dest_uncompress):
shutil.rmtree(dest_uncompress)
logging.debug("the uncompressed-version directory exists already, first delete it")
#calculate the checksum of the compressed-version dependency
if checksum_tool == "md5sum":
local_checksum = md5_cal(dest)
logging.debug("The checksum of %s is: %s", dest, local_checksum)
if not local_checksum == checksum:
cleanup(tempfile_list, tempdir_list)
logging.critical("The version of %s is incorrect! Please first delete it and its unpacked directory!!", dest)
sys.exit("the version of " + dest + " is incorrect! Please first delete it and its unpacked directory!!\n")
elif not checksum_tool:
logging.debug("the checksum of %s is not provided!", url)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not supported currently!", checksum_tool)
sys.exit(checksum_tool + "is not supported currently!")
#if the uncompressed-version dependency does not exist, uncompress the dependency
if action == "unpack" and (not os.path.exists(dest_uncompress)) and format_remote_storage == "tgz":
logging.debug("Uncompressing %s into %s ....", dest, dest_uncompress)
tfile = tarfile.open(dest, "r:gz")
tfile.extractall(dest_dir)
def extract_tar(src, dest, form):
"""Extract a tgz file from src to dest
Args:
src: the location of a tgz file
dest: the location where the uncompressed data will be put
form: the format the tarball. Such as: tar, tgz
Returns:
None
"""
if form == "tar":
tfile = tarfile.open(src, "r")
elif form == "tgz":
tfile = tarfile.open(src, "r:gz")
tfile.extractall(dest)
def meta_search(meta_json, name, id=None):
"""Search the metadata information of an dependency in the meta_json
First find all the items with the required name in meta_json.
Then find the right one whose id satisfied the requirement.
If no id parameter is provided, then the first matched one will be returned.
Args:
meta_json: the json object including all the metadata of dependencies.
name: the name of the dependency.
id: the id attribute of the dependency. Defaults to None.
Returns:
If one item is found in meta_json, return the item, which is a dictionary.
If no item satisfied the requirement on meta_json, directly exit.
"""
if meta_json.has_key(name):
if not id:
for item in meta_json[name]:
return meta_json[name][item]
else:
if meta_json[name].has_key(id):
return meta_json[name][id]
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("meta_json does not has <%s> with the id <%s>", name, id)
sys.exit("meta_json does not has <%s> with the id <%s>" % (name, id))
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("meta_json does not include %s", name)
sys.exit("meta_json does not include %s\n" % name)
def attr_check(name, item, attr, check_len = 0):
"""Check and obtain the attr of an item.
Args:
name: the name of the dependency.
item: an item from the metadata database
attr: an attribute
check_len: if set to 1, also check whether the length of the attr is > 0; if set to 0, ignore the length checking.
Returns:
If the attribute check is successful, directly return the attribute.
Otherwise, directly exit.
"""
logging.debug("check the %s attr of the following item:", attr)
logging.debug(item)
if item.has_key(attr):
if check_len == 1:
if len(item[attr]) <= 0:
cleanup(tempfile_list, tempdir_list)
logging.debug("The %s attr of the item is empty.", attr)
sys.exit("The %s attr of the item (%s) is empty." % (item, attr))
#when multiple options are available, currently the first one will be picked.
#we can add filter here to control the choice.
if attr == 'source':
return source_filter(item[attr], ['osf', 's3'], name)
else:
return item[attr][0]
else:
return item[attr]
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("This item doesn not have %s attr!", attr)
sys.exit("the item (%s) does not have %s attr!" % (item, attr))
def source_filter(sources, filters, name):
"""Filter the download urls of a dependency.
This filtering is necessary because some urls are not accessible by the
current umbrella runtime. For example, some urls may point to OSF while the
execution node has no requests python package installed. In this case, all
the download urls pointing to OSF are ignored.
Args:
sources: a list of download urls
filters: a list of protocols which are not supported by the current umbrella runtime.
name: the name of the dependency.
Returns:
If all the download urls are not available, exit directly.
Otherwise, return the first available url.
"""
l = []
for s in sources:
filtered = 0
for item in filters:
if s[:len(item)] == item:
filtered = 1
break
if not filtered:
l.append(s)
if len(l) == 0:
return sources[0]
else:
return l[0]
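# Illustrative sketch (not called by umbrella) of source_filter(): urls whose scheme
# prefix appears in the filter list are dropped, so the plain http url wins here.
# The urls below are hypothetical.
def _example_source_filter():
	sources = ["osf+https://osf.io/abc12", "http://example.com/pkg.tar.gz"]
	return source_filter(sources, ['osf', 's3'], "pkg")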
def cctools_download(sandbox_dir, hardware_platform, linux_distro, action):
"""Download cctools
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
linux_distro: the linux distro. For Example: redhat6, centos6.
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
Returns:
the path of the downloaded cctools in the umbrella local cache. For example: /tmp/umbrella_test/cache/d19376d92daa129ff736f75247b79ec8/cctools-4.9.0-redhat6-x86_64
"""
name = "cctools-%s-%s-%s" % (cctools_binary_version, hardware_platform, linux_distro)
source = "http://ccl.cse.nd.edu/software/files/%s.tar.gz" % name
global cctools_dest
cctools_dest = os.path.dirname(sandbox_dir) + "/cache/" + name
dependency_download(name, source, None, None, cctools_dest, "tgz", "unpack")
return cctools_dest
def set_cvmfs_cms_siteconf(sandbox_dir):
"""Download cvmfs SITEINFO and set its mountpoint.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
Returns:
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
"""
dest = os.path.dirname(sandbox_dir) + "/cache/cms_siteconf/SITECONF"
dependency_download("SITECONF.tar.gz", cms_siteconf_url, "", "", dest, cms_siteconf_format, "unpack")
cvmfs_cms_siteconf_mountpoint = '/cvmfs/cms.cern.ch/SITECONF/local %s/local' % dest
return cvmfs_cms_siteconf_mountpoint
def is_dir(path):
"""Judge whether a path is directory or not.
If the path is a dir, directly return. Otherwise, exit directly.
Args:
path: a path
Returns:
None
"""
if os.path.isdir(path):
pass
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("%s is not a directory!", path)
sys.exit("%s is not a directory!" % path)
def git_dependency_download(repo_url, dest, git_branch, git_commit):
"""Prepare a dependency from a git repository.
First check whether dest exist or not: if dest exists, then checkout to git_branch and git_commit;
otherwise, git clone url, and then checkout to git_branch and git_commit.
Args:
repo_url: the url of the remote git repository
dest: the local directory where the git repository will be cloned into
git_branch: the branch name of the git repository
git_commit: the commit id of the repository
Returns:
dest: the local directory where the git repository is
"""
dest = remove_trailing_slashes(dest)
scheme, netloc, path, query, fragment = urlparse.urlsplit(repo_url)
repo_name = os.path.basename(path)
if repo_name[-4:] == '.git':
repo_name = repo_name[:-4]
dest = dest + '/' + repo_name
if os.path.exists(dest):
is_dir(dest)
else:
dir = os.path.dirname(dest)
if os.path.exists(dir):
is_dir(dir)
else:
os.makedirs(dir)
os.chdir(dir)
if dependency_check('git') == -1:
cleanup(tempfile_list, tempdir_list)
sys.exit("Git is not found!")
cmd = "git clone %s" % repo_url
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
os.chdir(dest)
if git_branch:
cmd = "git checkout %s" % git_branch
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
if git_commit:
cmd = "git checkout %s" % git_commit
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
return dest
def git_dependency_parser(item, repo_url, sandbox_dir):
"""Parse a git dependency
Args:
item: an item from the metadata database
repo_url: the url of the remote git repository
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
Returns:
dest: the path of the downloaded data dependency in the umbrella local cache.
"""
logging.debug("This dependency is stored as a git repository: ")
logging.debug(item)
git_branch = ''
if item.has_key("branch"):
git_branch = item["branch"]
git_commit = ''
if item.has_key("commit"):
git_commit = item["commit"]
dest = os.path.dirname(sandbox_dir) + "/cache/" + git_commit
dest = git_dependency_download(repo_url, dest, git_branch, git_commit)
return dest
def data_dependency_process(name, id, meta_json, sandbox_dir, action, osf_auth):
"""Download a data dependency
Args:
name: the item name in the data section
id: the id attribute of the processed dependency
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
osf_auth: the osf authentication info including osf_username and osf_password.
Returns:
dest: the path of the downloaded data dependency in the umbrella local cache.
"""
item = meta_search(meta_json, name, id)
source = attr_check(name, item, "source", 1)
if source[:4] == 'git+':
dest = git_dependency_parser(item, source[4:], sandbox_dir)
elif source[:4] == 'osf+':
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[4:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if len(osf_auth) < 2:
cleanup(tempfile_list, tempdir_list)
logging.debug("Please use --osf_user and --osf_pass to specify your osf authentication info!")
sys.exit("Please use --osf_user and --osf_pass to specify your osf authentication info!")
if form == "tgz":
osf_download(osf_auth[0], osf_auth[1], source[4:], dest + ".tar.gz")
else:
osf_download(osf_auth[0], osf_auth[1], source[4:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
elif source[:3] == "s3+":
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[3:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if form == "tgz":
s3_download(source[3:], dest + ".tar.gz")
else:
s3_download(source[3:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
else:
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
dependency_download(name, source, checksum, "md5sum", dest, form, action)
return dest
def check_cvmfs_repo(repo_name):
""" Check whether a cvmfs repo is installed on the host or not
Args:
repo_name: a cvmfs repo name. For example: "/cvmfs/cms.cern.ch".
Returns:
If the cvmfs repo is installed, returns the string including the mountpoint of cvmfs cms repo. For example: "/cvmfs/cms.cern.ch".
Otherwise, return an empty string.
"""
logging.debug("Check whether a cvmfs repo is installed on the host or not")
cmd = "df -h|grep '^cvmfs'|grep "+ "'" + repo_name + "'" + "|rev| cut -d' ' -f1|rev"
rc, stdout, stderr = func_call(cmd)
if rc == 0:
return stdout
else:
return ''
def dependency_process(name, id, action, meta_json, sandbox_dir, osf_auth):
""" Process each explicit and implicit dependency.
Args:
name: the item name in the software section
id: the id attribute of the processed dependency
action: the action on the downloaded dependency. Options: none, unpack. "none" leaves the downloaded dependency at it is. "unpack" uncompresses the dependency.
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
osf_auth: the osf authentication info including osf_username and osf_password.
Returns:
mount_value: the actual storage path of one dependency.
"""
mount_value = ''
item = meta_search(meta_json, name, id)
source = attr_check(name, item, "source", 1)
logging.debug("%s is chosen to deliver %s", source, name)
if source[:4] == "git+":
dest = git_dependency_parser(item, source[4:], sandbox_dir)
mount_value = dest
cleanup(tempfile_list, tempdir_list)
sys.exit("this is git source, can not support")
elif source[:4] == "osf+":
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
#first download it as a normal url
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[4:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if len(osf_auth) < 2:
cleanup(tempfile_list, tempdir_list)
logging.debug("Please use --osf_user and --osf_pass to specify your osf authentication info!")
sys.exit("Please use --osf_user and --osf_pass to specify your osf authentication info!")
if form == "tgz":
osf_download(osf_auth[0], osf_auth[1], source[4:], dest + ".tar.gz")
else:
osf_download(osf_auth[0], osf_auth[1], source[4:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
mount_value = dest
elif source[:3] == "s3+":
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
try:
logging.debug("Trying to download %s as a normal url ,,,", source)
dependency_download(name, source[3:], checksum, "md5sum", dest, form, action)
except:
logging.debug("Fails to download %s as a normal url ,,,", source)
if form == "tgz":
s3_download(source[3:], dest + ".tar.gz")
else:
s3_download(source[3:], dest)
dependency_download(name, dest, checksum, "md5sum", dest, form, action)
mount_value = dest
elif source[:5] == "cvmfs":
pass
else:
checksum = attr_check(name, item, "checksum")
form = attr_check(name, item, "format")
dest = os.path.dirname(sandbox_dir) + "/cache/" + checksum + "/" + name
dependency_download(name, source, checksum, "md5sum", dest, form, action)
mount_value = dest
return mount_value
def env_parameter_init(hardware_spec, kernel_spec, os_spec):
""" Set the environment parameters according to the specification file.
Args:
hardware_spec: the hardware section in the specification for the user's task.
kernel_spec: the kernel section in the specification for the user's task.
os_spec: the os section in the specification for the user's task.
Returns:
a tuple including the requirements for hardware, kernel and os.
"""
hardware_platform = attr_check("hardware", hardware_spec, "arch").lower()
cpu_cores = 1
if hardware_spec.has_key("cores"):
cpu_cores = hardware_spec["cores"].lower()
memory_size = "1GB"
if hardware_spec.has_key("memory"):
memory_size = hardware_spec["memory"].lower()
disk_size = "1GB"
if hardware_spec.has_key("disk"):
disk_size = hardware_spec["disk"].lower()
kernel_name = attr_check("kernel", kernel_spec, "name").lower()
kernel_version = attr_check("kernel", kernel_spec, "version").lower()
kernel_version = re.sub('\s+', '', kernel_version).strip()
distro_name = attr_check("os", os_spec, "name").lower()
distro_version = attr_check("os", os_spec, "version").lower()
os_id = ''
if os_spec.has_key("id"):
os_id = os_spec["id"]
index = distro_version.find('.')
linux_distro = distro_name + distro_version[:index] #example of linux_distro: redhat6
return (hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id)
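# Illustrative sketch (not called by umbrella) of env_parameter_init() on a minimal,
# hypothetical specification: the three dicts below mirror the hardware, kernel and
# os sections of an umbrella specification file.
def _example_env_parameter_init():
	hardware_spec = {"arch": "x86_64", "cores": "2", "memory": "2GB", "disk": "3GB"}
	kernel_spec = {"name": "linux", "version": ">=2.6.18"}
	os_spec = {"name": "Redhat", "version": "6.5"}
	# linux_distro in the returned tuple would be "redhat6" for this input.
	return env_parameter_init(hardware_spec, kernel_spec, os_spec)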
def compare_versions(v1, v2):
""" Compare two versions, the format of version is: X.X.X
Args:
v1: a version.
v2: a version.
Returns:
0 if v1 == v2; 1 if v1 is newer than v2; -1 if v1 is older than v2.
"""
list1 = v1.split('.')
list2 = v2.split('.')
for i in range(len(list1)):
list1[i] = int(list1[i])
for i in range(len(list2)):
list2[i] = int(list2[i])
if list1[0] == list2[0]:
if list1[1] == list2[1]:
if list1[2] == list2[2]:
return 0
elif list1[2] > list2[2]:
return 1
else:
return -1
elif list1[1] > list2[1]:
return 1
else:
return -1
elif list1[0] > list2[0]:
return 1
else:
return -1
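# A few illustrative comparisons (not called by umbrella) showing the convention
# used by compare_versions(); versions must be in the X.X.X format.
def _example_compare_versions():
	assert compare_versions("2.6.32", "2.6.18") == 1
	assert compare_versions("2.6.18", "2.6.18") == 0
	assert compare_versions("2.6.18", "3.2.1") == -1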
def verify_kernel(host_kernel_name, host_kernel_version, kernel_name, kernel_version):
""" Check whether the kernel version of the host machine matches the requirement.
The kernel_version format supported for now includes: >=2.6.18; [2.6.18, 2.6.32].
Args:
host_kernel_name: the name of the OS kernel of the host machine.
host_kernel_version: the version of the kernel of the host machine.
kernel_name: the name of the required OS kernel (e.g., linux). Not case sensitive.
kernel_version: the version of the required kernel (e.g., 2.6.18).
Returns:
If the kernel version of the host machine matches the requirement, return None.
If the kernel version of the host machine does not match the requirement, directly exit.
"""
if host_kernel_name != kernel_name:
cleanup(tempfile_list, tempdir_list)
logging.critical("The required kernel name is %s, the kernel name of the host machine is %s!", kernel_name, host_kernel_name)
sys.exit("The required kernel name is %s, the kernel name of the host machine is %s!\n" % (kernel_name, host_kernel_name))
if kernel_version[0] == '[':
list1 = kernel_version[1:-1].split(',')
if compare_versions(host_kernel_version, list1[0]) >= 0 and compare_versions(host_kernel_version, list1[1]) <= 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
elif kernel_version[0] == '>':
if compare_versions(host_kernel_version, kernel_version[2:]) >= 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
elif kernel_version[0] == '<':
if compare_versions(host_kernel_version, kernel_version[2:]) <= 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.debug("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
else: #the kernel version is a single value
if compare_versions(host_kernel_version, kernel_version) == 0:
logging.debug("The kernel version matches!")
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("The required kernel version is %s, the kernel version of the host machine is %s!", kernel_version, host_kernel_version)
sys.exit("The required kernel version is %s, the kernel version of the host machine is %s!\n" % (kernel_version, host_kernel_version))
def env_check(sandbox_dir, sandbox_mode, hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version):
""" Check the matching degree between the specification requirement and the host machine.
Currently checks the following items: sandbox_mode, hardware platform, kernel, OS, disk, memory, cpu cores.
Other things that may need to be checked in the future: software and data.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
sandbox_mode: the execution engine.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
cpu_cores: the number of required cpus (e.g., 1).
memory_size: the memory size requirement (e.g., 2GB). Not case sensitive.
disk_size: the disk size requirement (e.g., 2GB). Not case sensitive.
kernel_name: the name of the required OS kernel (e.g., linux). Not case sensitive.
kernel_version: the version of the required kernel (e.g., 2.6.18).
Returns:
host_linux_distro: the linux distro of the host machine. For Example: redhat6, centos6.
"""
print "Execution environment checking ..."
if sandbox_mode not in ["docker", "destructive", "parrot"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("Currently local execution engine only support three sandbox techniques: docker, chroot or parrot!")
sys.exit("Currently local execution engine only support three sandbox techniques: docker, chroot or parrot!\n")
uname_list = platform.uname() #format of uname_list: (system,node,release,version,machine,processor)
logging.debug("Hardware platform checking ...")
if hardware_platform != uname_list[4].lower():
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %s, but the local machine is %s", hardware_platform, uname_list[4].lower())
sys.exit("The specification requires " + hardware_platform + ", but the local machine is " + uname_list[4].lower() + "!\n")
logging.debug("CPU cores checking ...")
cpu_cores = int(cpu_cores)
host_cpu_cores = multiprocessing.cpu_count()
if cpu_cores > host_cpu_cores:
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %d cpu cores, but the local machine only has %d cores!", cpu_cores, host_cpu_cores)
sys.exit("The specification requires %d cpu cores, but the local machine only has %d cores!\n" % (cpu_cores, host_cpu_cores))
logging.debug("Memory size checking ...")
memory_size = re.sub('\s+', '', memory_size).strip()
memory_size = float(memory_size[:-2])
cmd = "free -tg|grep Total|sed 's/\s\+/ /g'|cut -d' ' -f2"
rc, stdout, stderr = func_call(cmd)
if rc != 0:
logging.critical("The return code is %d, memory check fail!", rc)
else:
host_memory_size = float(stdout)
if memory_size > host_memory_size:
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %.2f GB memory space, but the local machine only has %.2f GB free memory space!", memory_size, host_memory_size)
sys.exit("The specification requires %.2f GB memory space, but the local machine only has %.2f GB free memory space!" % (memory_size, host_memory_size))
logging.debug("Disk space checking ...")
disk_size = re.sub('\s+', '', disk_size).strip()
disk_size = float(disk_size[:-2])
st = os.statvfs(sandbox_dir)
free_disk = float(st.f_bavail * st.f_frsize) / (1024*1024*1024)
if disk_size > free_disk:
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification requires %.2f GB disk space, but the local machine only has %.2f GB free disk space!", disk_size, free_disk)
sys.exit("The specification requires %.2f GB disk space, but the local machine only has %.2f GB free disk space!" % (disk_size, free_disk))
#check kernel
logging.debug("Kernel checking ...")
host_kernel_name = uname_list[0].lower()
index = uname_list[2].find('-')
host_kernel_version = uname_list[2][:index]
verify_kernel(host_kernel_name, host_kernel_version, kernel_name, kernel_version)
dist_list = platform.dist()
logging.debug("The hardware information of the local machine:")
logging.debug(dist_list)
#set host_linux_distro. Examples: redhat6, centos6.
#potential problem: maybe in the future, we need a finer control about the host_linux_distro, like redhat6.5, centos6.5.
arch_index = uname_list[2].find('ARCH')
host_linux_distro = None
if arch_index != -1:
host_linux_distro = 'arch'
else:
redhat_index = uname_list[2].find('el')
centos_index = uname_list[2].find('centos')
if redhat_index != -1:
dist_version = uname_list[2][redhat_index + 2]
if centos_index != -1 or dist_list[0].lower() == 'centos':
host_linux_distro = 'centos' + dist_version
else:
host_linux_distro = 'redhat' + dist_version
logging.debug("The OS distribution information of the local machine: %s", host_linux_distro)
return host_linux_distro
def parrotize_user_cmd(user_cmd, sandbox_dir, cwd_setting, linux_distro, hardware_platform, meta_json, cvmfs_http_proxy):
"""Modify the user's command into `parrot_run + the user's command`.
The cases when this function should be called: (1) sandbox_mode == parrot; (2) sandbox_mode != parrot and cvmfs is needed to deliver some dependencies not installed on the execution node.
Args:
user_cmd: the user's command.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
cwd_setting: the current working directory for the execution of the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
linux_distro: the linux distro. For Example: redhat6, centos6.
meta_json: the json object including all the metadata of dependencies.
cvmfs_http_proxy: HTTP_PROXY environmetn variable used to access CVMFS by Parrot
Returns:
None
"""
#Here we use the cctools meta from the local cache (which includes all the meta including cvmfs, globus, fuse and so on). Even if the user may install cctools by himself on the machine, the configuration of the local installation may be not what we want. For example, the user may just configure like this `./configure --prefix ~/cctools`.
#older cctools releases (e.g., 4.4) do not support the --no-set-foreground feature.
#user_cmd[0] = dest + "/bin/parrot_run --no-set-foreground /bin/sh -c 'cd " + cwd_setting + "; " + user_cmd[0] + "'"
if cvmfs_http_proxy:
user_cmd[0] = "export HTTP_PROXY=" + cvmfs_http_proxy + "; " + cctools_dest + "/bin/parrot_run --no-set-foreground /bin/sh -c 'cd " + cwd_setting + "; " + user_cmd[0] + "'"
else:
user_cmd[0] = cctools_dest + "/bin/parrot_run --no-set-foreground /bin/sh -c 'cd " + cwd_setting + "; " + user_cmd[0] + "'"
logging.debug("The parrotized user_cmd: %s" % user_cmd[0])
def chrootize_user_cmd(user_cmd, cwd_setting):
"""Modify the user's command when the sandbox_mode is chroot. This check should be done after `parrotize_user_cmd`.
The cases when this function should be called: sandbox_mode == chroot
Args:
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
Returns:
the modified version of the user's cmd.
"""
#By default, the directory of entering chroot is /. So before executing the user's command, first change the directory to the $PWD environment variable.
user_cmd[0] = 'chroot / /bin/sh -c "cd %s; %s"' %(cwd_setting, user_cmd[0])
return user_cmd
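# Illustrative sketch (not called by umbrella) of chrootize_user_cmd(): the user's
# command is wrapped so that it first cds into the original working directory
# inside the chroot jail. The command and directory below are hypothetical.
def _example_chrootize_user_cmd():
	user_cmd = ["ls -la"]
	return chrootize_user_cmd(user_cmd, "/tmp/umbrella_sandbox")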
def software_install(mount_dict, env_para_dict, software_spec, meta_json, sandbox_dir, pac_install_destructive, osf_auth, name=None):
""" Installation each software dependency specified in the software section of the specification.
Args:
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
software_spec: the software section of the specification
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
pac_install_destructive: whether this is to install packages through package manager in destructive mode
osf_auth: the osf authentication info including osf_username and osf_password.
name: if name is specified, then only the specified item will be installed. All the other items in the software section will be ignored.
Returns:
None.
"""
print "Installing software dependencies ..."
for item in software_spec:
if name and name != item:
continue
# always first check whether the attribute is set or not inside the umbrella specification file.
id = ''
if software_spec[item].has_key('id'):
id = software_spec[item]['id']
mountpoint = ''
if software_spec[item].has_key('mountpoint'):
mountpoint = software_spec[item]['mountpoint']
mount_env = ''
if software_spec[item].has_key('mount_env'):
mount_env = software_spec[item]['mount_env']
action = 'unpack'
if software_spec[item].has_key('action'):
action = software_spec[item]['action'].lower()
if mount_env and not mountpoint:
result = meta_search(meta_json, item, id)
env_para_dict[mount_env] = attr_check(item, result, "source", 1)
else:
if mount_env and mountpoint:
env_para_dict[mount_env] = mountpoint
mount_value = dependency_process(item, id, action, meta_json, sandbox_dir, osf_auth)
if len(mount_value) > 0:
logging.debug("Add mountpoint (%s:%s) into mount_dict", mountpoint, mount_value)
if pac_install_destructive:
parent_dir = os.path.dirname(mountpoint)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
elif not os.path.isdir(parent_dir):
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not a directory!\n", parent_dir)
sys.exit("%s is not a directory!\n" % parent_dir)
if not os.path.exists(mountpoint):
cmd = "mv -f %s %s/" % (mount_value, parent_dir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
else:
mount_dict[mountpoint] = mount_value
def data_install(data_spec, meta_json, sandbox_dir, mount_dict, env_para_dict, osf_auth, name=None):
"""Process data section of the specification.
At the beginning of the function, mount_dict only includes items for software and os dependencies. After this function is done, all the items for data dependencies will be added into mount_dict.
Args:
data_spec: the data section of the specification.
meta_json: the json object including all the metadata of dependencies.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
osf_auth: the osf authentication info including osf_username and osf_password.
name: if name is specified, then only the specified item will be installed. All the other items in the software section will be ignored.
Returns:
None
"""
print "Installing data dependencies ..."
for item in data_spec:
if name and name != item:
continue
id = ''
if data_spec[item].has_key('id'):
id = data_spec[item]['id']
mountpoint = ''
if data_spec[item].has_key('mountpoint'):
mountpoint = data_spec[item]['mountpoint']
mount_env = ''
if data_spec[item].has_key('mount_env'):
mount_env = data_spec[item]['mount_env']
action = 'unpack'
if data_spec[item].has_key('action'):
action = data_spec[item]['action']
if mount_env and not mountpoint:
result = meta_search(meta_json, item, id)
env_para_dict[mount_env] = attr_check(item, result, "source", 1)
else:
mount_value = data_dependency_process(item, id, meta_json, sandbox_dir, action, osf_auth)
logging.debug("Add mountpoint (%s:%s) into mount_dict", mountpoint, mount_value)
mount_dict[mountpoint] = mount_value
if mount_env and mountpoint:
env_para_dict[mount_env] = mountpoint
def get_linker_path(hardware_platform, os_image_dir):
"""Return the path of ld-linux.so within the downloaded os image dependency
Args:
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
os_image_dir: the path of the OS image inside the umbrella local cache.
Returns:
If the dynamic linker is found within the OS image, return its fullpath.
Otherwise, returns None.
"""
#env_list is directly under the directory of the downloaded os image dependency
if hardware_platform == "x86_64":
p = os_image_dir + "/lib64/ld-linux-x86-64.so.2"
if os.path.exists(p):
return p
else:
return None
else:
return None
def construct_docker_volume(input_dict, mount_dict, output_f_dict, output_d_dict):
"""Construct the docker volume parameters based on mount_dict.
Args:
input_dict: the setting of input files specified by the --inputs option.
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
Returns:
volume_paras: all the `-v` options for the docker command.
"""
if "/" in mount_dict:
del mount_dict["/"] #remove "/" from the mount_dict to avoid messing the root directory of the host machine
volume_paras = ""
for key in mount_dict:
volume_paras = volume_paras + " -v " + mount_dict[key] + ":" + key + " "
for key in input_dict:
volume_paras = volume_paras + " -v " + input_dict[key] + ":" + key + " "
for key in output_f_dict:
volume_paras = volume_paras + " -v " + output_f_dict[key] + ":" + key + " "
for key in output_d_dict:
volume_paras = volume_paras + " -v " + output_d_dict[key] + ":" + key + " "
return volume_paras
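# Illustrative sketch (not called by umbrella) of construct_docker_volume(): every
# mapping becomes a `-v actual_path:access_path` docker option. The paths below
# are hypothetical.
def _example_construct_docker_volume():
	mount_dict = {"/software/python-2.7": "/tmp/umbrella/cache/abc/python-2.7"}
	input_dict = {"/tmp/input.txt": "/home/user/input.txt"}
	return construct_docker_volume(input_dict, mount_dict, {}, {})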
def obtain_path(os_image_dir, sw_mount_dict):
"""Get the path environment variable from envfile and add the mountpoints of software dependencies into it
the envfile here is named env_list under the OS image.
Args:
os_image_dir: the path of the OS image inside the umbrella local cache.
sw_mount_dict: a dict only including all the software mounting items.
Returns:
path_env: the new value for PATH.
"""
path_env = ''
if os.path.exists(os_image_dir + "/env_list") and os.path.isfile(os_image_dir + "/env_list"):
with open(os_image_dir + "/env_list", "rb") as f:
for line in f:
if line[:5] == 'PATH=':
path_env = line[5:-1]
break
else:
path_env = '.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin'
for key in sw_mount_dict:
path_env = key + "/bin:" + path_env
return path_env
def transfer_env_para_docker(env_para_dict):
"""Transfer the env_para_dict into the docker `-e` options.
Args:
env_para_dict: the environment variables which need to be set for the execution of the user's command.
Returns:
env_options: the docker `-e` options constructed from env_para_dict.
"""
env_options = ''
for key in env_para_dict:
if key:
env_options = env_options + ' -e "' + key + '=' + env_para_dict[key] + '" '
return env_options
def collect_software_bin(host_cctools_path, sw_mount_dict):
"""Construct the path environment from the mountpoints of software dependencies.
Each software meta has a bin subdir containing all its executables.
Args:
host_cctools_path: the path of cctools under the umbrella local cache.
sw_mount_dict: a dict only including all the software mounting items.
Returns:
extra_path: the paths which are extracted from sw_mount_dict and host_cctools_path, and needed to be added into PATH.
"""
extra_path = ""
for key in sw_mount_dict:
if key != '/':
extra_path += '%s/bin:' % key
if host_cctools_path:
extra_path += '%s/bin:' % host_cctools_path
return extra_path
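# Illustrative sketch (not called by umbrella) of collect_software_bin(): each
# software mountpoint contributes its bin subdir, plus the bin dir of cctools.
# The paths below are hypothetical.
def _example_collect_software_bin():
	sw_mount_dict = {"/software/python-2.7": "/tmp/umbrella/cache/abc/python-2.7"}
	return collect_software_bin("/tmp/umbrella/cache/cctools-5.2.0-redhat6-x86_64", sw_mount_dict)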
def in_local_passwd():
"""Judge whether the current user exists in /etc/passwd.
Returns:
If the current user is inside /etc/passwd, returns 'yes'.
Otherwise, returns 'no'.
"""
user_name = getpass.getuser()
with open('/etc/passwd') as f:
for line in f:
if line[:len(user_name)] == user_name:
logging.debug("%s is included in /etc/passwd!", user_name)
return 'yes'
logging.debug("%s is not included in /etc/passwd!", user_name)
return 'no'
def in_local_group():
"""Judge whether the current user's group exists in /etc/group.
Returns:
If the current user's group exists in /etc/group, returns 'yes'.
Otherwise, returns 'no'.
"""
group_name = grp.getgrgid(os.getgid())[0]
with open('/etc/group') as f:
for line in f:
if line[:len(group_name)] == group_name:
logging.debug("%s is included in /etc/group!", group_name)
return 'yes'
logging.debug("%s is not included in /etc/group!", group_name)
return 'no'
def create_fake_mount(os_image_dir, sandbox_dir, mount_list, path):
"""For each ancestor dir B of path (including path iteself), check whether it exists in the rootfs and whether it exists in the mount_list and
whether it exists in the fake_mount directory inside the sandbox.
If B is inside the rootfs or the fake_mount dir, do nothing. Otherwise, create a fake directory inside the fake_mount.
Reason: the `cd` shell builtin does a stat syscall on each level of the ancestor dirs of a path,
so every ancestor dir must exist somehow; without creating a mountpoint for each ancestor dir, `cd` would fail.
Args:
os_image_dir: the path of the OS image inside the umbrella local cache.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
mount_list: a list of mountpoints which already been inside the parrot mountlist file.
path: a dir path.
Returns:
mount_str: a string including the mount items which are needed to added into the parrot mount file.
"""
mount_str = ''
if not path: #if the path is NULL, directly return.
return
path_list = []
tmp_path = path
while tmp_path != '/':
path_list.insert(0, tmp_path)
tmp_path = remove_trailing_slashes(os.path.dirname(tmp_path))
for item in path_list:
logging.debug("Judge whether the following mountpoint exists: %s", item)
fake_mount_path = '%s/fake_mount%s' % (sandbox_dir, item)
#if item is under localdir, do nothing.
if item in remove_trailing_slashes(os.path.dirname(sandbox_dir)):
break
if not os.path.exists(os_image_dir + item) and item not in mount_list and not os.path.exists(fake_mount_path):
logging.debug("The mountpoint (%s) does not exist, create a fake mountpoint (%s) for it!", item, fake_mount_path)
os.makedirs(fake_mount_path)
mount_str += '%s %s\n' % (item, fake_mount_path)
else:
logging.debug("The mountpoint (%s) already exists, do nothing!", item)
return mount_str
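# Illustrative sketch (not called by umbrella) of create_fake_mount(): for a path that
# exists neither in the OS image nor in the mount list, a fake directory is created
# under <sandbox_dir>/fake_mount for each ancestor level. The scratch directories
# below stand in for the real OS image and sandbox and are only created if this
# sketch is run by hand.
def _example_create_fake_mount():
	import tempfile
	os_image_dir = tempfile.mkdtemp()   # stands in for the unpacked OS image
	sandbox_dir = tempfile.mkdtemp()    # stands in for the umbrella sandbox
	# neither /cvmfs nor /cvmfs/cms.cern.ch exists in the empty image, so two
	# fake mount entries are returned (and the fake dirs are created).
	return create_fake_mount(os_image_dir, sandbox_dir, [], '/cvmfs/cms.cern.ch')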
def remove_trailing_slashes(path):
"""Remove the trailing slashes of a string
Args:
path: a path, which can be any string.
Returns:
path: the new path without any trailing slashes.
"""
while len(path) > 1 and path.endswith('/'):
path = path[:-1]
return path
def construct_mountfile_full(sandbox_dir, os_image_dir, mount_dict, input_dict, output_f_dict, output_d_dict, cvmfs_cms_siteconf_mountpoint):
"""Create the mountfile if parrot is used to create a sandbox for the application and a separate rootfs is needed.
The trick here is the adding sequence does matter. The latter-added items will be checked first during the execution.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
os_image_dir: the path of the OS image inside the umbrella local cache.
mount_dict: all the mount items extracted from the specification file and possible implicit dependencies like cctools.
input_dict: the setting of input files specified by the --inputs option
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
Returns:
the path of the mountfile.
"""
mount_list = []
mountfile_path = sandbox_dir + "/.__mountlist"
with open(mountfile_path, "wb") as mountfile:
new_root = mount_dict["/"]
mountfile.write("/ " + new_root + "\n")
mount_list.append('/')
del mount_dict["/"]
mountfile.write(new_root + " " + new_root + "\n") #this one is needed to avoid recuisive path resolution.
mount_list.append(new_root)
mountfile.write("%s %s\n" % (os.path.dirname(sandbox_dir), os.path.dirname(sandbox_dir)))
mount_list.append(os.path.dirname(sandbox_dir))
logging.debug("Adding items from mount_dict into %s", mountfile_path)
for key in mount_dict:
#os.path.dirname('/a/b/') is '/a/b'. Therefore, before and after calling dirname, use remove_trailing_slashes to remove the trailing slashes.
key = remove_trailing_slashes(key)
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(key)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mount_list.append(key)
mount_list.append(mount_dict[key])
mountfile.write(key + " " + mount_dict[key] + "\n")
mountfile.write(mount_dict[key] + " " + mount_dict[key] + "\n")
for key in output_f_dict:
mountfile.write(key + " " + output_f_dict[key] + "\n")
for key in output_d_dict:
mountfile.write(key + " " + output_d_dict[key] + "\n")
#common-mountlist includes all the common mountpoints (/proc, /dev, /sys, /mnt, /disc, /selinux)
if os.path.exists(os_image_dir + "/common-mountlist") and os.path.isfile(os_image_dir + "/common-mountlist"):
logging.debug("Adding items from %s/common-mountlist into %s", os_image_dir, mountfile_path)
with open(os_image_dir + "/common-mountlist", "rb") as f:
for line in f:
tmplist = line.split(' ')
item = remove_trailing_slashes(tmplist[0])
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(item)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mount_list.append(tmplist[0])
mountfile.write(line)
logging.debug("Add sandbox_dir(%s) into %s", sandbox_dir, mountfile_path)
mountfile.write(sandbox_dir + ' ' + sandbox_dir + '\n')
mount_list.append(sandbox_dir)
logging.debug("Add /etc/hosts and /etc/resolv.conf into %s", mountfile_path)
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, '/etc')
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mountfile.write('/etc/hosts /etc/hosts\n')
mount_list.append('/etc/hosts')
mountfile.write('/etc/resolv.conf /etc/resolv.conf\n')
mount_list.append('/etc/resolv.conf')
#ND workstations use NSCD (Name Service Cache Daemon) to handle the passwd, group and hosts services. First check whether the current uid and gid are in /etc/passwd and /etc/group; if yes, use them. Otherwise, construct separate passwd and group files.
#If the current user name and group can not be found in /etc/passwd and /etc/group, a fake passwd and group file will be constructed under sandbox_dir.
existed_user = in_local_passwd()
if existed_user == 'yes':
logging.debug("Add /etc/passwd into %s", mountfile_path)
mountfile.write('/etc/passwd /etc/passwd\n')
else:
logging.debug("Construct a fake passwd file: .passwd, add .passwd into %s", mountfile_path)
with open('.passwd', 'w+') as f:
f.write('%s:x:%d:%d:unknown:%s:%s\n' % (getpass.getuser(), os.getuid(), os.getgid(), sandbox_dir + '/' + getpass.getuser(), os.environ['SHELL']))
mountfile.write('/etc/passwd %s/.passwd\n' % (sandbox_dir))
logging.debug("Construct a fake acl file: .__acl, add .__acl into %s", mountfile_path)
with open('.__acl', 'w+') as acl_file:
acl_file.write('%s rwlax\n' % getpass.getuser())
mount_list.append('/etc/passwd')
#getpass.getuser() returns the login name of the user
#os.makedirs(getpass.getuser()) #it is not really necessary to create this dir.
existed_group = in_local_group()
if existed_group == 'yes':
logging.debug("Add /etc/group into %s", mountfile_path)
mountfile.write('/etc/group /etc/group\n')
else:
logging.debug("Construct a fake group file: .group, add .group into %s", mountfile_path)
with open('.group', 'w+') as f:
f.write('%s:x:%d:%d\n' % (grp.getgrgid(os.getgid())[0], os.getgid(), os.getuid()))
mountfile.write('/etc/group %s/.group\n' % (sandbox_dir))
mount_list.append('/etc/group')
#add /var/run/nscd/socket into mountlist
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, '/var/run/nscd')
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mountfile.write('/var/run/nscd/socket ENOENT\n')
mount_list.append('/var/run/nscd/socket')
if os.path.exists(os_image_dir + "/special_files") and os.path.isfile(os_image_dir + "/special_files"):
logging.debug("Add %s/special_files into %s", os_image_dir, mountfile_path)
with open(os_image_dir + "/special_files", "rb") as f:
for line in f:
tmplist = line.split(' ')
item = remove_trailing_slashes(tmplist[0])
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(item)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mount_list.append(tmplist[0])
mountfile.write(line)
#add the input_dict into the mountfile
logging.debug("Add items from input_dict into %s", mountfile_path)
for key in input_dict:
key = remove_trailing_slashes(key)
mount_str = create_fake_mount(os_image_dir, sandbox_dir, mount_list, remove_trailing_slashes(os.path.dirname(key)))
if mount_str:
logging.debug("Adding fake mount items (%s) into %s", mount_str, mountfile_path)
mountfile.write(mount_str)
mountfile.write(key + " " + input_dict[key] + "\n")
mount_list.append(key)
if cvmfs_cms_siteconf_mountpoint == '':
logging.debug('cvmfs_cms_siteconf_mountpoint is null')
else:
mountfile.write('/cvmfs /cvmfs\n')
mountfile.write(cvmfs_cms_siteconf_mountpoint + '\n')
logging.debug('cvmfs_cms_siteconf_mountpoint is not null: %s', cvmfs_cms_siteconf_mountpoint)
return mountfile_path
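# A sketch of the resulting .__mountlist for Parrot (all paths below are hypothetical):
#   / /tmp/umbrella/cache/<os_id>/redhat-6.5-x86_64
#   /tmp/umbrella/cache/<os_id>/redhat-6.5-x86_64 /tmp/umbrella/cache/<os_id>/redhat-6.5-x86_64
#   /software/cmssw-5.2.5 /tmp/umbrella/cache/<cmssw_id>/cmssw-5.2.5
#   /etc/hosts /etc/hosts
#   /etc/passwd /etc/passwd
#   /var/run/nscd/socket ENOENT
# Each line maps an access path used by the task to its actual location (or to ENOENT to hide it).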
def construct_mountfile_cvmfs_cms_siteconf(sandbox_dir, cvmfs_cms_siteconf_mountpoint):
""" Create the mountfile if chroot and docker is used to execute a CMS application and the host machine does not have cvmfs installed.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
Returns:
the path of the mountfile.
"""
mountfile_path = sandbox_dir + "/.__mountlist"
with open(mountfile_path, "wb") as f:
f.write(cvmfs_cms_siteconf_mountpoint + '\n')
logging.debug('cvmfs_cms_siteconf_mountpoint is not null: %s', cvmfs_cms_siteconf_mountpoint)
return mountfile_path
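# The resulting mountlist contains a single line of the form (hypothetical cache path):
#   /cvmfs/cms.cern.ch/SITECONF/local /tmp/umbrella/cache/<siteinfo_id>/local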
def construct_mountfile_easy(sandbox_dir, input_dict, output_f_dict, output_d_dict, mount_dict, cvmfs_cms_siteconf_mountpoint):
""" Create the mountfile if parrot is used to create a sandbox for the application and a separate rootfs is not needed.
Note that the order in which items are added matters: later-added items are checked first during execution.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
mount_dict: all the mount items extracted from the specification file and possible implicit dependencies like cctools.
input_dict: the setting of input files specified by the --inputs option
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
Returns:
the path of the mountfile.
"""
mountfile_path = sandbox_dir + "/.__mountlist"
with open(mountfile_path, "wb") as f:
for key in input_dict:
f.write(key + " " + input_dict[key] + "\n")
for key in output_f_dict:
f.write(key + " " + output_f_dict[key] + "\n")
for key in output_d_dict:
f.write(key + " " + output_d_dict[key] + "\n")
for key in mount_dict:
f.write(key + " " + mount_dict[key] + "\n")
f.write(mount_dict[key] + " " + mount_dict[key] + "\n")
if cvmfs_cms_siteconf_mountpoint == '':
logging.debug('cvmfs_cms_siteconf_mountpoint is null')
else:
f.write(cvmfs_cms_siteconf_mountpoint + '\n')
logging.debug('cvmfs_cms_siteconf_mountpoint is not null: %s', cvmfs_cms_siteconf_mountpoint)
return mountfile_path
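# A sketch of the resulting mountlist when no separate rootfs is needed (hypothetical paths);
# each line maps the access path used by the task to the actual storage path:
#   /tmp/inputs/data.txt /home/user/data.txt
#   /software/povray /tmp/umbrella/cache/<povray_id>/povray
#   /tmp/umbrella/cache/<povray_id>/povray /tmp/umbrella/cache/<povray_id>/povray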
def construct_env(sandbox_dir, os_image_dir):
""" Read env_list inside an OS image and save all the environment variables into a dictionary.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
os_image_dir: the path of the OS image inside the umbrella local cache.
Returns:
env_dict: a dictionary which includes all the environment variables from env_list
"""
if os.path.exists(os_image_dir + "/env_list") and os.path.isfile(os_image_dir + "/env_list"):
with open(os_image_dir + "/env_list", "rb") as f:
env_dict = {}
for line in f:
index = line.find("=")
key = line[:index]
value = line[(index+1):-1]
env_dict[key] = value
return env_dict
return {}
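# env_list is expected to hold one KEY=VALUE pair per line, e.g. (hypothetical content):
#   PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
#   LANG=en_US.UTF-8
# which construct_env would turn into {'PATH': '/usr/local/sbin:...', 'LANG': 'en_US.UTF-8'}.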
def has_docker_image(hardware_platform, distro_name, distro_version, tag):
"""Check whether the required docker image exists on the local machine or not.
Args:
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
tag: the tag of the expected docker image. tag is os_id
Returns:
If the required docker image exists on the local machine, returns 'yes'.
Otherwise, returns 'no'.
"""
name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
cmd = "docker images %s | awk '{print $2}'" % (name)
logging.debug("Start to run the command: %s", cmd)
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#str = "\n%s\s+" % (name)
if stdout.find(tag) == -1:
return 'no'
else:
return 'yes'
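# A sketch of the check (values are hypothetical): with distro_name 'redhat', distro_version '6.5',
# hardware_platform 'x86_64' and tag '62aa9d5d...', the command becomes
#   docker images redhat-6.5-x86_64 | awk '{print $2}'
# and the image is considered present only if the printed tag column contains the os_id.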
def create_docker_image(sandbox_dir, hardware_platform, distro_name, distro_version, tag):
"""Create a docker image based on the cached os image directory.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
tag: the tag of the expected docker image. tag is os_id
Returns:
If the docker image is imported from the tarball successfully, returns None.
Otherwise, directly exit.
"""
name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
location = os.path.dirname(sandbox_dir) + '/cache/' + tag + '/' + name
#docker container runs as root user, so use the owner option of tar command to set the owner of the docker image
cmd = 'cd ' + location + '; tar --owner=root -c .|docker import - ' + name + ":" + tag + '; cd -'
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
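# A sketch of the generated import command (hypothetical cache layout and tag):
#   cd <cache>/62aa9d5d.../redhat-6.5-x86_64; tar --owner=root -c .|docker import - redhat-6.5-x86_64:62aa9d5d...; cd -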
def construct_chroot_mount_dict(sandbox_dir, output_dir, input_dict, need_separate_rootfs, os_image_dir, mount_dict, host_cctools_path):
"""Construct directory mount list and file mount list for chroot. chroot requires the target mountpoint must be created within the chroot jail.
Args:
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
output_dir: the output directory on the host machine.
input_dict: the setting of input files specified by the --inputs option.
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
os_image_dir: the path of the OS image inside the umbrella local cache.
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
host_cctools_path: the path of cctools under the umbrella local cache.
Returns:
a tuple includes the directory mount list and the file mount list
"""
dir_dict = {}
file_dict = {}
logging.debug("need_separate_rootfs: %d", need_separate_rootfs)
if need_separate_rootfs == 1:
logging.debug("Add %s into dir_dict of chroot", os_image_dir + "/common-mountlist")
with open(os_image_dir + "/common-mountlist") as f:
for line in f:
index = line.find(' ')
item = line[:index]
dir_dict[item] = item
#special_files lists the paths of all files of special types (block, character, socket, pipe)
logging.debug("Add %s into dir_dict of chroot", os_image_dir + "/special_files")
with open(os_image_dir + "/special_files") as f:
for line in f:
index = line.find(' ')
item = line[:index]
if os.path.exists(item):
file_dict[item] = item
if host_cctools_path:
logging.debug("Add cctools binary (%s) into dir_dict of chroot", host_cctools_path)
dir_dict[host_cctools_path] = host_cctools_path
logging.debug("Add sandbox_dir and output_dir into dir_dict of chroot")
dir_dict[sandbox_dir] = sandbox_dir
dir_dict[output_dir] = output_dir
logging.debug("Add items from mount_dict into dir_dict of chroot")
for key in mount_dict:
if key != '/':
value = mount_dict[key]
mode = os.lstat(value).st_mode
if S_ISDIR(mode):
dir_dict[value] = key
else:
file_dict[value] = key
logging.debug("Add /etc/passwd /etc/group /etc/hosts /etc/resolv.conf into file_dict of chroot")
file_dict['/etc/passwd'] = '/etc/passwd'
file_dict['/etc/group'] = '/etc/group'
file_dict['/etc/hosts'] = '/etc/hosts'
file_dict['/etc/resolv.conf'] = '/etc/resolv.conf'
logging.debug("Add input_dict into file_dict of chroot")
for key in input_dict:
value = input_dict[key]
mode = os.lstat(value).st_mode
if S_ISDIR(mode):
dir_dict[value] = key
else:
file_dict[value] = key
logging.debug("dir_dict:")
logging.debug(dir_dict)
logging.debug("file_dict:")
logging.debug(file_dict)
return (dir_dict, file_dict)
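# A sketch of the returned tuple (hypothetical paths); in both dicts the key is the path on the
# host and the value is the target mountpoint inside the chroot jail:
#   dir_dict  = {'/proc': '/proc', '<cache>/cctools-x86_64-redhat6': '<cache>/cctools-x86_64-redhat6',
#                '<sandbox_dir>': '<sandbox_dir>', '<cache>/<cms_id>/cmssw': '/software/cmssw'}
#   file_dict = {'/etc/passwd': '/etc/passwd', '/etc/resolv.conf': '/etc/resolv.conf'}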
def chroot_mount_bind(dir_dict, file_dict, sandbox_dir, need_separate_rootfs, hardware_platform, distro_name, distro_version):
"""Create each target mountpoint under the cached os image directory through `mount --bind`.
Args:
dir_dict: a dict including all the directory mountpoints needed to be created inside the OS image.
file_dict: a dict including all the file mountpoints needed to be created inside the OS image.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
Returns:
If no error happens, returns None.
Otherwise, directly exit.
"""
logging.debug("Use mount --bind to redirect mountpoints")
if need_separate_rootfs == 1:
os_image_name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
os_image_path = os.path.dirname(sandbox_dir) + '/cache/' + os_image_name
else:
os_image_path = '/'
#mount --bind -o ro hostdir jaildir
for key in dir_dict:
jaildir = '%s%s' % (os_image_path, dir_dict[key])
hostdir = key
#if jaildir and hostdir are the same, there is no need to do the mount.
if jaildir != hostdir:
if not os.path.exists(jaildir):
os.makedirs(jaildir)
cmd = 'mount --bind -o ro %s %s' % (hostdir, jaildir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
for key in file_dict:
jailfile = '%s%s' % (os_image_path, file_dict[key])
hostfile = key
if jailfile != hostfile:
if not os.path.exists(jailfile):
d = os.path.dirname(jailfile)
if not os.path.exists(d):
os.makedirs(d)
with open(jailfile, 'w+') as f:
pass
cmd = 'mount --bind -o ro %s %s' % (hostfile, jailfile)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
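# For example (hypothetical), a dir_dict entry {'<cache>/<cms_id>/cmssw': '/software/cmssw'} leads to:
#   mount --bind -o ro <cache>/<cms_id>/cmssw <os_image_path>/software/cmssw
# after the target directory <os_image_path>/software/cmssw has been created inside the jail.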
def chroot_post_process(dir_dict, file_dict, sandbox_dir, need_separate_rootfs, hardware_platform, distro_name, distro_version):
"""Remove all the created target mountpoints within the cached os image directory.
It is not necessary to change the mode of the output dir, because only the root user can use the chroot method.
Args:
dir_dict: a dict including all the directory mountpoints needed to be created inside the OS image.
file_dict: a dict including all the file mountpoints needed to be created inside the OS image.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
Returns:
If no error happens, returns None.
Otherwise, directly exit.
"""
logging.debug("post process of chroot")
if need_separate_rootfs == 1:
os_image_name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
os_image_path = os.path.dirname(sandbox_dir) + '/cache/' + os_image_name
else:
os_image_path = '/'
#file_dict must be processed ahead of dir_dict, because we can not umount a directory if there are other mountpoints created for files under it.
for key in file_dict:
jailfile = '%s%s' % (os_image_path, file_dict[key])
hostfile = key
if jailfile != hostfile:
if os.path.exists(jailfile):
cmd = 'umount -f %s' % (jailfile)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
for key in dir_dict:
jaildir = '%s%s' % (os_image_path, dir_dict[key])
hostdir = key
if jaildir != hostdir:
if os.path.exists(jaildir):
cmd = 'umount -f %s' % (jaildir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#remove all the empty ancestor directories
parent_dir = jaildir
mode = os.lstat(parent_dir).st_mode
if S_ISDIR(mode):
while len(os.listdir(parent_dir)) == 0:
os.rmdir(parent_dir)
parent_dir = os.path.dirname(parent_dir)
def workflow_repeat(cwd_setting, sandbox_dir, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, hardware_platform, host_linux_distro, distro_name, distro_version, need_separate_rootfs, os_image_dir, os_image_id, host_cctools_path, cvmfs_cms_siteconf_mountpoint, mount_dict, sw_mount_dict, meta_json, new_os_image_dir):
"""Run user's task with the help of the sandbox techniques, which currently inculde chroot, parrot, docker.
Args:
cwd_setting: the current working directory for the execution of the user's command.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
sandbox_mode: the execution engine.
output_f_dict: the mappings of output files (key is the file path used by the application; value is the file path the user specifies.)
output_d_dict: the mappings of output dirs (key is the dir path used by the application; value is the dir path the user specified.)
input_dict: the setting of input files specified by the --inputs option.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
user_cmd: the user's command.
hardware_platform: the architecture of the required hardware platform (e.g., x86_64).
distro_name: the name of the required OS (e.g., redhat).
distro_version: the version of the required OS (e.g., 6.5).
need_separate_rootfs: whether a separate rootfs is needed to execute the user's command.
os_image_dir: the path of the OS image inside the umbrella local cache.
os_image_id: the id of the OS image.
host_cctools_path: the path of cctools under the umbrella local cache.
cvmfs_cms_siteconf_mountpoint: a string in the format of '/cvmfs/cms.cern.ch/SITECONF/local <SITEINFO dir in the umbrella local cache>/local'
mount_dict: a dict including each mounting item in the specification, whose key is the access path used by the user's task; whose value is the actual storage path.
sw_mount_dict: a dict only including all the software mounting items.
meta_json: the json object including all the metadata of dependencies.
new_os_image_dir: the path of the newly created OS image with all the packages installed by package manager.
Returns:
If no error happens, returns None.
Otherwise, directly exit.
"""
#sandbox_dir will be the home directory of the sandbox
print 'Executing the application ....'
if not os.path.exists(sandbox_dir):
os.makedirs(sandbox_dir)
logging.debug("chdir(%s)", sandbox_dir)
os.chdir(sandbox_dir) #here, we indeed want to chdir to sandbox_dir, not cwd_setting, to do preparation work like create mountlist file for Parrot.
#at this point, all the software should be under the cache dir, all the mountpoint of the software should be in mount_dict
print "Execution engine: %s" % sandbox_mode
logging.debug("execution engine: %s", sandbox_mode)
logging.debug("need_separate_rootfs: %d", need_separate_rootfs)
if sandbox_mode == "destructive":
env_dict = os.environ
if cvmfs_cms_siteconf_mountpoint:
logging.debug("Create a parrot mountfile for the siteconf meta ...")
env_dict['PARROT_MOUNT_FILE'] = construct_mountfile_cvmfs_cms_siteconf(sandbox_dir, cvmfs_cms_siteconf_mountpoint)
logging.debug("Add env_para_dict into environment variables")
for key in env_para_dict:
env_dict[key] = env_para_dict[key]
logging.debug("Add software binary into PATH")
extra_path = collect_software_bin(host_cctools_path, sw_mount_dict)
if "PATH" not in env_dict:
env_dict['PATH'] = ""
env_dict['PATH'] = '%s:%s' % (env_dict['PATH'], extra_path[:-1])
#move software and data into the location
for key in mount_dict:
parent_dir = os.path.dirname(key)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
elif not os.path.isdir(parent_dir):
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not a directory!\n", parent_dir)
sys.exit("%s is not a directory!\n" % parent_dir)
if not os.path.exists(key):
cmd = "mv -f %s %s/" % (mount_dict[key], parent_dir)
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
print "Start executing the user's task: %s" % user_cmd[0]
cmd = "cd %s; %s" % (cwd_setting, user_cmd[0])
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
logging.debug("Moving the outputs to the expected locations ...")
print "Moving the outputs to the expected locations ..."
for key in output_f_dict:
cmd = "mv -f %s %s" % (key, output_f_dict[key])
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
for key in output_d_dict:
cmd = "mv -f %s %s" % (key, output_d_dict[key])
rc, stdout, stderr = func_call_withenv(cmd, env_dict)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
elif sandbox_mode == "docker":
if need_separate_rootfs == 1:
if has_docker_image(hardware_platform, distro_name, distro_version, os_image_id) == 'no':
logging.debug("Start to construct a docker image from the os image")
create_docker_image(sandbox_dir, hardware_platform, distro_name, distro_version, os_image_id)
logging.debug("Finish constructing a docker image from the os image")
if cvmfs_cms_siteconf_mountpoint:
item = cvmfs_cms_siteconf_mountpoint.split(' ')[1]
logging.debug("Adding the siteconf meta (%s) into mount_dict", item)
mount_dict[item] = item
logging.debug("Create a parrot mountfile for the siteconf meta (%s)", item)
env_para_dict['PARROT_MOUNT_FILE'] = construct_mountfile_cvmfs_cms_siteconf(sandbox_dir, cvmfs_cms_siteconf_mountpoint)
logging.debug("Add a volume item (%s:%s) for the sandbox_dir", sandbox_dir, sandbox_dir)
#-v /home/hmeng/umbrella_test/output:/home/hmeng/umbrella_test/output
volume_output = " -v %s:%s " % (sandbox_dir, sandbox_dir)
#-v /home/hmeng/umbrella_test/cache/git-x86_64-redhat5:/software/git-x86_64-redhat5/
logging.debug("Start to construct other volumes from input_dict")
volume_parameters = construct_docker_volume(input_dict, mount_dict, output_f_dict, output_d_dict)
#-e "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/software/git-x86_64-redhat5/bin"
logging.debug("Set the environment variables ....")
path_env = obtain_path(os_image_dir, sw_mount_dict)
other_envs = transfer_env_para_docker(env_para_dict)
docker_image_name = "%s-%s-%s" %(distro_name, distro_version, hardware_platform)
#by default, docker executes user_cmd as the root user, `chown` is used to change the owner of the output dir to be the user who calls `umbrella`
chown_cmd = 'chown -R %d:%d %s %s %s' % (os.getuid(), os.getgid(), sandbox_dir, ' '.join(output_f_dict), ' '.join(output_d_dict))
#to count the post processing time, this cmd is split into two commands
container_name = "umbrella_%s_%s_%s" % (docker_image_name, os_image_id, os.path.basename(sandbox_dir))
#do not enable the `-i` and `-t` options of Docker; they will fail when the condor execution engine is chosen.
#to allow the exit code of user_cmd to be transferred back, separate the user_cmd and the chown command.
cmd = 'docker run --name %s %s %s -e "PATH=%s" %s %s:%s /bin/sh -c "cd %s; %s"' % (container_name, volume_output, volume_parameters, path_env, other_envs, docker_image_name, os_image_id, cwd_setting, user_cmd[0])
print "Start executing the user's task: %s" % cmd
rc, stdout, stderr = func_call(cmd)
print "\n********** STDOUT of the command **********"
print stdout
print "\n********** STDERR of the command **********"
print stderr
#docker export container_name > tarball
if len(new_os_image_dir) > 0:
if not os.path.exists(new_os_image_dir):
os.makedirs(new_os_image_dir)
os_tar = new_os_image_dir + ".tar"
cmd = "docker export %s > %s" % (container_name, os_tar)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#uncompress the tarball
cmd = "tar xf %s -C %s" % (os_tar, new_os_image_dir)
extract_tar(os_tar, new_os_image_dir, "tar")
#docker rm container_name
cmd = "docker rm %s" % (container_name)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
cmd1 = 'docker run --rm %s %s -e "PATH=%s" %s %s:%s %s' % (volume_output, volume_parameters, path_env, other_envs, docker_image_name, os_image_id, chown_cmd)
rc, stdout, stderr = func_call(cmd1)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
else:
#if a separate rootfs is not needed to execute the user's cmd, another execution engine should be used to run it.
cleanup(tempfile_list, tempdir_list)
logging.debug("Docker execution engine can only be used when a separate rootfs is needed.")
sys.exit("Docker execution engine can only be used when a separate rootfs is needed.\n")
elif sandbox_mode == "parrot":
if need_separate_rootfs == 1:
logging.debug("Construct environment variables ....")
env_dict = construct_env(sandbox_dir, os_image_dir)
env_dict['PWD'] = cwd_setting
logging.debug("Construct mounfile ....")
env_dict['PARROT_MOUNT_FILE'] = construct_mountfile_full(sandbox_dir, os_image_dir, mount_dict, input_dict, output_f_dict, output_d_dict, cvmfs_cms_siteconf_mountpoint)
for key in env_para_dict:
env_dict[key] = env_para_dict[key]
#here, setting the linker will cause strange errors.
logging.debug("Construct dynamic linker path ....")
result = get_linker_path(hardware_platform, os_image_dir)
if not result:
cleanup(tempfile_list, tempdir_list)
logging.critical("Can not find the dynamic linker inside the os image (%s)!", os_image_dir)
sys.exit("Can not find the dynamic linker inside the os image (%s)!\n" % os_image_dir)
env_dict['PARROT_LDSO_PATH'] = result
env_dict['USER'] = getpass.getuser()
#env_dict['HOME'] = sandbox_dir + '/' + getpass.getuser()
logging.debug("Add software binary into PATH")
extra_path = collect_software_bin(host_cctools_path, sw_mount_dict)
if "PATH" not in env_dict:
env_dict['PATH'] = '.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin'
env_dict['PATH'] = '%s%s' % (extra_path, env_dict['PATH'])
print "Start executing the user's task: %s" % user_cmd[0]
rc, stdout, stderr = func_call_withenv(user_cmd[0], env_dict)
print "\n********** STDOUT of the command **********"
print stdout
print "\n********** STDERR of the command **********"
print stderr
else:
env_dict = os.environ
env_dict['PARROT_MOUNT_FILE'] = construct_mountfile_easy(sandbox_dir, input_dict, output_f_dict, output_d_dict, mount_dict, cvmfs_cms_siteconf_mountpoint)
for key in env_para_dict:
env_dict[key] = env_para_dict[key]
if 'PATH' not in env_dict: #if we run umbrella on Condor, Condor will not set PATH by default.
env_dict['PATH'] = '.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin'
logging.debug("PATH is empty, forcely set it to be %s", env_dict['PATH'])
else:
env_dict['PATH'] = '.:' + env_dict['PATH']
logging.debug("Forcibly prepend '.' to PATH")
logging.debug("Add software binary into PATH")
extra_path = collect_software_bin(host_cctools_path, sw_mount_dict)
env_dict['PATH'] = '%s%s' % (extra_path, env_dict['PATH'])
print "Start executing the user's task: %s" % user_cmd[0]
rc, stdout, stderr = func_call_withenv(user_cmd[0], env_dict)
print "\n********** STDOUT of the command **********"
print stdout
print "\n********** STDERR of the command **********"
print stderr
# logging.debug("Removing the parrot mountlist file and the parrot submit file from the sandbox")
# if os.path.exists(env_dict['PARROT_MOUNT_FILE']):
# os.remove(env_dict['PARROT_MOUNT_FILE'])
else:
pass
def condor_process(spec_path, spec_json, spec_path_basename, meta_path, sandbox_dir, output_dir, input_list_origin, user_cmd, cwd_setting, condorlog_path, cvmfs_http_proxy):
"""Process the specification when condor execution engine is chosen
Args:
spec_path: the absolute path of the specification.
spec_json: the json object including the specification.
spec_path_basename: the file name of the specification.
meta_path: the path of the json file including all the metadata information.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
output_dir: the output directory.
input_list_origin: the list of input file paths.
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
condorlog_path: the path of the umbrella log executed on the remote condor execution node.
cvmfs_http_proxy: HTTP_PROXY environment variable used to access CVMFS by Parrot
Returns:
If no errors happen, return None;
Otherwise, directly exit.
"""
if not os.path.exists(sandbox_dir):
os.makedirs(sandbox_dir)
print "Checking the validity of the umbrella specification ..."
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
logging.debug("Setting the environment parameters (hardware, kernel and os) according to the specification file ....")
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("this specification is not complete! You must have a hardware section, a kernel section and a os section!")
sys.exit("this spec has no hardware section\n")
condor_submit_path = sandbox_dir + "/condor_task.submit"
print "Constructing Condor submission file according to the umbrella specification ..."
transfer_inputs = ''
new_input_options = ''
logging.debug("Transform input_list_origin into condor attributes ....")
for item in input_list_origin:
index_equal = item.find('=')
access_path = item[:index_equal]
actual_path = item[(index_equal+1):]
transfer_inputs += ',%s' % (actual_path)
new_input_options += '%s=%s,' % (access_path, os.path.basename(actual_path))
if new_input_options[-1] == ',':
new_input_options = new_input_options[:-1]
logging.debug("transfer_input_files: %s, %s", spec_path, transfer_inputs)
logging.debug("The new_input_options of Umbrella: %s", new_input_options)
condor_output_dir = tempfile.mkdtemp(dir=".")
condor_output_dir = os.path.abspath(condor_output_dir)
condor_log_path = sandbox_dir + '/condor_task.log'
umbrella_fullpath = which_exec("umbrella")
if umbrella_fullpath == None:
cleanup(tempfile_list, tempdir_list)
logging.critical("Failed to find the executable umbrella. Please modify your $PATH.")
sys.exit("Failed to find the executable umbrella. Please modify your $PATH.\n")
logging.debug("The full path of umbrella is: %s" % umbrella_fullpath)
#find cctools_python
cmd = 'which cctools_python'
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
cctools_python_path = stdout[:-1]
condor_submit_file = open(condor_submit_path, "w+")
condor_submit_file.write('universe = vanilla\n')
condor_submit_file.write('executable = %s\n' % cctools_python_path)
if cvmfs_http_proxy:
condor_submit_file.write('arguments = "./umbrella -s local --spec %s --cvmfs_http_proxy %s --meta %s -l condor_umbrella -i \'%s\' -o %s --log condor_umbrella.log run \'%s\'"\n' % (spec_path_basename, cvmfs_http_proxy, os.path.basename(meta_path), new_input_options, os.path.basename(condor_output_dir), user_cmd[0]))
else:
condor_submit_file.write('arguments = "./umbrella -s local --spec %s --meta %s -l condor_umbrella -i \'%s\' -o %s --log condor_umbrella.log run \'%s\'"\n' % (spec_path_basename, os.path.basename(meta_path), new_input_options, os.path.basename(condor_output_dir), user_cmd[0]))
# condor_submit_file.write('PostCmd = "echo"\n')
# condor_submit_file.write('PostArguments = "$?>%s/condor_rc"\n' % os.path.basename(condor_output_dir))
condor_submit_file.write('transfer_input_files = %s, %s, %s, %s%s\n' % (cctools_python_path, umbrella_fullpath, meta_path, spec_path, transfer_inputs))
condor_submit_file.write('transfer_output_files = %s, condor_umbrella.log\n' % os.path.basename(condor_output_dir))
condor_submit_file.write('transfer_output_remaps = "condor_umbrella.log=%s"\n' % condorlog_path)
#the python on the redhat5 machines in the ND condor pool is 2.4. However, umbrella requires python 2.6.* or 2.7*.
if linux_distro == "redhat5":
condor_submit_file.write('requirements = TARGET.Arch == "%s" && TARGET.OpSys == "%s" && TARGET.OpSysAndVer == "redhat6"\n' % (hardware_platform, kernel_name))
else:
#condor_submit_file.write('requirements = TARGET.Arch == "%s" && TARGET.OpSys == "%s" && TARGET.OpSysAndVer == "%s" && TARGET.has_docker == true\n' % (hardware_platform, kernel_name, linux_distro))
condor_submit_file.write('requirements = TARGET.Arch == "%s" && TARGET.OpSys == "%s" && TARGET.OpSysAndVer == "%s"\n' % (hardware_platform, kernel_name, linux_distro))
condor_submit_file.write('environment = PATH=.:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin\n')
condor_submit_file.write('output = %s/condor_stdout\n' % sandbox_dir)
condor_submit_file.write('error = %s/condor_stderr\n' % sandbox_dir)
condor_submit_file.write('log = %s\n' % condor_log_path)
condor_submit_file.write('should_transfer_files = yes\n')
condor_submit_file.write('when_to_transfer_output = on_exit\n')
condor_submit_file.write('queue\n')
condor_submit_file.close()
#submit condor job
print "Submitting the Condor job ..."
cmd = 'condor_submit ' + condor_submit_path
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#keep track of whether the condor job is done
print "Waiting for the job to finish ..."
logging.debug("Waiting for the job to finish ...")
cmd = 'condor_wait %s' % condor_log_path
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
#check the content of condor log file to figure out the exit code of the remote executed umbrella
remote_rc = 500
with open(condor_log_path, 'rb') as f:
content = f.read()
str = "Normal termination (return value "
index1 = content.rfind(str)
index2 = content.find(')', index1)
remote_rc = int(content[(index1 + len(str)):index2])
print "The exit code of the remote executed umbrella found in the condor log file (%s) is %d!" % (condor_log_path, remote_rc)
logging.debug("The exit code of the remote executed umbrella found in the condor log file (%s) is %d!", condor_log_path, remote_rc)
if remote_rc == 500:
cleanup(tempfile_list, tempdir_list)
logging.critical("Can not find the exit code of the remote executed umbrella inside the condor log file (%s)!", condor_log_path)
sys.exit("Can not find the exit code of the remote executed umbrella inside the condor log file (%s)!" % condor_log_path)
elif remote_rc != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("The remote umbrella fails and the exit code is %d.", remote_rc)
sys.exit("The remote umbrella fails and the exit code is %d." % remote_rc)
logging.debug("the condor jos is done, put the output back into the output directory!")
print "the condor jobs is done, put the output back into the output directory!"
#check until the condor job is done, post-processing (put the output back into the output directory)
#the semantics of condor_output_files only supports transferring a dir from the execution node back to the current working dir (here it is condor_output_dir).
os.rename(condor_output_dir, output_dir)
print "move condor_stdout, condor_stderr and condor_task.log from sandbox_dir to output_dir."
logging.debug("move condor_stdout, condor_stderr and condor_task.log from sandbox_dir to output_dir.")
os.rename(sandbox_dir + '/condor_stdout', output_dir + '/condor_stdout')
os.rename(sandbox_dir + '/condor_stderr', output_dir + '/condor_stderr')
os.rename(sandbox_dir + '/condor_task.log', output_dir + '/condor_task.log')
print "Remove the sandbox dir"
logging.debug("Remove the sandbox_dir.")
shutil.rmtree(sandbox_dir)
print "The output has been put into the output dir: %s" % output_dir
def decide_instance_type(cpu_cores, memory_size, disk_size, instances):
""" Compare the required hardware configurations with each instance type, and return the first matched instance type, return 'no' if no matched instance type exist.
We can rank each instance type in the future, so that in the case of multiple matches exit, the closest matched instance type is returned.
Args:
cpu_cores: the number of required cpus (e.g., 1).
memory_size: the memory size requirement (e.g., 2GB). Not case sensitive.
disk_size: the disk size requirement (e.g., 2GB). Not case sensitive.
instances: the instances section of the ec2 json file.
Returns:
If there is no matched instance type, return 'no'.
Otherwise, returns the first matched instance type.
"""
cpu_cores = int(cpu_cores)
memory_size = int(memory_size[:-2])
disk_size = int(disk_size[:-2])
for item in instances:
j = instances[item]
inst_mem = int(float((j["memory"][:-2])))
inst_disk = int(j["disk"][:-2])
if cpu_cores <= int(j["cores"]) and memory_size <= inst_mem and disk_size <= inst_disk:
return item
return 'no'
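# A minimal usage sketch (hypothetical instances section from the ec2 json file):
#   instances = {"m3.medium": {"cores": "1", "memory": "3.75GB", "disk": "4GB"}}
#   decide_instance_type("1", "2GB", "3GB", instances)  ->  "m3.medium"
#   decide_instance_type("8", "2GB", "3GB", instances)  ->  "no"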
def ec2_process(spec_path, spec_json, meta_option, meta_path, ssh_key, ec2_key_pair, ec2_security_group, ec2_instance_type, sandbox_dir, output_option, output_f_dict, output_d_dict, sandbox_mode, input_list, input_list_origin, env_option, env_para_dict, user_cmd, cwd_setting, ec2log_path, cvmfs_http_proxy):
"""
Args:
spec_path: the path of the specification.
spec_json: the json object including the specification.
meta_option: the --meta option.
meta_path: the path of the json file including all the metadata information.
ssh_key: the name of the private key file to use when connecting to an instance.
ec2_key_pair: the path of the key-pair to use when launching an instance.
ec2_security_group: the security group within which the EC2 instance should be run.
ec2_instance_type: the type of the Amazon EC2 instance.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
output_f_dict: the mappings of output files (key is the file path used by the application; value is the file path the user specifies.)
output_d_dict: the mappings of output dirs (key is the dir path used by the application; value is the dir path the user specified.)
sandbox_mode: the execution engine.
input_list: a list including all the absolute path of the input files on the local machine.
input_list_origin: the list of input file paths.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
ec2log_path: the path of the umbrella log executed on the remote EC2 execution node.
cvmfs_http_proxy: HTTP_PROXY environment variable used to access CVMFS by Parrot
Returns:
If no errors happen, return None;
Otherwise, directly exit.
"""
print "Checking the validity of the umbrella specification ..."
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
else:
cleanup(tempfile_list, tempdir_list)
sys.exit("this spec has no hardware section!\n")
#According to the given specification file, the AMI and the instance type can be identified. os and arch can be used to decide the AMI; cores, memory and disk can be used to decide the instance type.
#decide the AMI according to (distro_name, distro_version, hardware_platform)
print "Deciding the AMI according to the umbrella specification ..."
name = '%s-%s-%s' % (distro_name, distro_version, hardware_platform)
if ec2_json.has_key(name):
if os_id[:4] != "ec2:":
for item in ec2_json[name]:
logging.debug("The AMI information is: ")
logging.debug(ec2_json[name][item])
ami = ec2_json[name][item]['ami']
user_name = ec2_json[name][item]['user']
break
else:
if ec2_json[name].has_key(os_id):
logging.debug("The AMI information is: ")
logging.debug(ec2_json[name][os_id])
ami = ec2_json[name][os_id]['ami']
user_name = ec2_json[name][os_id]['user']
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s with the id <%s> is not in the ec2 json file (%s).", name, os_id, ec2_path)
sys.exit("%s with the id <%s> is not in the ec2 json file (%s)." % (name, os_id, ec2_path))
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s is not in the ec2 json file (%s).", name, ec2_path)
sys.exit("%s is not in the ec2 json file (%s).\n" % (name, ec2_path))
#start the instance and obtain the instance id
print "Starting an Amazon EC2 instance ..."
instance_id = get_instance_id(ami, ec2_instance_type, ec2_key_pair, ec2_security_group)
logging.debug("Start the instance and obtain the instance id: %s", instance_id)
#get the public DNS of the instance_id
print "Obtaining the public DNS of the Amazon EC2 instance ..."
public_dns = get_public_dns(instance_id)
logging.debug("Get the public DNS of the instance_id: %s", public_dns)
'''
#instance_id = "<instance_id>"
#public_dns = "<public_dns>"
instance_id = "i-e61ad13c"
public_dns = "ec2-52-26-177-97.us-west-2.compute.amazonaws.com"
'''
#install wget on the instance
print "Installing wget on the EC2 instance ..."
logging.debug("Install wget on the instance")
#here we should judge the os type; yum is used by Fedora, CentOS, and RHEL.
if distro_name not in ["fedora", "centos", "redhat"]:
cleanup(tempfile_list, tempdir_list)
sys.exit("Currently the supported Linux distributions are redhat, centos and fedora.\n")
#ssh exit code 255: the remote node is down or unavailable
rc = 300
while rc != 0:
#without `-t` option of ssh, if the username is not root, `ssh + sudo` will get the following error: sudo: sorry, you must have a tty to run sudo.
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s \'sudo yum -y install wget\'' % (ssh_key, user_name, public_dns)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
logging.debug("`%s` fails with the return code of %d, \nstdout: %s, \nstderr: %s" % (cmd, rc, stdout, stderr))
time.sleep(5)
#python: a python interpreter is needed on the instance to execute umbrella itself
print "Installing python 2.6.9 on the instance ..."
logging.debug("Install python 2.6.9 on the instance.")
python_name = 'python-2.6.9-%s-%s' % (linux_distro, hardware_platform)
python_url = "http://ccl.cse.nd.edu/research/data/hep-case-study/python-2.6.9-%s-%s.tar.gz" % (linux_distro, hardware_platform)
scheme, netloc, path, query, fragment = urlparse.urlsplit(python_url)
python_url_filename = os.path.basename(path)
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s \'sudo wget %s && sudo tar zxvf %s\'' % (ssh_key, user_name, public_dns, python_url, python_url_filename)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
#scp umbrella, meta.json and input files to the instance
print "Sending the umbrella task to the EC2 instance ..."
logging.debug("scp relevant files into the HOME dir of the instance.")
input_file_string = ''
for input_file in input_list:
input_file_string += input_file + ' '
#here meta_path may start with http, so it needs a special treatment
umbrella_fullpath = which_exec("umbrella")
if meta_option:
meta_option = " --meta ~%s/%s " % (user_name, os.path.basename(meta_path))
cmd = 'scp -i %s %s %s %s %s %s@%s:' % (ssh_key, umbrella_fullpath, spec_path, meta_path, input_file_string, user_name, public_dns)
else:
meta_option = ""
cmd = 'scp -i %s %s %s %s %s@%s:' % (ssh_key, umbrella_fullpath, spec_path, input_file_string, user_name, public_dns)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
#change the --inputs option to put all the input files into the home dir of the instance
new_input_options = ''
if len(input_list_origin) > 0:
logging.debug("change the --inputs option to put all the inputs directory in the home dir of the instance")
logging.debug("Transform input_list_origin ....")
new_input_options = " -i '"
for item in input_list_origin:
index_equal = item.find('=')
access_path = item[:index_equal]
actual_path = item[(index_equal+1):]
new_input_options += '%s=%s,' % (access_path, os.path.basename(actual_path))
if new_input_options[-1] == ',':
new_input_options = new_input_options[:-1]
new_input_options += "'"
logging.debug("The new_input_options of Umbrella: %s", new_input_options) #--inputs option
#find cctools_python
cmd = 'which cctools_python'
rc, stdout, stderr = func_call(cmd)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
cctools_python_path = stdout[:-1]
#cvmfs_http_proxy
cvmfs_http_proxy_option = ''
if cvmfs_http_proxy:
cvmfs_http_proxy_option = '--cvmfs_http_proxy %s' % cvmfs_http_proxy
#execute the command on the instance
print "Executing the user's task on the EC2 instance ..."
logging.debug("Execute the command on the instance ...")
ec2_output_option = ""
if output_option:
ec2_output_option = " -o '%s'" % output_option
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s "sudo %s/bin/python ~%s/umbrella %s -s destructive --spec ~%s/%s %s --log ~%s/ec2_umbrella.log -l ec2_umbrella %s %s %s run \'%s\'"' % (ssh_key, user_name, public_dns, python_name, user_name, cvmfs_http_proxy_option, user_name, os.path.basename(spec_path), meta_option, user_name, ec2_output_option, new_input_options, env_option, user_cmd[0])
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
#postprocessing
print "Transferring the output of the user's task from the EC2 instance back to the local machine ..."
logging.debug("Create a tarball for the output dir on the instance.")
output = '%s %s' % (' '.join(output_f_dict.values()), ' '.join(output_d_dict.values()))
cmd = 'ssh -t -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i %s %s@%s \'sudo tar cvzf ~%s/output.tar.gz %s && sudo chown %s:%s ~%s/output.tar.gz ~%s/ec2_umbrella.log\'' % (ssh_key, user_name, public_dns, user_name, output, user_name, user_name, user_name, user_name)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
logging.debug("The instance returns the output.tar.gz to the local machine.")
cmd = 'scp -i %s %s@%s:output.tar.gz %s/' % (ssh_key, user_name, public_dns, sandbox_dir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
logging.debug("The instance returns the remote umbrella log file to the local machine.")
cmd = 'scp -i %s %s@%s:ec2_umbrella.log %s' % (ssh_key, user_name, public_dns, ec2log_path)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
cmd = 'tar zxvf %s/output.tar.gz -C /' % (sandbox_dir)
rc, stdout, stderr = func_call(cmd)
if rc != 0:
terminate_instance(instance_id)
subprocess_error(cmd, rc, stdout, stderr)
print "Terminating the EC2 instance ..."
terminate_instance(instance_id)
def obtain_package(spec_json):
"""Check whether this spec includes a package_manager section, which in turn includes a list attr.
Args:
spec_json: the json object including the specification.
Returns:
if a package list is specified in the spec_json, return the package manager name and a list of the required package name.
Otherwise, return None
"""
if spec_json.has_key("package_manager") and spec_json["package_manager"]:
if spec_json["package_manager"].has_key("name") and spec_json["package_manager"].has_key("list"):
pac_name = spec_json["package_manager"]["name"]
pac_str = spec_json["package_manager"]["list"]
pac_list = pac_str.split()
pac_list.sort()
if len(pac_list) > 0:
if len(pac_name) == 0:
logging.critical("The spec does not specify which package manager to use\n")
sys.exit("The spec does not specify which package manager to use\n")
else:
return (pac_name, pac_list)
return (None, None)
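# A sketch of the expected package_manager section in the spec (hypothetical):
#   "package_manager": {"name": "yum", "list": "perl python", "config": {...}}
# for which obtain_package would return ("yum", ["perl", "python"]).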
def cal_new_os_id(sec, old_os_id, pac_list):
"""Calculate the id of the new OS based on the old_os_id and the package_manager section
Args:
sec: the json object including the package_manager section.
old_os_id: the id of the original os image without any info about package manager.
pac_list: a list of the required package name.
Returns:
md5_value: the md5 value of the string constructed from binding old_os_id and information from the package_manager section.
install_cmd: the package install cmd, such as: yum -y install python
"""
pm_name = attr_check("os", sec, "name")
cmd = pm_name + " " + pac_manager[pm_name][0] + " " + ' '.join(pac_list)
install_cmd = []
install_cmd.append(cmd)
pac_str = ''.join(pac_list)
config_str = ''
if sec.has_key("config") and sec["config"]:
l = []
for item in sec["config"]:
id_attr = sec["config"][item]["id"]
l.append(id_attr)
l.sort()
config_str = ''.join(l)
data = old_os_id + pm_name + pac_str + config_str
md5 = hashlib.md5()
md5.update(data)
md5_value = md5.hexdigest()
return (md5_value, install_cmd)
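# A sketch (hypothetical values): with old_os_id 'abc123', package manager 'yum' and
# pac_list ['perl', 'python'], install_cmd would be ['yum -y install perl python']
# (assuming pac_manager['yum'][0] == '-y install'), and md5_value is the md5 digest of
# 'abc123' + 'yum' + 'perlpython' + the concatenation of the sorted config ids.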
def specification_process(spec_json, sandbox_dir, behavior, meta_json, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth):
""" Create the execution environment specified in the specification file and run the task on it.
Args:
spec_json: the json object including the specification.
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
behavior: the umbrella behavior, such as `run`.
meta_json: the json object including all the metadata of dependencies.
sandbox_mode: the execution engine.
output_f_dict: the mappings of output files (key is the file path used by the application; value is the file path the user specifies.)
output_d_dict: the mappings of output dirs (key is the dir path used by the application; value is the dir path the user specified.)
input_dict: the setting of input files specified by the --inputs option.
env_para_dict: the environment variables which need to be set for the execution of the user's command.
user_cmd: the user's command.
cwd_setting: the current working directory for the execution of the user's command.
cvmfs_http_proxy: HTTP_PROXY environment variable used to access CVMFS by Parrot
osf_auth: the osf authentication info including osf_username and osf_password.
Returns:
None.
"""
print "Checking the validity of the umbrella specification ..."
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
logging.debug("Setting the environment parameters (hardware, kernel and os) according to the specification file ....")
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("this specification is not complete! You must have a hardware section, a kernel section and a os section!")
sys.exit("this specification is not complete! You must have a hardware section, a kernel section and a os section!\n")
host_linux_distro = env_check(sandbox_dir, sandbox_mode, hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version)
#check os
need_separate_rootfs = 0
os_image_dir = ''
if os_id == "":
if sandbox_mode in ["docker"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("the specification does not provide a concrete OS image, but docker execution engine needs a specific OS image!")
sys.exit("the specification does not provide a concrete OS image, but docker execution engine needs a specific OS image!\n")
if linux_distro != host_linux_distro:
cleanup(tempfile_list, tempdir_list)
logging.critical("the specification does not provide a concrete OS image, and the OS image of the local machine does not matching the requirement!")
sys.exit("the specification does not provide a concrete OS image, and the OS image of the local machine does not matching the requirement!\n")
else:
logging.debug("the specification does not provide a concrete OS image, but the OS image of the local machine matches the requirement!")
print "the specification does not provide a concrete OS image, but the OS image of the local machine matches the requirement!\n"
else:
need_separate_rootfs = 1
#check for dependencies which need to be installed by package managers
(pac_name, pac_list) = obtain_package(spec_json)
if pac_list:
logging.debug("The spec needs to use %s install packages.", pac_name)
print "The spec needs to use %s install packages." % pac_name
if sandbox_mode in ["parrot"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("Installing packages through package managers requires the root authority! Please choose a different sandbox mode (docker or destructive)!")
sys.exit("Installing packages through package managers requires the root authority! Please choose a different sandbox mode(docker or destructive)!")
mount_dict = {}
cvmfs_cms_siteconf_mountpoint = ''
host_cctools_path = '' #the path of the cctools binary which is compatible with the host machine under the umbrella cache
if sandbox_mode in ["parrot"]:
logging.debug("To use parrot sandbox mode, cctools binary is needed")
host_cctools_path = cctools_download(sandbox_dir, hardware_platform, host_linux_distro, 'unpack')
logging.debug("Add mountpoint (%s:%s) into mount_dict", host_cctools_path, host_cctools_path)
mount_dict[host_cctools_path] = host_cctools_path
parrotize_user_cmd(user_cmd, sandbox_dir, cwd_setting, host_linux_distro, hardware_platform, meta_json, cvmfs_http_proxy)
item = '%s-%s-%s' % (distro_name, distro_version, hardware_platform) #example of item here: redhat-6.5-x86_64
if need_separate_rootfs and sandbox_mode not in ["destructive"]:
#download the os dependency into the local
os_image_dir = "%s/cache/%s/%s" % (os.path.dirname(sandbox_dir), os_id, item)
logging.debug("A separate OS (%s) is needed!", os_image_dir)
mountpoint = '/'
action = 'unpack'
r3 = dependency_process(item, os_id, action, meta_json, sandbox_dir, osf_auth)
logging.debug("Add mountpoint (%s:%s) into mount_dict for /.", mountpoint, r3)
mount_dict[mountpoint] = r3
#check for cvmfs dependency
is_cms_cvmfs_app = 0
cvmfs_path = ""
cvmfs_mountpoint = ""
result = needCVMFS(spec_json, meta_json)
if result:
(cvmfs_path, cvmfs_mountpoint) = result
if cvmfs_path:
logging.debug("cvmfs is needed! (%s)", cvmfs_path)
print "cvmfs is needed! (%s)" % cvmfs_path
cvmfs_ready = False
if need_separate_rootfs:
os_cvmfs_path = "%s%s" % (os_image_dir, cvmfs_mountpoint)
if os.path.exists(os_cvmfs_path) and os.path.isdir(os_cvmfs_path):
cvmfs_ready = True
logging.debug("The os image has /cvmfs/cms.cern.ch!")
print "The os image has /cvmfs/cms.cern.ch!"
if not cvmfs_ready:
local_cvmfs = ""
local_cvmfs = check_cvmfs_repo(cvmfs_path[7:])
if len(local_cvmfs) > 0:
mount_dict[cvmfs_mountpoint] = local_cvmfs
logging.debug("The cvmfs is installed on the local host, and its mountpoint is: %s", local_cvmfs)
print "The cvmfs is installed on the local host, and its mountpoint is: %s" % local_cvmfs
else:
logging.debug("The cvmfs is not installed on the local host.")
print "The cvmfs is not installed on the local host."
if cvmfs_path.find("cms.cern.ch") != -1:
is_cms_cvmfs_app = 1 #cvmfs is needed to deliver cms.cern.ch repo, and the local host has no cvmfs installed.
if not cvmfs_http_proxy or len(cvmfs_http_proxy) == 0:
cleanup(tempfile_list, tempdir_list)
logging.debug("Access CVMFS through Parrot requires the --cvmfs_http_proxy of umbrella to be set.")
sys.exit("Access CVMFS through Parrot requires the --cvmfs_http_proxy of umbrella to be set.")
#currently, if the logic reaches here, only parrot execution engine is allowed.
cvmfs_cms_siteconf_mountpoint = set_cvmfs_cms_siteconf(sandbox_dir)
#add cvmfs SITEINFO into mount_dict
if sandbox_mode == "docker":
list1 = cvmfs_cms_siteconf_mountpoint.split(' ')
logging.debug("Add mountpoint (%s:%s) into mount_dict for cvmfs SITEINFO", list1[0], list1[1])
mount_dict[list1[0]] = list1[1]
if sandbox_mode != "parrot":
logging.debug("To use parrot to access cvmfs, cctools binary is needed")
host_cctools_path = cctools_download(sandbox_dir, hardware_platform, linux_distro, 'unpack')
logging.debug("Add mountpoint (%s:%s) into mount_dict", host_cctools_path, host_cctools_path)
mount_dict[host_cctools_path] = host_cctools_path
parrotize_user_cmd(user_cmd, sandbox_dir, cwd_setting, linux_distro, hardware_platform, meta_json, cvmfs_http_proxy)
if need_separate_rootfs:
new_os_image_dir = ""
		#if some packages from package managers are needed, create an intermediate os image with all the packages ready.
if pac_list:
new_sw_sec = spec_json["package_manager"]["config"]
(new_os_id, pm_cmd) = cal_new_os_id(spec_json["package_manager"], os_id, pac_list)
new_os_image_dir = "%s/cache/%s/%s" % (os.path.dirname(sandbox_dir), new_os_id, item)
logging.debug("Installing the package into the image (%s), and create a new image: %s ...", os_image_dir, new_os_image_dir)
if os.path.exists(new_os_image_dir) and os.path.isdir(new_os_image_dir):
logging.debug("the new os image already exists!")
				#use the intermediate os image which has all the dependencies from package managers ready as the os image
os_image_dir = new_os_image_dir
os_id = new_os_id
pass
else:
logging.debug("the new os image does not exist!")
new_env_para_dict = {}
#install dependency specified in the spec_json["package_manager"]["config"] section
logging.debug('Install dependency specified in the spec_json["package_manager"]["config"] section.')
if sandbox_mode == "destructive":
software_install(mount_dict, new_env_para_dict, new_sw_sec, meta_json, sandbox_dir, 1, osf_auth)
#install dependencies through package managers
rc, stdout, stderr = func_call(pm_cmd)
if rc != 0:
						subprocess_error(pm_cmd, rc, stdout, stderr)
else:
software_install(mount_dict, new_env_para_dict, new_sw_sec, meta_json, sandbox_dir, 0, osf_auth)
#install dependencies through package managers
logging.debug("Create an intermediate OS image with all the dependencies from package managers ready!")
workflow_repeat(cwd_setting, sandbox_dir, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, pm_cmd, hardware_platform, host_linux_distro, distro_name, distro_version, need_separate_rootfs, os_image_dir, os_id, host_cctools_path, cvmfs_cms_siteconf_mountpoint, mount_dict, mount_dict, meta_json, new_os_image_dir)
logging.debug("Finishing creating the intermediate OS image!")
				#use the intermediate os image which has all the dependencies from package managers ready as the os image
os_image_dir = new_os_image_dir
os_id = new_os_id
if spec_json.has_key("software") and spec_json["software"]:
software_install(mount_dict, env_para_dict, spec_json["software"], meta_json, sandbox_dir, 0, osf_auth)
else:
logging.debug("this spec does not have software section!")
software_install(mount_dict, env_para_dict, "", meta_json, sandbox_dir, 0, osf_auth)
sw_mount_dict = dict(mount_dict) #sw_mount_dict will be used later to config the $PATH
if spec_json.has_key("data") and spec_json["data"]:
data_install(spec_json["data"], meta_json, sandbox_dir, mount_dict, env_para_dict, osf_auth)
else:
logging.debug("this spec does not have data section!")
workflow_repeat(cwd_setting, sandbox_dir, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, hardware_platform, host_linux_distro, distro_name, distro_version, need_separate_rootfs, os_image_dir, os_id, host_cctools_path, cvmfs_cms_siteconf_mountpoint, mount_dict, sw_mount_dict, meta_json, "")
def dependency_check(item):
"""Check whether an executable exists or not.
Args:
item: the name of the executable to be found.
Returns:
If the executable can be found through $PATH, return 0;
Otherwise, return -1.
"""
print "dependency check -- ", item, " "
result = which_exec(item)
if result == None:
logging.debug("Failed to find the executable `%s` through $PATH.", item)
print "Failed to find the executable `%s` through $PATH." % item
return -1
else:
logging.debug("Find the executable `%s` through $PATH.", item)
print "Find the executable `%s` through $PATH." % item
return 0
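# Illustrative usage of dependency_check (not part of the original code); the
# executable name "docker" below is only a hypothetical example:
#   if dependency_check("docker") == -1:
#       sys.exit("docker is required but can not be found through $PATH")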
def get_instance_id(image_id, instance_type, ec2_key_pair, ec2_security_group):
""" Start one VM instance through Amazon EC2 command line interface and return the instance id.
Args:
image_id: the Amazon Image Identifier.
instance_type: the Amazon EC2 instance type used for the task.
ec2_key_pair: the path of the key-pair to use when launching an instance.
ec2_security_group: the security group within which the EC2 instance should be run.
Returns:
If no error happens, returns the id of the started instance.
Otherwise, directly exit.
"""
sg_option = ''
if ec2_security_group:
sg_option = ' -g ' + ec2_security_group
cmd = 'ec2-run-instances %s -t %s -k %s %s --associate-public-ip-address true' % (image_id, instance_type, ec2_key_pair, sg_option)
logging.debug("Starting an instance: %s", cmd)
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
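	#Parse the instance id out of the ec2-run-instances output: the line tagged
	#with "INSTANCE" is assumed to carry the instance id right after the tag, and
	#the fixed offsets below rely on that assumed layout.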
str = "\nINSTANCE"
index = stdout.find(str)
if index == -1:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fail to get the instance id!")
else:
instance_id = stdout[(index+9):(index+20)]
return instance_id
def terminate_instance(instance_id):
"""Terminate an instance.
Args:
instance_id: the id of the VM instance.
Returns:
None.
"""
logging.debug("Terminate the ec2 instance: %s", instance_id)
cmd = 'ec2-terminate-instances %s' % instance_id
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
def get_public_dns(instance_id):
"""Get the public dns of one VM instance from Amazon EC2.
`ec2-run-instances` can not directly return the public dns of the instance, so this function is needed to check the result of `ec2-describe-instances` to obtain the public dns of the instance.
Args:
instance_id: the id of the VM instance.
Returns:
If no error happens, returns the public dns of the instance.
Otherwise, directly exit.
"""
public_dns = ''
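	#Poll ec2-describe-instances until a public dns (an "ec2..." hostname following
	#the PRIVATEIPADDRESS field) shows up; a newly started instance may need some
	#time before the dns is assigned, hence the sleep-and-retry loop below.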
while public_dns == None or public_dns == '' or public_dns == 'l':
cmd = 'ec2-describe-instances ' + instance_id
p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = True)
(stdout, stderr) = p.communicate()
rc = p.returncode
logging.debug("returncode: %d\nstdout: %s\nstderr: %s", rc, stdout, stderr)
if rc != 0:
subprocess_error(cmd, rc, stdout, stderr)
str = "\nPRIVATEIPADDRESS"
index = stdout.find(str)
if index >= 0:
index1 = stdout.find("ec2", index + 1)
if index1 == -1:
time.sleep(5)
continue
public_dns = stdout[index1:-1]
break
return public_dns
def add2spec(item, source_dict, target_dict):
"""Abstract the metadata information (source format checksum size) from source_dict (metadata database) and add these information into target_dict (umbrella spec).
For any piece of metadata information, if it already exists in target_dict, do nothing; otherwise, add it into the umbrella spec.
Args:
item: the name of a dependency
source_dict: fragment of an Umbrella metadata database
	target_dict: fragment of an Umbrella specification
Returns:
None
"""
#item must exist inside target_dict.
ident = None
if source_dict.has_key("checksum"):
checksum = source_dict["checksum"]
ident = checksum
if not target_dict.has_key("id"):
target_dict["id"] = ident
		if not target_dict.has_key("checksum"):
target_dict["checksum"] = source_dict["checksum"]
if source_dict.has_key("source"):
if len(source_dict["source"]) == 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the source attribute of %s can not be empty!" % item)
sys.exit("the source attribute of %s can not be empty!" % item)
else:
source = source_dict["source"][0]
#if checksum is not provided in source_dict, the first url in the source section will be set to the ident.
if not ident and not target_dict.has_key("id"):
target_dict["id"] = source
if not target_dict.has_key("source"):
target_dict["source"] = list(source_dict["source"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not have source attribute in the umbrella metadata database!", item)
sys.exit("%s does not have source attribute in the umbrella metadata database!" % item)
if source_dict.has_key("format") and not target_dict.has_key("format"):
target_dict["format"] = source_dict["format"]
if source_dict.has_key("size") and not target_dict.has_key("size"):
target_dict["size"] = source_dict["size"]
if source_dict.has_key("uncompressed_size") and not target_dict.has_key("uncompressed_size"):
target_dict["uncompressed_size"] = source_dict["uncompressed_size"]
def add2db(item, source_dict, target_dict):
"""Add the metadata information (source format checksum size) about item from source_dict (umbrella specification) to target_dict (metadata database).
The item can be identified through two mechanisms: checksum attribute or one source location, which is used when checksum is not applicable for this item.
If the item has been in the metadata database, do nothing; otherwise, add it, together with its metadata, into the metadata database.
Args:
item: the name of a dependency
source_dict: fragment of an Umbrella specification
	target_dict: fragment of an Umbrella metadata database
Returns:
None
"""
if not item in target_dict:
target_dict[item] = {}
ident = None
if source_dict.has_key("checksum"):
checksum = source_dict["checksum"]
if target_dict[item].has_key(checksum):
logging.debug("%s has been inside the metadata database!", item)
return
ident = checksum
target_dict[item][ident] = {}
target_dict[item][ident]["checksum"] = source_dict["checksum"]
if source_dict.has_key("source"):
if len(source_dict["source"]) == 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the source attribute of %s can not be empty!" % item)
sys.exit("the source attribute of %s can not be empty!" % item)
else:
source = source_dict["source"][0]
if target_dict[item].has_key(source):
logging.debug("%s has been inside the metadata database!", item)
return
#if checksum is not provided in source_dict, the first url in the source section will be set to the ident.
if not ident:
ident = source
target_dict[item][ident] = {}
target_dict[item][ident]["source"] = list(source_dict["source"])
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not have source attribute in the umbrella specification!", item)
sys.exit("%s does not have source attribute in the umbrella specification!" % item)
if source_dict.has_key("format"):
target_dict[item][ident]["format"] = source_dict["format"]
if source_dict.has_key("size"):
target_dict[item][ident]["size"] = source_dict["size"]
if source_dict.has_key("uncompressed_size"):
target_dict[item][ident]["uncompressed_size"] = source_dict["uncompressed_size"]
def prune_attr(dict_item, attr_list):
"""Remove certain attributes from a dict.
	If a specific attribute does not exist, pass.
Args:
dict_item: a dict
attr_list: a list of attributes which will be removed from the dict.
Returns:
None
"""
for item in attr_list:
if dict_item.has_key(item):
del dict_item[item]
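# Illustrative usage of prune_attr (not part of the original code); the dict below
# is only a hypothetical example:
#   d = {"format": "tgz", "mountpoint": "/opt/foo", "size": "123"}
#   prune_attr(d, ["size", "checksum"])
#   # d is now {"format": "tgz", "mountpoint": "/opt/foo"}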
def prune_spec(json_object):
"""Remove the metadata information from a json file (which represents an umbrella specification).
Note: the original json file will not be changed by this function.
Args:
json_object: a json file representing an umbrella specification
Returns:
temp_json: a new json file without metadata information
"""
logging.debug("Remove the metadata information from %s.\n", json_object)
temp_json = dict(json_object)
attr_list = ["source", "checksum", "format", "size", "uncompressed_size"]
if temp_json.has_key("os"):
os_sec = temp_json["os"]
if os_sec:
prune_attr(os_sec, attr_list)
if temp_json.has_key("package_manager") and temp_json["package_manager"] \
and temp_json["package_manager"].has_key("config") and temp_json["package_manager"]["config"]:
pm_config_sec = temp_json["package_manager"]["config"]
if pm_config_sec:
for item in pm_config_sec:
prune_attr(pm_config_sec[item], attr_list)
if temp_json.has_key("software"):
software_sec = temp_json["software"]
if software_sec:
for item in software_sec:
prune_attr(software_sec[item], attr_list)
if temp_json.has_key("data"):
data_sec = temp_json["data"]
if data_sec:
for item in data_sec:
prune_attr(data_sec[item], attr_list)
return temp_json
def abstract_metadata(spec_json, meta_path):
"""Abstract metadata information from a self-contained umbrella spec into a metadata database.
Args:
spec_json: a dict including the contents from a json file
meta_path: the path of the metadata database.
Returns:
If the umbrella spec is not complete, exit directly.
Otherwise, return None.
"""
hardware_sec = attr_check("hardware", spec_json, "hardware")
hardware_arch = attr_check("hardware", hardware_sec, "arch")
metadata = {}
os_sec = attr_check("os", spec_json, "os")
os_name = attr_check("os", os_sec, "name")
os_version = attr_check("os", os_sec, "version")
os_item = "%s-%s-%s" % (os_name, os_version, hardware_arch)
os_item = os_item.lower()
add2db(os_item, os_sec, metadata)
if spec_json.has_key("package_manager") and spec_json["package_manager"] \
and spec_json["package_manager"].has_key("config") and spec_json["package_manager"]["config"]:
pm_config_sec = spec_json["package_manager"]["config"]
if pm_config_sec:
for item in pm_config_sec:
add2db(item, pm_config_sec[item], metadata)
if spec_json.has_key("software"):
software_sec = spec_json["software"]
if software_sec:
for item in software_sec:
add2db(item, software_sec[item], metadata)
if spec_json.has_key("data"):
data_sec = spec_json["data"]
if data_sec:
for item in data_sec:
add2db(item, data_sec[item], metadata)
with open(meta_path, 'w') as f:
json.dump(metadata, f, indent=4)
logging.debug("dump the metadata information from the umbrella spec to %s" % meta_path)
print "dump the metadata information from the umbrella spec to %s" % meta_path
def needCVMFS(spec_json, meta_json):
"""For each dependency in the spec_json, check whether cvmfs is needed to deliver it.
Args:
spec_json: the json object including the specification.
meta_json: the json object including all the metadata of dependencies.
Returns:
if cvmfs is needed, return the cvmfs url. Otherwise, return None
"""
for sec_name in ["software", "data", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
item_id = ""
if sec[item].has_key("id") and len(sec[item]["id"]) > 0:
item_id = sec[item]["id"]
mountpoint = sec[item]["mountpoint"]
result = meta_search(meta_json, item, item_id)
if result.has_key("source") and len(result["source"]) > 0:
url = result["source"][0]
if url[:5] == "cvmfs":
return (url, mountpoint)
return None
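# Illustrative usage of needCVMFS (not part of the original code):
#   result = needCVMFS(spec_json, meta_json)
#   if result:
#       cvmfs_url, mountpoint = result  #e.g., a "cvmfs://..." url and its mountpoint in the sandbox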
def cleanup(filelist, dirlist):
"""Cleanup the temporary files and dirs created by umbrella
Args:
filelist: a list including file paths
dirlist: a list including dir paths
Returns:
None
"""
#cleanup the temporary files
for item in filelist:
if os.path.exists(item):
logging.debug("cleanup temporary file: %s", item)
print "cleanup temporary file: ", item
os.remove(item)
#cleanup the temporary dirs
for item in dirlist:
if os.path.exists(item):
logging.debug("cleanup temporary dir: %s", item)
print "cleanup temporary dir: ", item
shutil.rmtree(item)
def separatize_spec(spec_json, meta_json, target_type):
"""Given an umbrella specification and an umbrella metadata database, generate a self-contained umbrella specification or a metadata database only including the informationnecessary for the umbrella spec.
If the target_type is spec, then generate a self-contained umbrella specification.
If the target_type is db, then generate a metadata database only including the information necessary for the umbrella spec.
Args:
spec_json: the json object including the specification.
meta_json: the json object including all the metadata of dependencies.
target_type: the type of the target json file, which can be an umbrella spec or an umbrella metadata db.
Returns:
metadata: a json object
"""
	#pull the metadata information of the spec from the metadata db to the spec
if target_type == "spec":
metadata = dict(spec_json)
#pull the metadata information of the spec from the metadata db into a separate db
if target_type == "meta":
metadata = {}
hardware_sec = attr_check("hardware", spec_json, "hardware")
hardware_arch = attr_check("hardware", hardware_sec, "arch")
os_sec = attr_check("os", spec_json, "os")
os_name = attr_check("os", os_sec, "name")
os_version = attr_check("os", os_sec, "version")
os_item = "%s-%s-%s" % (os_name, os_version, hardware_arch)
os_item = os_item.lower()
ident = None
if os_sec.has_key("id"):
ident = os_sec["id"]
source = meta_search(meta_json, os_item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["os"])
if target_type == "meta":
add2db(os_item, source, metadata)
if spec_json.has_key("package_manager") and spec_json["package_manager"] \
and spec_json["package_manager"].has_key("config") and spec_json["package_manager"]["config"]:
pm_config_sec = spec_json["package_manager"]["config"]
if pm_config_sec:
for item in pm_config_sec:
ident = None
if pm_config_sec[item].has_key("id"):
ident = pm_config_sec[item]["id"]
source = meta_search(meta_json, item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["package_manager"]["config"][item])
if target_type == "meta":
add2db(item, source, metadata)
if spec_json.has_key("software"):
software_sec = spec_json["software"]
if software_sec:
for item in software_sec:
ident = None
if software_sec[item].has_key("id"):
ident = software_sec[item]["id"]
source = meta_search(meta_json, item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["software"][item])
if target_type == "meta":
add2db(item, source, metadata)
if spec_json.has_key("data"):
data_sec = spec_json["data"]
if data_sec:
for item in data_sec:
ident = None
if data_sec[item].has_key("id"):
ident = data_sec[item]["id"]
source = meta_search(meta_json, item, ident)
if target_type == "spec":
add2spec(os_item, source, metadata["data"][item])
if target_type == "meta":
add2db(item, source, metadata)
return metadata
def json2file(filepath, json_item):
"""Write a json object into a file
Args:
filepath: a file path
json_item: a dict representing a json object
Returns:
None
"""
with open(filepath, 'w') as f:
json.dump(json_item, f, indent=4)
logging.debug("dump a json object from the umbrella spec to %s" % filepath)
print "dump a json object from the umbrella spec to %s" % filepath
def path_exists(filepath):
"""Check the validity and existence of a file path.
Args:
filepath: a file path
Returns:
Exit directly if any error happens.
Otherwise, returns None.
"""
logging.debug("Checking file path: %s", filepath)
if os.path.exists(filepath):
cleanup(tempfile_list, tempdir_list)
logging.debug("The file (%s) already exists, please specify a new path!", filepath)
sys.exit("The file (%s) already exists, please specify a new path!" % filepath)
def dir_create(filepath):
"""Create the directory for it if necessary. If the file already exists, exit directly.
Args:
filepath: a file path
Returns:
Exit directly if any error happens.
Otherwise, returns None.
"""
dirpath = os.path.dirname(filepath)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
else:
if not os.path.isdir(dirpath):
cleanup(tempfile_list, tempdir_list)
logging.debug("The basename of the file (%s) is not a directory!\n", dirpath)
sys.exit("The basename of the file (%s) is not a directory!\n" % dirpath)
def validate_meta(meta_json):
"""Validate a metadata db.
	The current standard for a valid metadata db is: for each item, the "source" attribute must exist and not be empty.
Args:
meta_json: a dict object representing a metadata db.
Returns:
If error happens, return directly with the error info.
Otherwise, None.
"""
logging.debug("Starting validating the metadata db ....\n")
print "Starting validating the metadata db ...."
for name in meta_json:
for ident in meta_json[name]:
logging.debug("check for %s with the id of %s ...", name, ident)
print "check for %s with the id of %s ..." % (name, ident)
attr_check(name, meta_json[name][ident], "source", 1)
logging.debug("Finish validating the metadata db ....\n")
print "Finish validating the metadata db successfully!"
def validate_spec(spec_json, meta_json = None):
"""Validate a spec_json.
Args:
spec_json: a dict object representing a specification.
meta_json: a dict object representing a metadata db.
Returns:
If error happens, return directly with the error info.
Otherwise, None.
"""
logging.debug("Starting validating the spec file ....\n")
print "Starting validating the spec file ...."
#validate the following three sections: hardware, kernel and os.
env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
for sec_name in ["software", "data", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
if (sec[item].has_key("mountpoint") and sec[item]["mountpoint"]) \
or (sec[item].has_key("mount_env") and sec[item]["mount_env"]):
pass
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s in the %s section should have either <mountpoint> or <mount_env>!\n", item, sec_name)
sys.exit("%s in the %s section should have either <mountpoint> or <mount_env>!\n" % (item, sec_name))
if sec[item].has_key("source") and len(sec[item]["source"]) > 0:
pass
else:
if meta_json:
ident = None
if sec[item].has_key("id"):
ident = sec[item]["id"]
result = meta_search(meta_json, item, ident)
if result.has_key("source") and len(result["source"]) > 0:
pass
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s in the metadata db should have <source> attr!\n", item)
sys.exit("%s in the metadata db should have <source> attr!\n", item)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s in the %s section should have <source> attr!\n", item, sec_name)
sys.exit("%s in the %s section should have <source> attr!\n" % (item, sec_name))
logging.debug("Finish validating the spec file ....\n")
print "Finish validating the spec file successfully!"
def osf_create(username, password, user_id, proj_name, is_public):
"""Create an OSF project, and return the project id.
Args:
username: an OSF username
password: an OSF password
user_id: the id of an OSF user
proj_name: the name of the OSF project
is_public: set to 1 if the project is public; set to 0 if the project is private.
Returns:
the id of the OSF project
"""
#first check whether the user already has an existing OSF project having the same name
url="https://api.osf.io:443/v2/users/%s/nodes/" % user_id
nodes=set()
	#the response results are split into pages, and each page has 10 items.
while url:
r=requests.get(url)
if r.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to check the projects contributed by the user (%d): %s!" % (r.status_code, r.reason))
for data in r.json()['data']:
nodes.add(data['attributes']['title'])
url=r.json()['links']['next']
if proj_name in nodes:
cleanup(tempfile_list, tempdir_list)
sys.exit("The project name (%s) already exists!" % proj_name)
#create the new project
auth = (username, password)
payload = {
"type": "nodes",
"title": proj_name,
"category": "project",
"public": is_public
}
url="https://api.osf.io:443/v2/nodes/"
r=requests.post(url, auth=auth, data=payload)
if r.status_code != 201:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to create the new project (%d): %s!" % (r.status_code, r.reason))
proj_id = r.json()['data']['id']
return proj_id
def osf_upload(username, password, proj_id, source):
"""upload a file from source into the OSF project identified by proj_id.
Args:
username: an OSF username
password: an OSF password
proj_id: the id of the OSF project
source: a file path
Returns:
the OSF download url of the uploaded file
"""
print "Upload %s to OSF ..." % source
logging.debug("Upload %s to OSF ...",source)
url="https://files.osf.io/v1/resources/%s/providers/osfstorage/" % proj_id
payload = {"kind":"file", "name":os.path.basename(source)}
auth = (username, password)
f=open(source, 'rb')
r=requests.put(url, params=payload, auth = auth, data=f)
if r.status_code != 201 and r.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to upload the file %s to OSF(%d): %s!" % (source, r.status_code, r.reason))
return r.json()['data']['links']['download']
def osf_download(username, password, osf_url, dest):
"""download a file pointed by an OSF url to dest.
Args:
username: an OSF username
password: an OSF password
osf_url: the OSF download url
dest: the destination of the OSF file
Returns:
If the osf_url is downloaded successfully, return None;
Otherwise, directly exit.
"""
if not found_requests:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nDownloading private stuff from OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
sys.exit("\nDownloading private stuff from OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
print "Download %s from OSF to %s" % (osf_url, dest)
logging.debug("Download %s from OSF to %s", osf_url, dest)
word = 'resources'
proj_id = osf_url[(osf_url.index(word) + len(word) + 1):(osf_url.index(word) + len(word) + 6)]
url="https://api.osf.io:443/v2/nodes/%s/" % proj_id
r=requests.get(url)
r2 = None
if r.status_code == 401:
if username == None or password == None:
cleanup(tempfile_list, tempdir_list)
sys.exit("The OSF resource (%s) is private (%d): %s! To use the OSF resource, you need to provide a legal OSF username and password." % (url, r.status_code, r.reason))
auth = (username, password)
r1=requests.get(url, auth=auth)
if r1.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("The OSF resource (%s) is private (%d): %s! The username or password is incorrect!" % (url, r1.status_code, r1.reason))
else:
r2=requests.get(osf_url, auth=auth, stream=True)
else:
r2=requests.get(osf_url, stream=True)
if r2.status_code != 200:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to download the osf resource: %s (%d): %s!" % (r2.status_code, r2.reason))
chunk_size=10240
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
with open(dest, 'wb') as fd:
for chunk in r2.iter_content(chunk_size):
fd.write(chunk)
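# Illustrative usage of osf_download (not part of the original code); the url and
# destination path below are hypothetical examples:
#   osf_download(username, password,
#                "https://files.osf.io/v1/resources/abcde/providers/osfstorage/123",
#                "/tmp/umbrella/cache/dep.tar.gz")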
def s3_create(bucket_name, acl):
"""Create a s3 bucket
Args:
bucket_name: the bucket name
acl: the access control, which can be: private, public-read
Returns:
bucket: an S3.Bucket instance
"""
#create the connection with s3
s3 = boto3.resource('s3')
#list all the bucket names
buckets = set()
try:
for bucket in s3.buckets.all():
buckets.add(bucket.name)
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to list all the current buckets: %s!" % e)
#check whether the bucket name already exists
if bucket_name in buckets:
cleanup(tempfile_list, tempdir_list)
sys.exit("The bucket name (%s) already exists!" % bucket_name)
#create a new bucket
try:
s3.create_bucket(Bucket=bucket_name)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to create the new bucket (%s): %s!" % (bucket_name, e))
#obtain the created bucket
bucket = s3.Bucket(bucket_name)
#set access control
#ACL totally can be one of these options: 'private'|'public-read'|'public-read-write'|'authenticated-read'
#for now, when an user uses Umbrella to upload to s3, the acl can only be private, public-read.
try:
bucket.Acl().put(ACL=acl)
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to list all the current buckets: %s!" % e)
return bucket
def s3_upload(bucket, source, acl):
"""Upload a local file to s3
Args:
bucket: an S3.Bucket instance
source: the local file path
acl: the access control, which can be: private, public-read
Returns:
link: the link of a s3 object
"""
print "Upload %s to S3 ..." % source
logging.debug("Upload %s to S3 ...", source)
key = os.path.basename(source)
data = open(source, 'rb')
try:
#acl on the bucket does not automatically apply to all the objects in it. Acl must be set on each object.
bucket.put_object(ACL=acl, Key=key, Body=data) #https://s3.amazonaws.com/testhmeng/s3
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to upload the file (%s) to S3: %s!" % (source, e))
return "%s/%s/%s" % (s3_url, bucket.name, key)
def s3_download(link, dest):
"""Download a s3 file to dest
Args:
link: the link of a s3 object. e.g., https://s3.amazonaws.com/testhmeng/s3
dest: a local file path
Returns:
None
"""
if not found_boto3 or not found_botocore:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
sys.exit("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
print "Download %s from S3 to %s" % (link, dest)
logging.debug("Download %s from S3 to %s", link, dest)
s3 = boto3.resource('s3')
if (len(s3_url)+1) >= len(link):
cleanup(tempfile_list, tempdir_list)
sys.exit("The s3 object link (%s) is invalid! The correct format shoulde be <%s>/<bucket_name>/<key>!" % (link, s3_url))
m = link[(len(s3_url)+1):] #m format: <bucket_name>/<key>
i = m.find('/')
if i == -1:
cleanup(tempfile_list, tempdir_list)
sys.exit("The s3 object link (%s) is invalid! The correct format shoulde be <%s>/<bucket_name>/<key>!" % (link, s3_url))
bucket_name = m[:i]
if (i+1) >= len(m):
cleanup(tempfile_list, tempdir_list)
sys.exit("The s3 object link (%s) is invalid! The correct format shoulde be <%s>/<bucket_name>/<key>!" % (link, s3_url))
key = m[(i+1):]
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
#the download url can be automatically combined through bucket name and key
try:
s3.Object(bucket_name, key).download_file(dest)
except botocore.exceptions.ClientError as e:
cleanup(tempfile_list, tempdir_list)
sys.exit(e.message)
except Exception as e:
cleanup(tempfile_list, tempdir_list)
sys.exit("Fails to download the object (%s) from the bucket(%s):! Please ensure you have the right permission to download these s3 objects: %s!" % (key, bucket_name, e))
def has_source(sources, target):
"""Check whether the sources includes a url from the specific target.
Args:
sources: a list of url
target: the specific resource url. For example, s3, osf.
Returns:
If a url from the specific target exists, return True.
Otherwise, return False.
"""
if not sources or len(sources) == 0:
return False
n = len(target)
for source in sources:
if len(source) > n and source[:n] == target:
return True
return False
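# Illustrative usage of has_source (not part of the original code); the urls below
# are hypothetical examples:
#   has_source(["osf+https://files.osf.io/...", "http://example.com/a.tar.gz"], "osf")  #True
#   has_source(["http://example.com/a.tar.gz"], "s3")                                   #False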
def spec_upload(spec_json, meta_json, target_info, sandbox_dir, osf_auth=None, s3_bucket=None):
"""Upload each dependency in an umbrella spec to the target (OSF or s3), and add the new target download url into the umbrella spec.
The source of the dependencies can be anywhere supported by umbrella: http
https git local s3 osf. Umbrella always first downloads each dependency into
	its local cache, then uploads the dep from its local cache to the target.
Args:
spec_json: the json object including the specification.
meta_json: the json object including all the metadata of dependencies.
target_info: the info necessary to communicate with the remote target (i.e., OSF, s3)
sandbox_dir: the sandbox dir for temporary files like Parrot mountlist file.
osf_auth: the osf authentication info including osf_username and osf_password.
s3_bucket: an S3.Bucket instance
Returns:
None
"""
mount_dict = {}
env_para_dict = {}
global upload_count
print "Upload the dependencies from the umbrella spec to %s ..." % target_info[0]
logging.debug("Upload the dependencies from the umbrella spec to %s ...", target_info[0])
if spec_json.has_key("os") and spec_json["os"] and spec_json["os"].has_key("id") and spec_json["os"]["id"]:
os_id = spec_json["os"]["id"]
if spec_json.has_key("hardware") and spec_json["hardware"] and spec_json.has_key("kernel") and spec_json["kernel"] and spec_json.has_key("os") and spec_json["os"]:
logging.debug("Setting the environment parameters (hardware, kernel and os) according to the specification file ....")
(hardware_platform, cpu_cores, memory_size, disk_size, kernel_name, kernel_version, linux_distro, distro_name, distro_version, os_id) = env_parameter_init(spec_json["hardware"], spec_json["kernel"], spec_json["os"])
item = '%s-%s-%s' % (distro_name, distro_version, hardware_platform) #example of item here: redhat-6.5-x86_64
os_image_dir = "%s/cache/%s/%s" % (os.path.dirname(sandbox_dir), os_id, item)
logging.debug("A separate OS (%s) is needed!", os_image_dir)
mountpoint = '/'
action = 'unpack'
if spec_json["os"].has_key("source") or attr_check(item, meta_search(meta_json, item, os_id), "source", 1):
if spec_json["os"].has_key("source"):
sources = spec_json["os"]["source"]
else:
sources = meta_search(meta_json, item, os_id)["source"]
if has_source(sources, target_info[0]):
logging.debug("The os section already has a url from %s!", target_info[0])
print "The os section already has a url from %s!" % target_info[0]
else:
upload_count += 1
r3 = dependency_process(item, os_id, action, meta_json, sandbox_dir, osf_auth)
logging.debug("Add mountpoint (%s:%s) into mount_dict for /.", mountpoint, r3)
mount_dict[mountpoint] = r3
if target_info[0] == "osf":
osf_url = osf_upload(target_info[1], target_info[2], target_info[3], os_image_dir + ".tar.gz")
spec_json["os"]["source"].append("osf+" + osf_url)
elif target_info[0] == "s3":
s3_url = s3_upload(s3_bucket, os_image_dir + ".tar.gz", target_info[1])
spec_json["os"]["source"].append("s3+" + s3_url)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("the os section does not has source attr!")
sys.exit("the os section does not has source attr!")
for sec_name in ["data"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
for item in sec:
if sec[item].has_key("source") or attr_check(item, meta_search(meta_json, item, id), "source", 1):
if sec[item].has_key("source"):
sources = sec[item]["source"]
else:
sources = meta_search(meta_json, item, id)["source"]
if has_source(sources, target_info[0]):
logging.debug("%s already has a url from %s!", item, target_info[0])
print "%s already has a url from %s!" % (item, target_info[0])
continue
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not has the source attr!", item)
sys.exit("%s does not has the source attr!" % item)
upload_count += 1
data_install(sec, meta_json, sandbox_dir, mount_dict, env_para_dict, osf_auth, item)
if sec[item]["format"] == "tgz":
source_url = mount_dict[sec[item]["mountpoint"]] + ".tar.gz"
else:
source_url = mount_dict[sec[item]["mountpoint"]]
if target_info[0] == "osf":
osf_url = osf_upload(target_info[1], target_info[2], target_info[3], source_url)
sec[item]["source"].append("osf+" + osf_url)
elif target_info[0] == "s3":
s3_url = s3_upload(s3_bucket, source_url, target_info[1])
sec[item]["source"].append("s3+" + s3_url)
for sec_name in ["software", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
if sec[item].has_key("source") or attr_check(item, meta_search(meta_json, item, id), "source", 1):
if sec[item].has_key("source"):
sources = sec[item]["source"]
else:
sources = meta_search(meta_json, item, id)["source"]
if has_source(sources, target_info[0]):
logging.debug("%s already has a url from %s!", item, target_info[0])
print "%s already has a url from %s!" % (item, target_info[0])
continue
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("%s does not has the source attr!", item)
sys.exit("%s does not has the source attr!" % item)
upload_count += 1
software_install(mount_dict, env_para_dict, sec, meta_json, sandbox_dir, 0, osf_auth, item)
				#ignore uploading resources from cvmfs
if (not sec[item].has_key("mountpoint")) or (not mount_dict.has_key(sec[item]["mountpoint"])) or mount_dict[sec[item]["mountpoint"]] == "":
continue
if sec[item]["format"] == "tgz":
source_url = mount_dict[sec[item]["mountpoint"]] + ".tar.gz"
else:
source_url = mount_dict[sec[item]["mountpoint"]]
if target_info[0] == "osf":
osf_url = osf_upload(target_info[1], target_info[2], target_info[3], source_url)
sec[item]["source"].append("osf+" + osf_url)
elif target_info[0] == "s3":
s3_url = s3_upload(s3_bucket, source_url, target_info[1])
sec[item]["source"].append("s3+" + s3_url)
def dep_build(d, name):
"""Build the metadata info of a dependency.
Args:
d: a dependency object
name: the name of the dependency
Returns:
None
"""
#check the validity of the 'format' attr
formats = ['plain', 'tgz']
form = attr_check(name, d, "format")
if not form in formats:
cleanup(tempfile_list, tempdir_list)
sys.exit("The format attr can only be: %s!\n", ' or '.join(formats))
#check the validity of the 'source' attr
source = attr_check(name, d, "source", 1)
if source == '':
cleanup(tempfile_list, tempdir_list)
sys.exit("The source of %s is empty!" % name)
if source[0] != '/':
cleanup(tempfile_list, tempdir_list)
sys.exit("The source of %s should be a local path!" % name)
#set the file size
size = os.stat(source).st_size
d["size"] = str(size)
#set the uncompressed size of tgz file
if form == "tgz":
full_size = get_tgz_size(source)
d["uncompressed_size"] = str(full_size)
#set the 'checksum' and 'id' attrs
checksum = md5_cal(source)
d["id"] = checksum
d["checksum"] = checksum
def get_tgz_size(path):
"""Get the uncompressed size of a tgz file
Args:
path: a tgz file path
Returns:
size: the uncompressed size of a tgz file
"""
size = 0
f = gzip.open(path, 'rb')
try:
while True:
c = f.read(1024*1024)
if not c:
break
else:
size += len(c)
finally:
f.close()
return size
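# Illustrative usage of get_tgz_size (not part of the original code); the path below
# is a hypothetical example:
#   full_size = get_tgz_size("/tmp/foo.tar.gz")  #uncompressed byte count of the tarball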
def spec_build(spec_json):
"""Build the metadata information of an umbrella spec
Args:
spec_json: the json object including the specification.
Returns:
None
"""
if spec_json.has_key("os") and spec_json["os"]:
dep_build(spec_json["os"], "os")
for sec_name in ["data", "software", "package_manager"]:
if spec_json.has_key(sec_name) and spec_json[sec_name]:
sec = spec_json[sec_name]
if sec_name == "package_manager":
if sec.has_key("config") and sec["config"]:
sec = sec["config"]
else:
logging.debug("%s does not have config attribute!", sec_name)
break
for item in sec:
dep_build(sec[item], item)
help_info = {
"build": '''Build up the metadata info of dependencies inside an umbrella spec, and write the built-up version into a new file.
A good use case of build is when you have some dependencies from the local filesystem. In this case, umbrella will calculate the metadata info
about these dependencies.
The source spec should specify the following info of each local dependency: source, action, mountpoint, format.
When the local dependency is a .tar.gz file, the following metadata info will be put into the target spec: id, checksum, size, uncompressed size.
When the local dependency is a plain file, the following metadata info will be put into the target spec: id, checksum, size.
When the local dependency is a dir D, a corresponding D.tar.gz file will be created in the same directory as D, then the following metadata info will be put into the target spec: id, checksum, size, uncompressed size.
For more info about how to compose an umbrella spec, please check the following link:
http://ccl.cse.nd.edu/software/manuals/umbrella.html#create_spec
usage: umbrella [options] build source target
source	the path of an existing umbrella spec file from your local filesystem whose metadata info needs to be built up
target	a non-existing file path on your local filesystem where the built-up version of the umbrella spec will be written into
''',
"expand": '''Expand an umbrella spec file into a self-contained umbrella spec
The source umbrella spec should be specified through the --spec option; the metadata db should be specified through the --meta option.
For each dependency in the source umbrella spec, the following info will be extracted from the metadata db: source, size, format, checksum.
Finally, the expanded umbrella spec will be written into a new file.
usage: umbrella [options] expand target
target	a non-existing file path on your local filesystem where the expanded version of the umbrella spec will be written into
''',
"filter": '''Filter the metadata info for an umbrella spec file from a huge metadata db
The source umbrella spec should be specified through the --spec option; the metadata db should be specified through the --meta option.
The source umbrella spec should NOT be self-contained.
For each dependency specified in the source umbrella spec, its metadata info will be extracted from the huge metadata db, and written into the target path.
usage: umbrella [options] filter target
target	a non-existing file path on your local filesystem where the metadata info of all the dependencies in the umbrella spec will be written into
''',
"run": '''Run your application through umbrella
usage: umbrella [options] run [command]
command command to run, the command can also be set inside the umbrella spec. By default: /bin/sh
''',
"split": '''Split a self-contained umbrella spec file into an umbrella spec and a metadata db
The source umbrella spec should be specified through the --spec option; The --meta option will be ignored.
The source umbrella spec should be self-contained.
usage: umbrella [options] split newspec newdb
newspec	a non-existing file path on your local filesystem where the new umbrella spec will be written into
newdb	a non-existing file path on your local filesystem where the metadata info corresponding to newspec will be written into
''',
"upload": '''Upload the dependencies in an umbrella spec into remote archives (OSF, Amazon S3)
Umbrella will upload all the dependencies to the target archive, and add the new resource location into the source section of each dependency.
Finally, the new umbrella spec will be written into a new file.
When the source of a dependency already includes a url from the target archive, the dependency will be ignored.
Currently, the supported targets include OSF and Amazon S3.
Uploading to OSF requires the following umbrella options: --osf_user, --osf_pass, --osf_userid
usage of upload osf: umbrella [options] upload osf proj acl target
proj the osf project name
acl the access permission of the uploaded data. Options: public, private
target	a non-existing file path on your local filesystem where the new umbrella spec will be written into
usage of upload s3: umbrella [options] upload s3 bucket acl target
bucket the s3 bucket name
acl the access permission of the uploaded data. Options: public-read, private
target	a non-existing file path on your local filesystem where the new umbrella spec will be written into
''',
"validate": '''Validate an umbrella spec file
The source umbrella spec should be specified through the --spec option; the metadata db should be specified through the --meta option.
usage: umbrella [options] validate
'''
}
def main():
parser = OptionParser(description="Umbrella is a portable environment creator for reproducible computing on clusters, clouds, and grids.",
usage="""usage: %prog [options] run|expand|filter|split|validate|upload|build ...
Currently, umbrella supports the following behaviors:
build\t\tbuild up the metadata info of dependencies inside an umbrella spec
expand\t\texpand an umbrella spec file into a self-contained umbrella spec
filter\t\tfilter the metadata info for an umbrella spec file from a huge metadata db
run\t\trun your application through umbrella
split\t\tsplit a self-contained umbrella spec file into an umbrella spec and a metadata db
upload\t\tupload the dependencies in an umbrella spec into remote archives (OSF, Amazon S3)
validate\tvalidate an umbrella spec file
To check the help doc for a specific behavior, use: %prog <behavior> help""",
version="%prog CCTOOLS_VERSION")
parser.add_option("--spec",
action="store",
help="The specification json file.",)
parser.add_option("--meta",
action="store",
help="The source of meta information, which can be a local file path (e.g., file:///tmp/meta.json) or url (e.g., http://...).\nIf this option is not provided, the specification will be treated a self-contained specification.",)
parser.add_option("-l", "--localdir",
action="store",
help="The path of directory used for all the cached data and all the sandboxes, the directory can be an existing dir.",)
parser.add_option("-o", "--output",
action="store",
help="The mappings of outputs in the format of <container_path>=<local_path>. Multiple mappings should be separated by comma.\ncontainer_path is a path inside the sandbox and should be exposed in the output section of an umbrella spec.\nlocal_path should be a non-existing path on your local filessytem where you want the output from container_path to be put into.",)
parser.add_option("-s", "--sandbox_mode",
action="store",
choices=['parrot', 'destructive', 'docker', 'ec2',],
help="sandbox mode, which can be parrot, destructive, docker, ec2.",)
parser.add_option("-i", "--inputs",
action="store",
help="The path of input files in the format of <container_path>=<local_path>. Multiple mappings should be separated by comma. Please refer to the --output option for the settings of local_path and container_path.",)
parser.add_option("-e", "--env",
action="store",
help="The environment variables in the format of <variable_name>=<variable_value>. Multiple settings should be separated by comma. I.e., -e 'PWD=/tmp'.")
parser.add_option("--log",
action="store",
default="./umbrella.log",
help="The path of umbrella log file. (By default: ./umbrella.log)",)
parser.add_option("--cvmfs_http_proxy",
action="store",
help="HTTP_PROXY to access cvmfs (Used by Parrot)",)
parser.add_option("--ec2",
action="store",
help="The source of ec2 information.",)
parser.add_option("--condor_log",
action="store",
help="The path of the condor umbrella log file. Required for condor execution engines.",)
parser.add_option("--ec2_log",
action="store",
help="The path of the ec2 umbrella log file. Required for ec2 execution engines.",)
parser.add_option("-g", "--ec2_group",
action="store",
help="the security group within which an Amazon EC2 instance should be run. (only for ec2)",)
parser.add_option("-k", "--ec2_key",
action="store",
help="the name of the key pair to use when launching an Amazon EC2 instance. (only for ec2)",)
parser.add_option("--ec2_sshkey",
action="store",
help="the name of the private key file to use when connecting to an Amazon EC2 instance. (only for ec2)",)
parser.add_option("--ec2_instance_type",
action="store",
help="the type of an Amazon EC2 instance. (only for ec2)",)
parser.add_option("--osf_user",
action="store",
help="the OSF username (required in two cases: uploading to osf; downloading private osf resources.)",)
parser.add_option("--osf_pass",
action="store",
help="the OSF password (required in two cases: uploading to osf; downloading private osf resources.)",)
parser.add_option("--osf_userid",
action="store",
help="the OSF user id (required in two cases: uploading to osf; downloading private osf resources.)",)
(options, args) = parser.parse_args()
logfilename = options.log
if os.path.exists(logfilename) and not os.path.isfile(logfilename):
sys.exit("The --log option <%s> is not a file!" % logfilename)
global tempfile_list
global tempdir_list
global upload_count
"""
disable_warnings function is used here to disable the SNIMissingWarning and InsecurePlatformWarning from /afs/crc.nd.edu/user/h/hmeng/.local/lib/python2.6/site-packages/requests-2.9.1-py2.6.egg/requests/packages/urllib3/util/ssl_.py.
"Requests 2.6 introduced this warning for users of Python prior to Python 2.7.9 with only stock SSL modules available."
"""
if found_requests:
requests.packages.urllib3.disable_warnings()
logging.basicConfig(filename=logfilename, level=logging.DEBUG,
format='%(asctime)s.%(msecs)d %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
logging.debug("*******Welcome to Umbrella*******")
logging.debug("Arguments: ")
logging.debug(sys.argv)
start = datetime.datetime.now()
logging.debug("Start time: %s", start)
logging.debug("Check the validity of the command ....")
if not args:
logging.critical("You must provide the behavior and the command!")
print "You must provide the behavior and the command!\n"
parser.print_help()
sys.exit(1)
user_cmd = []
behavior = args[0]
logging.debug("Check the validity of the behavior: %s", behavior)
behavior_list = ["run", "expand", "filter", "split", "validate", "upload", "build"]
if behavior not in behavior_list:
logging.critical("%s is not supported by umbrella!", behavior)
print behavior + " is not supported by umbrella!\n"
parser.print_help()
sys.exit(1)
if len(args) > 1 and args[1] in ['help']:
print help_info[behavior]
sys.exit(0)
if behavior in ["build"]:
if len(args) != 3:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella build is: umbrella ... build <source.umbrella> <dest.umbrella>\n")
sys.exit("The syntax for umbrella build is: umbrella ... build <source.umbrella> <dest.umbrella>\n")
args[1] = os.path.abspath(args[1])
if (not os.path.exists(args[1])) or (not os.path.isfile(args[1])):
cleanup(tempfile_list, tempdir_list)
logging.critical("<source.umbrella> (%s) should be an existing file!\n", args[1])
sys.exit("<source.umbrella> (%s) should be an existing file!\n" % args[1])
if os.path.exists(args[2]):
cleanup(tempfile_list, tempdir_list)
logging.critical("<dest.umbrella> (%s) should be a non-existing file!\n", args[2])
sys.exit("<dest.umbrella> (%s) should be a non-existing file!\n" % args[2])
args[2] = os.path.abspath(args[2])
if not os.path.exists(os.path.dirname(args[2])):
print os.path.dirname(args[2])
try:
os.makedirs(os.path.dirname(args[2]))
except Exception as e:
cleanup(tempfile_list, tempdir_list)
logging.critical("Fails to create the directory for the <dest.umbrella> (%s): %s!", args[2], e)
sys.exit("Fails to create the directory for the <dest.umbrella> (%s)!" % (args[2], e))
with open(args[1]) as f:
spec_json = json.load(f)
spec_build(spec_json)
json2file(args[2], spec_json)
sys.exit(0)
if behavior in ["run", "upload"]:
#get the absolute path of the localdir directory, which will cache all the data, and store all the sandboxes.
		#to allow the reuse of the local cache, the localdir can be a dir which already exists.
localdir = options.localdir
localdir = os.path.abspath(localdir)
logging.debug("Check the localdir option: %s", localdir)
if not os.path.exists(localdir):
logging.debug("create the localdir: %s", localdir)
os.makedirs(localdir)
sandbox_dir = tempfile.mkdtemp(dir=localdir)
logging.debug("Create the sandbox_dir: %s", sandbox_dir)
#add sandbox_dir into tempdir_list
tempdir_list.append(sandbox_dir)
osf_auth = []
#osf_auth info
osf_user = options.osf_user
osf_pass = options.osf_pass
if osf_user or osf_pass:
osf_auth.append(osf_user)
osf_auth.append(osf_pass)
if behavior in ["run"]:
sandbox_mode = options.sandbox_mode
logging.debug("Check the sandbox_mode option: %s", sandbox_mode)
if sandbox_mode in ["destructive"]:
if getpass.getuser() != 'root':
cleanup(tempfile_list, tempdir_list)
logging.critical("You must be root to use the %s sandbox mode.", sandbox_mode)
print 'You must be root to use the %s sandbox mode.\n' % (sandbox_mode)
parser.print_help()
sys.exit(1)
#transfer options.env into a dictionary, env_para_dict
env_para = options.env
env_para_dict = {}
if (not env_para) or env_para == '':
logging.debug("The env option is null")
env_para_list = ''
env_para_dict = {}
else:
logging.debug("Process the env option: %s", env_para)
env_para = re.sub('\s+', '', env_para).strip()
env_para_list = env_para.split(',')
for item in env_para_list:
index = item.find('=')
name = item[:index]
value = item[(index+1):]
env_para_dict[name] = value
logging.debug("the dictionary format of the env options (env_para_dict):")
logging.debug(env_para_dict)
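		#For example (hypothetical values), -e 'PWD=/tmp,HTTP_PROXY=http://proxy:3128' ends up
		#as env_para_dict = {"PWD": "/tmp", "HTTP_PROXY": "http://proxy:3128"}.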
#get the cvmfs HTTP_PROXY
cvmfs_http_proxy = options.cvmfs_http_proxy
if behavior in ["run", "expand", "filter", "split", "validate", "upload"]:
spec_path = options.spec
if behavior == "validate" and spec_path == None:
spec_json = None
else:
spec_path_basename = os.path.basename(spec_path)
logging.debug("Start to read the specification file: %s", spec_path)
if not os.path.isfile(spec_path):
cleanup(tempfile_list, tempdir_list)
logging.critical("The specification json file (%s) does not exist! Please refer the -c option.", spec_path)
print "The specification json file does not exist! Please refer the -c option.\n"
parser.print_help()
sys.exit(1)
with open(spec_path) as f: #python 2.4 does not support this syntax: with open () as
spec_json = json.load(f)
if behavior in ["run"]:
user_cmd = args[1:]
if len(user_cmd) == 0:
if spec_json.has_key("cmd") and len(spec_json["cmd"]) > 0:
user_cmd.append(spec_json["cmd"])
else:
user_cmd.append("/bin/sh") #set the user_cmd to be default: /bin/sh
logging.debug("The user's command is: %s", user_cmd)
		#if the spec file has environ section, merge the variables defined in it into env_para_dict
if spec_json.has_key("environ") and spec_json["environ"]:
logging.debug("The specification file has environ section, update env_para_dict ....")
spec_env = spec_json["environ"]
for key in spec_env:
env_para_dict[key] = spec_env[key]
logging.debug("env_para_dict:")
logging.debug(env_para_dict)
if behavior in ["run"]:
if 'PWD' in env_para_dict:
cwd_setting = env_para_dict['PWD']
logging.debug("PWD environment variable is set explicitly: %s", cwd_setting)
else:
cwd_setting = sandbox_dir
env_para_dict['PWD'] = cwd_setting
logging.debug("PWD is not set explicitly, use sandbox_dir (%s) as PWD", cwd_setting)
#get the absolute path of each input file
input_files = options.inputs
input_list = []
input_dict = {}
if (not input_files) or input_files == '':
input_list_origin = ''
input_list = []
input_dict = {}
logging.debug("the inputs options is null")
else:
input_files = re.sub( '\s+', '', input_files).strip() #remove all the whitespaces within the inputs option
logging.debug("The inputs option: %s", input_files)
input_list_origin = input_files.split(',')
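		#each item has the form access_path=actual_path; relative access paths are resolved against cwd_setting below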
for item in input_list_origin:
index = item.find('=')
access_path = item[:index]
actual_path = item[(index+1):]
if access_path[0] != '/':
access_path = os.path.join(cwd_setting, access_path)
actual_path = os.path.abspath(actual_path)
input_dict[access_path] = actual_path
input_list.append(actual_path) #get the absolute path of each input file and add it into input_list
logging.debug("The list version of the inputs option: ")
logging.debug(input_list)
logging.debug("The dict version of the inputs option: ")
logging.debug(input_dict)
#get the absolute path of each output file
output_dir = options.output
output_dict = {}
output_f_dict = {}
output_d_dict = {}
if output_dir and len(output_dir) > 0:
		output_dir = re.sub( '\s+', '', output_dir).strip() #remove all the whitespaces within the output option
if output_dir == "":
logging.debug("the output option is null!")
else:
logging.debug("the output option: %s", output_dir)
outputs = output_dir.split(',')
for item in outputs:
index = item.find('=')
access_path = item[:index]
actual_path = item[(index+1):]
if access_path[0] != '/':
cleanup(tempfile_list, tempdir_list)
logging.critical("the path of an output should be absolute!")
sys.exit("the path of an output should be absolute!")
actual_path = os.path.abspath(actual_path)
output_dict[access_path] = actual_path
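	#split the requested outputs into files and dirs according to the output section of the spec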
if len(output_dict) > 0:
if spec_json.has_key("output"):
files = []
dirs = []
if spec_json["output"].has_key("files"):
files = spec_json["output"]["files"]
if spec_json["output"].has_key("dirs"):
dirs = spec_json["output"]["dirs"]
for key in output_dict.keys():
if key in files:
output_f_dict[key] = output_dict[key]
elif key in dirs:
output_d_dict[key] = output_dict[key]
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("the output file (%s) is not specified in the spec file!", key)
sys.exit("the output file (%s) is not specified in the spec file!" % key)
else:
cleanup(tempfile_list, tempdir_list)
logging.critical("the specification does not have a output section!")
sys.exit("the specification does not have a output section!")
del output_dict
for f in output_f_dict.values():
if not os.path.exists(f):
logging.debug("create the output file: %s", f)
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
elif not os.path.isdir(d):
cleanup(tempfile_list, tempdir_list)
logging.critical("the parent path of the output file (%s) is not a directory!", f)
sys.exit("the parent path of the output file (%s) is not a directory!" % f)
else:
pass
new_file = open(f, 'a')
new_file.close()
elif len(f) != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the output file (%s) already exists!", f)
sys.exit("the output file (%s) already exists!\n" % f)
else:
pass
for d in output_d_dict.values():
if not os.path.exists(d):
logging.debug("create the output dir: %s", d)
os.makedirs(d)
elif len(d) != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical("the output dir (%s) already exists!", d)
sys.exit("the output dir(%s) already exists!" % d)
else:
pass
meta_json = None
if behavior in ["run", "expand", "filter", "validate"]:
"""
meta_path is optional. If set, it provides the metadata information for the dependencies.
If not set, the umbrella specification is treated as a self-contained specification.
meta_path can be in either file:///filepath format or a http/https url like http:/ccl.cse.nd.edu/.... Otherwise, it is treated as a local path.
"""
meta_path = options.meta
if meta_path:
if meta_path[:7] == "file://":
meta_path = meta_path[7:]
logging.debug("Check the metatdata database file: %s", meta_path)
if not os.path.exists(meta_path):
cleanup(tempfile_list, tempdir_list)
logging.critical("the metatdata database file (%s) does not exist!", meta_path)
sys.exit("the metatdata database file (%s) does not exist!" % meta_path)
elif meta_path[:7] == "http://" or meta_path[:8] == "https://":
url = meta_path
if behavior in ["run"]:
meta_path = '%s/meta.json' % (sandbox_dir)
if behavior in ["expand", "filter", "validate"]:
#create a tempfile under /tmp
(fd, meta_path) = tempfile.mkstemp()
tempfile_list.append(meta_path)
os.close(fd)
logging.debug("Creating a temporary file (%s) to hold the metadata file specified by the --meta options!", meta_path)
logging.debug("Download metadata database from %s into %s", url, meta_path)
print "Download metadata database from %s into %s" % (url, meta_path)
url_download(url, meta_path)
else:
logging.debug("Check the metatdata database file: %s", meta_path)
if not os.path.exists(meta_path):
cleanup(tempfile_list, tempdir_list)
logging.critical("the metatdata database file (%s) does not exist!", meta_path)
sys.exit("the metatdata database file (%s) does not exist!" % meta_path)
else:
if behavior in ["run"]:
#the provided specification should be self-contained.
# One solution is to change all the current implementation of Umbrella to check whether the metadata information is included in the specification.
# Another solution is to extract all the metadata information into a separate metadata database file. (This solution is currently used).
meta_path = '%s/meta.json' % (sandbox_dir)
abstract_metadata(spec_json, meta_path)
elif behavior in ["expand", "filter"]:
cleanup(tempfile_list, tempdir_list)
logging.critical("The --meta option should be provided for the umbrella %s behavior!\n", behavior)
sys.exit("The --meta option should be provided for the umbrella %s behavior!\n" % behavior)
if meta_path:
with open(meta_path) as f: #python 2.4 does not support this syntax: with open () as
meta_json = json.load(f)
if behavior in ["upload"]:
#the provided specification should be self-contained.
# One solution is to change all the current implementation of Umbrella to check whether the metadata information is included in the specification.
# Another solution is to extract all the metadata information into a separate metadata database file. (This solution is currently used).
meta_path = '%s/meta.json' % (sandbox_dir)
abstract_metadata(spec_json, meta_path)
with open(meta_path) as f: #python 2.4 does not support this syntax: with open () as
meta_json = json.load(f)
if behavior in ["run", "validate", "split", "filter", "expand", "upload"]:
#for validate, if only --spec is provided, then check whether this spec is self-contained.
#for validate, if only --meta is provided, then check whether each item in the metadata db is well archived (for now, well-archived means the source attr is not null).
#for validate, if both --spec and --meta are provided, then check whether the dependencies of the spec file is well archived.
if spec_json == None:
if meta_json == None:
pass
else:
validate_meta(meta_json)
else:
if meta_json == None:
validate_spec(spec_json)
else:
validate_spec(spec_json, meta_json)
if behavior in ["run"]:
# user_name = 'root' #username who can access the VM instances from Amazon EC2
# ssh_key = 'hmeng_key_1018.pem' #the pem key file used to access the VM instances from Amazon EC2
if sandbox_mode == "ec2":
ec2log_path = options.ec2_log
ec2log_path = os.path.abspath(ec2log_path)
if os.path.exists(ec2log_path):
cleanup(tempfile_list, tempdir_list)
sys.exit("The ec2_log option <%s> already exists!" % ec2log_path)
ssh_key = os.path.abspath(options.ec2_sshkey)
if not os.path.exists(ssh_key):
cleanup(tempfile_list, tempdir_list)
logging.critical("The ssh key file (%s) does not exists!", ssh_key)
sys.exit("The ssh key file (%s) does not exists!\n" % ssh_key)
ec2_security_group = options.ec2_group
ec2_key_pair = options.ec2_key
ec2_instance_type = options.ec2_instance_type
ec2_process(spec_path, spec_json, options.meta, meta_path, ssh_key, ec2_key_pair, ec2_security_group, ec2_instance_type, sandbox_dir, output_dir, output_f_dict, output_d_dict, sandbox_mode, input_list, input_list_origin, env_para, env_para_dict, user_cmd, cwd_setting, ec2log_path, cvmfs_http_proxy)
elif sandbox_mode == "condor":
condorlog_path = options.condor_log
condorlog_path = os.path.abspath(condorlog_path)
if os.path.exists(condorlog_path):
cleanup(tempfile_list, tempdir_list)
sys.exit("The condor_log option <%s> already exists!" % condorlog_path)
condor_process(spec_path, spec_json, spec_path_basename, meta_path, sandbox_dir, output_dir, input_list_origin, user_cmd, cwd_setting, condorlog_path, cvmfs_http_proxy)
elif sandbox_mode == "local":
#first check whether Docker exists, if yes, use docker execution engine; if not, use parrot execution engine.
if dependency_check('docker') == 0:
logging.debug('docker exists, use docker execution engine')
specification_process(spec_json, sandbox_dir, behavior, meta_json, 'docker', output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth)
else:
logging.debug('docker does not exist, use parrot execution engine')
specification_process(spec_json, sandbox_dir, behavior, meta_json, 'parrot', output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth)
else:
if sandbox_mode == 'docker' and dependency_check('docker') != 0:
cleanup(tempfile_list, tempdir_list)
logging.critical('Docker is not installed on the host machine, please try other execution engines!')
sys.exit('Docker is not installed on the host machine, please try other execution engines!')
specification_process(spec_json, sandbox_dir, behavior, meta_json, sandbox_mode, output_f_dict, output_d_dict, input_dict, env_para_dict, user_cmd, cwd_setting, cvmfs_http_proxy, osf_auth)
if behavior in ["expand", "filter"]:
if len(args) != 2:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella %s is: umbrella ... %s <filepath>.\n", behavior, behavior)
sys.exit("The syntax for umbrella %s is: umbrella ... %s <filepath>.\n" % (behavior, behavior))
target_specpath = os.path.abspath(args[1])
path_exists(target_specpath)
dir_create(target_specpath)
if behavior == "expand":
new_json = separatize_spec(spec_json, meta_json, "spec")
else:
new_json = separatize_spec(spec_json, meta_json, "meta")
#write new_json into the file specified by the user.
json2file(target_specpath, new_json)
if behavior in ["split"]:
if len(args) != 3:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella split is: umbrella ... split <spec_filepath> <meta_filepath>.\n")
sys.exit("The syntax for umbrella split is: umbrella ... split <spec_filepath> <meata_filepath>.\n")
new_spec_path = os.path.abspath(args[1])
db_path = os.path.abspath(args[2])
path_exists(new_spec_path)
dir_create(new_spec_path)
path_exists(db_path)
dir_create(db_path)
abstract_metadata(spec_json, db_path)
new_json = prune_spec(spec_json)
json2file(new_spec_path, new_json)
if behavior in ["upload"]:
target = ["osf", "s3"]
if len(args) < 2 or args[1] not in target:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella upload is: umbrella ... upload <target> ... (target can be: %s)\n", " or ".join(target))
sys.exit("The syntax for umbrella upload is: umbrella ... upload <target> ... (target can be: %s)\n" % " or ".join(target))
if args[1] == "osf":
if not found_requests:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nUploading umbrella spec dependencies to OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
sys.exit("\nUploading umbrella spec dependencies to OSF requires a python package - requests. Please check the installation page of requests:\n\n\thttp://docs.python-requests.org/en/latest/user/install/\n")
if len(args) != 5:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella upload osf is: umbrella ... upload osf <osf_project_name> <public_or_private> <target_specpath>\n")
sys.exit("The syntax for umbrella upload osf is: umbrella ... upload osf <osf_project_name> <public_or_private> <target_specpath>\n")
acl = ["private", "public"]
if args[3] not in acl:
cleanup(tempfile_list, tempdir_list)
sys.exit("The access control for s3 bucket and object can only be: %s" % " or ".join(acl))
target_specpath = os.path.abspath(args[4])
path_exists(target_specpath)
dir_create(target_specpath)
osf_info = []
osf_info.append("osf")
osf_info += [options.osf_user, options.osf_pass]
osf_proj_id = osf_create(options.osf_user, options.osf_pass, options.osf_userid, args[2], args[3] == "public")
osf_info.append(osf_proj_id)
spec_upload(spec_json, meta_json, osf_info, sandbox_dir, osf_auth)
if upload_count > 0:
json2file(target_specpath, spec_json)
osf_upload(options.osf_user, options.osf_pass, osf_proj_id, target_specpath)
else:
logging.debug("All the dependencies has been already inside OSF!")
print "All the dependencies has been already inside OSF!"
elif args[1] == "s3":
if not found_boto3 or not found_botocore:
cleanup(tempfile_list, tempdir_list)
logging.critical("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
sys.exit("\nUploading umbrella spec dependencies to s3 requires a python package - boto3. Please check the installation page of boto3:\n\n\thttps://boto3.readthedocs.org/en/latest/guide/quickstart.html#installation\n")
if len(args) != 5:
cleanup(tempfile_list, tempdir_list)
logging.critical("The syntax for umbrella upload s3 is: umbrella ... upload s3 <bucket_name> <access_control> <target_specpath>\n")
sys.exit("The syntax for umbrella upload s3 is: umbrella ... upload s3 <bucket_name> <access_control> <target_specpath>\n")
acl = ["private", "public-read"]
if args[3] not in acl:
cleanup(tempfile_list, tempdir_list)
sys.exit("The access control for s3 bucket and object can only be: %s" % " or ".join(acl))
target_specpath = os.path.abspath(args[4])
path_exists(target_specpath)
dir_create(target_specpath)
s3_info = []
s3_info.append("s3")
s3_info.append(args[3])
bucket = s3_create(args[2], args[3])
spec_upload(spec_json, meta_json, s3_info, sandbox_dir, s3_bucket=bucket)
if upload_count > 0:
json2file(target_specpath, spec_json)
s3_upload(bucket, target_specpath, args[3])
else:
logging.debug("All the dependencies has been already inside S3!")
print "All the dependencies has been already inside S3!"
cleanup(tempfile_list, tempdir_list)
end = datetime.datetime.now()
diff = end - start
logging.debug("End time: %s", end)
logging.debug("execution time: %d seconds", diff.seconds)
if __name__ == "__main__":
main()
#set sts=4 sw=4 ts=4 expandtab ft=python
| gpl-2.0 | -3,054,751,530,107,959,000 | 42.014133 | 648 | 0.702808 | false |
allink/cookiecutter-allink-project | {{ cookiecutter.repo_name }}/{{ cookiecutter.project_name }}/settings/production.py | 1 | 2379 | from .default import * # noqa
# import djcelery
# ===================
# = Global Settings =
# ===================
DEBUG = False
# ===================
# = Server Settings =
# ===================
DATABASES['default']['CONN_MAX_AGE'] = 60
RAVEN_CONFIG = {
'dsn': '{{ cookiecutter.sentry_dsn}}',
}
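# Two Redis databases are used: db 1 for the default cache and db 2 for sessions.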
CACHES = {
'default': {
'OPTIONS': {
"PARSER_CLASS": "redis.connection.HiredisParser",
},
'LOCATION': '127.0.0.1:6379:1',
'KEY_PREFIX': '{{ cookiecutter.project_name }}_production',
'BACKEND': 'django_redis.cache.RedisCache'
},
'sessions': {
'OPTIONS': {
"PARSER_CLASS": "redis.connection.HiredisParser",
},
'LOCATION': '127.0.0.1:6379:2',
'KEY_PREFIX': '{{ cookiecutter.project_name }}_production',
'BACKEND': 'django_redis.cache.RedisCache'
}
}
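# Wrapping the loaders in cached.Loader makes Django cache compiled templates in production.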
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': False,
        'DIRS': [os.path.join(BASE_DIR, '{{ cookiecutter.project_name }}', 'templates')],
'OPTIONS': {
'debug': False,
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
]),
],
'context_processors': CONTEXT_PROCESSORS
}
},
]
# ===========================
# = Django-specific Modules =
# ===========================
MIDDLEWARE_CLASSES += ['allink_essentials.middleware.validate_host_middleware.ValidateHostMiddleware']
# ============
# = Lockdown =
# ============
INSTALLED_APPS += ('lockdown',)
LOCKDOWN_PASSWORDS = ('stage',)
LOCKDOWN_URL_EXCEPTIONS = (
    r'^/robots.txt$',  # unlock /robots.txt
)
# ValidateHostMiddleware is already appended above, so only the lockdown middleware is added here.
MIDDLEWARE_CLASSES += (
    'lockdown.middleware.LockdownMiddleware',
)
# ===========
# = Webpack =
# ===========
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': True,
'BUNDLE_DIR_NAME': 'build/',
'STATS_FILE': os.path.join(BASE_DIR, '{{cookiecutter.project_name}}', 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'IGNORE': ['.+\.hot-update.js', '.+\.map']
}
}
# load celery
# djcelery.setup_loader()
| bsd-3-clause | -8,018,112,526,542,343,000 | 24.580645 | 102 | 0.517865 | false |
leshchevds/ganeti | lib/jqueue/__init__.py | 1 | 52112 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module implementing the job queue handling.
"""
import logging
import errno
import time
import weakref
import threading
import itertools
import operator
import os
try:
# pylint: disable=E0611
from pyinotify import pyinotify
except ImportError:
import pyinotify
from ganeti import asyncnotifier
from ganeti import constants
from ganeti import serializer
from ganeti import locking
from ganeti import luxi
from ganeti import opcodes
from ganeti import opcodes_base
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import jstore
import ganeti.rpc.node as rpc
from ganeti import runtime
from ganeti import netutils
from ganeti import compat
from ganeti import ht
from ganeti import query
from ganeti import qlang
from ganeti import pathutils
from ganeti import vcluster
from ganeti.cmdlib import cluster
#: Retrieves "id" attribute
_GetIdAttr = operator.attrgetter("id")
class CancelJob(Exception):
"""Special exception to cancel a job.
"""
def TimeStampNow():
"""Returns the current timestamp.
@rtype: tuple
@return: the current time in the (seconds, microseconds) format
"""
return utils.SplitTime(time.time())
def _CallJqUpdate(runner, names, file_name, content):
"""Updates job queue file after virtualizing filename.
"""
virt_file_name = vcluster.MakeVirtualPath(file_name)
return runner.call_jobqueue_update(names, virt_file_name, content)
class _QueuedOpCode(object):
"""Encapsulates an opcode object.
@ivar log: holds the execution log and consists of tuples
of the form C{(log_serial, timestamp, level, message)}
@ivar input: the OpCode we encapsulate
@ivar status: the current status
@ivar result: the result of the LU execution
@ivar start_timestamp: timestamp for the start of the execution
@ivar exec_timestamp: timestamp for the actual LU Exec() function invocation
@ivar stop_timestamp: timestamp for the end of the execution
"""
__slots__ = ["input", "status", "result", "log", "priority",
"start_timestamp", "exec_timestamp", "end_timestamp",
"__weakref__"]
def __init__(self, op):
"""Initializes instances of this class.
@type op: L{opcodes.OpCode}
@param op: the opcode we encapsulate
"""
self.input = op
self.status = constants.OP_STATUS_QUEUED
self.result = None
self.log = []
self.start_timestamp = None
self.exec_timestamp = None
self.end_timestamp = None
# Get initial priority (it might change during the lifetime of this opcode)
self.priority = getattr(op, "priority", constants.OP_PRIO_DEFAULT)
@classmethod
def Restore(cls, state):
"""Restore the _QueuedOpCode from the serialized form.
@type state: dict
@param state: the serialized state
@rtype: _QueuedOpCode
@return: a new _QueuedOpCode instance
"""
obj = _QueuedOpCode.__new__(cls)
obj.input = opcodes.OpCode.LoadOpCode(state["input"])
obj.status = state["status"]
obj.result = state["result"]
obj.log = state["log"]
obj.start_timestamp = state.get("start_timestamp", None)
obj.exec_timestamp = state.get("exec_timestamp", None)
obj.end_timestamp = state.get("end_timestamp", None)
obj.priority = state.get("priority", constants.OP_PRIO_DEFAULT)
return obj
def Serialize(self):
"""Serializes this _QueuedOpCode.
@rtype: dict
@return: the dictionary holding the serialized state
"""
return {
"input": self.input.__getstate__(),
"status": self.status,
"result": self.result,
"log": self.log,
"start_timestamp": self.start_timestamp,
"exec_timestamp": self.exec_timestamp,
"end_timestamp": self.end_timestamp,
"priority": self.priority,
}
class _QueuedJob(object):
"""In-memory job representation.
This is what we use to track the user-submitted jobs. Locking must
be taken care of by users of this class.
@type queue: L{JobQueue}
@ivar queue: the parent queue
@ivar id: the job ID
@type ops: list
@ivar ops: the list of _QueuedOpCode that constitute the job
@type log_serial: int
@ivar log_serial: holds the index for the next log entry
@ivar received_timestamp: the timestamp for when the job was received
  @ivar start_timestamp: the timestamp for start of execution
@ivar end_timestamp: the timestamp for end of execution
@ivar writable: Whether the job is allowed to be modified
"""
# pylint: disable=W0212
__slots__ = ["queue", "id", "ops", "log_serial", "ops_iter", "cur_opctx",
"received_timestamp", "start_timestamp", "end_timestamp",
"writable", "archived",
"livelock", "process_id",
"__weakref__"]
def AddReasons(self, pickup=False):
"""Extend the reason trail
Add the reason for all the opcodes of this job to be executed.
"""
count = 0
for queued_op in self.ops:
op = queued_op.input
if pickup:
reason_src_prefix = constants.OPCODE_REASON_SRC_PICKUP
else:
reason_src_prefix = constants.OPCODE_REASON_SRC_OPCODE
reason_src = opcodes_base.NameToReasonSrc(op.__class__.__name__,
reason_src_prefix)
reason_text = "job=%d;index=%d" % (self.id, count)
reason = getattr(op, "reason", [])
reason.append((reason_src, reason_text, utils.EpochNano()))
op.reason = reason
count = count + 1
def __init__(self, queue, job_id, ops, writable):
"""Constructor for the _QueuedJob.
@type queue: L{JobQueue}
@param queue: our parent queue
@type job_id: job_id
@param job_id: our job id
@type ops: list
@param ops: the list of opcodes we hold, which will be encapsulated
in _QueuedOpCodes
@type writable: bool
@param writable: Whether job can be modified
"""
if not ops:
raise errors.GenericError("A job needs at least one opcode")
self.queue = queue
self.id = int(job_id)
self.ops = [_QueuedOpCode(op) for op in ops]
self.AddReasons()
self.log_serial = 0
self.received_timestamp = TimeStampNow()
self.start_timestamp = None
self.end_timestamp = None
self.archived = False
self.livelock = None
self.process_id = None
self.writable = None
self._InitInMemory(self, writable)
assert not self.archived, "New jobs can not be marked as archived"
@staticmethod
def _InitInMemory(obj, writable):
"""Initializes in-memory variables.
"""
obj.writable = writable
obj.ops_iter = None
obj.cur_opctx = None
def __repr__(self):
status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
"id=%s" % self.id,
"ops=%s" % ",".join([op.input.Summary() for op in self.ops])]
return "<%s at %#x>" % (" ".join(status), id(self))
@classmethod
def Restore(cls, queue, state, writable, archived):
"""Restore a _QueuedJob from serialized state:
@type queue: L{JobQueue}
@param queue: to which queue the restored job belongs
@type state: dict
@param state: the serialized state
@type writable: bool
@param writable: Whether job can be modified
@type archived: bool
@param archived: Whether job was already archived
    @rtype: _QueuedJob
    @return: the restored _QueuedJob instance
"""
obj = _QueuedJob.__new__(cls)
obj.queue = queue
obj.id = int(state["id"])
obj.received_timestamp = state.get("received_timestamp", None)
obj.start_timestamp = state.get("start_timestamp", None)
obj.end_timestamp = state.get("end_timestamp", None)
obj.archived = archived
obj.livelock = state.get("livelock", None)
obj.process_id = state.get("process_id", None)
if obj.process_id is not None:
obj.process_id = int(obj.process_id)
obj.ops = []
obj.log_serial = 0
for op_state in state["ops"]:
op = _QueuedOpCode.Restore(op_state)
for log_entry in op.log:
obj.log_serial = max(obj.log_serial, log_entry[0])
obj.ops.append(op)
cls._InitInMemory(obj, writable)
return obj
def Serialize(self):
"""Serialize the _JobQueue instance.
@rtype: dict
@return: the serialized state
"""
return {
"id": self.id,
"ops": [op.Serialize() for op in self.ops],
"start_timestamp": self.start_timestamp,
"end_timestamp": self.end_timestamp,
"received_timestamp": self.received_timestamp,
"livelock": self.livelock,
"process_id": self.process_id,
}
def CalcStatus(self):
"""Compute the status of this job.
This function iterates over all the _QueuedOpCodes in the job and
based on their status, computes the job status.
The algorithm is:
- if we find a cancelled, or finished with error, the job
status will be the same
- otherwise, the last opcode with the status one of:
- waitlock
- canceling
- running
will determine the job status
- otherwise, it means either all opcodes are queued, or success,
and the job status will be the same
@return: the job status
"""
status = constants.JOB_STATUS_QUEUED
all_success = True
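    # Scan the opcodes in order: WAITING/RUNNING only update the candidate
    # status, while ERROR, CANCELING and CANCELED are terminal and stop the scan.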
for op in self.ops:
if op.status == constants.OP_STATUS_SUCCESS:
continue
all_success = False
if op.status == constants.OP_STATUS_QUEUED:
pass
elif op.status == constants.OP_STATUS_WAITING:
status = constants.JOB_STATUS_WAITING
elif op.status == constants.OP_STATUS_RUNNING:
status = constants.JOB_STATUS_RUNNING
elif op.status == constants.OP_STATUS_CANCELING:
status = constants.JOB_STATUS_CANCELING
break
elif op.status == constants.OP_STATUS_ERROR:
status = constants.JOB_STATUS_ERROR
# The whole job fails if one opcode failed
break
elif op.status == constants.OP_STATUS_CANCELED:
        status = constants.JOB_STATUS_CANCELED
break
if all_success:
status = constants.JOB_STATUS_SUCCESS
return status
def CalcPriority(self):
"""Gets the current priority for this job.
Only unfinished opcodes are considered. When all are done, the default
priority is used.
@rtype: int
"""
priorities = [op.priority for op in self.ops
if op.status not in constants.OPS_FINALIZED]
if not priorities:
# All opcodes are done, assume default priority
return constants.OP_PRIO_DEFAULT
return min(priorities)
def GetLogEntries(self, newer_than):
"""Selectively returns the log entries.
@type newer_than: None or int
@param newer_than: if this is None, return all log entries,
otherwise return only the log entries with serial higher
than this value
@rtype: list
@return: the list of the log entries selected
"""
if newer_than is None:
serial = -1
else:
serial = newer_than
entries = []
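    # Each log entry is a (log_serial, timestamp, level, message) tuple; keep
    # only entries with a serial strictly greater than the requested one.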
for op in self.ops:
entries.extend(filter(lambda entry: entry[0] > serial, op.log))
return entries
def MarkUnfinishedOps(self, status, result):
"""Mark unfinished opcodes with a given status and result.
    This is a utility function for marking all running or waiting to
    be run opcodes with a given status. Opcodes which are already
    finalized are not changed.
@param status: a given opcode status
@param result: the opcode result
"""
not_marked = True
for op in self.ops:
if op.status in constants.OPS_FINALIZED:
assert not_marked, "Finalized opcodes found after non-finalized ones"
continue
op.status = status
op.result = result
not_marked = False
def Finalize(self):
"""Marks the job as finalized.
"""
self.end_timestamp = TimeStampNow()
def Cancel(self):
"""Marks job as canceled/-ing if possible.
@rtype: tuple; (bool, string)
@return: Boolean describing whether job was successfully canceled or marked
as canceling and a text message
"""
status = self.CalcStatus()
if status == constants.JOB_STATUS_QUEUED:
self.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
"Job canceled by request")
self.Finalize()
return (True, "Job %s canceled" % self.id)
elif status == constants.JOB_STATUS_WAITING:
# The worker will notice the new status and cancel the job
self.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None)
return (True, "Job %s will be canceled" % self.id)
else:
logging.debug("Job %s is no longer waiting in the queue", self.id)
return (False, "Job %s is no longer waiting in the queue" % self.id)
def ChangePriority(self, priority):
"""Changes the job priority.
@type priority: int
@param priority: New priority
@rtype: tuple; (bool, string)
@return: Boolean describing whether job's priority was successfully changed
and a text message
"""
status = self.CalcStatus()
if status in constants.JOBS_FINALIZED:
return (False, "Job %s is finished" % self.id)
elif status == constants.JOB_STATUS_CANCELING:
return (False, "Job %s is cancelling" % self.id)
else:
assert status in (constants.JOB_STATUS_QUEUED,
constants.JOB_STATUS_WAITING,
constants.JOB_STATUS_RUNNING)
changed = False
for op in self.ops:
if (op.status == constants.OP_STATUS_RUNNING or
op.status in constants.OPS_FINALIZED):
assert not changed, \
("Found opcode for which priority should not be changed after"
" priority has been changed for previous opcodes")
continue
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING)
changed = True
# Set new priority (doesn't modify opcode input)
op.priority = priority
if changed:
return (True, ("Priorities of pending opcodes for job %s have been"
" changed to %s" % (self.id, priority)))
else:
return (False, "Job %s had no pending opcodes" % self.id)
def SetPid(self, pid):
"""Sets the job's process ID
@type pid: int
@param pid: the process ID
"""
status = self.CalcStatus()
if status in (constants.JOB_STATUS_QUEUED,
constants.JOB_STATUS_WAITING):
if self.process_id is not None:
logging.warning("Replacing the process id %s of job %s with %s",
self.process_id, self.id, pid)
self.process_id = pid
else:
logging.warning("Can set pid only for queued/waiting jobs")
class _OpExecCallbacks(mcpu.OpExecCbBase):
def __init__(self, queue, job, op):
"""Initializes this class.
@type queue: L{JobQueue}
@param queue: Job queue
@type job: L{_QueuedJob}
@param job: Job object
@type op: L{_QueuedOpCode}
@param op: OpCode
"""
super(_OpExecCallbacks, self).__init__()
assert queue, "Queue is missing"
assert job, "Job is missing"
assert op, "Opcode is missing"
self._queue = queue
self._job = job
self._op = op
def _CheckCancel(self):
"""Raises an exception to cancel the job if asked to.
"""
# Cancel here if we were asked to
if self._op.status == constants.OP_STATUS_CANCELING:
logging.debug("Canceling opcode")
raise CancelJob()
def NotifyStart(self):
"""Mark the opcode as running, not lock-waiting.
This is called from the mcpu code as a notifier function, when the LU is
finally about to start the Exec() method. Of course, to have end-user
visible results, the opcode must be initially (before calling into
Processor.ExecOpCode) set to OP_STATUS_WAITING.
"""
assert self._op in self._job.ops
assert self._op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# Cancel here if we were asked to
self._CheckCancel()
logging.debug("Opcode is now running")
self._op.status = constants.OP_STATUS_RUNNING
self._op.exec_timestamp = TimeStampNow()
# And finally replicate the job status
self._queue.UpdateJobUnlocked(self._job)
def NotifyRetry(self):
"""Mark opcode again as lock-waiting.
This is called from the mcpu code just after calling PrepareRetry.
The opcode will now again acquire locks (more, hopefully).
"""
self._op.status = constants.OP_STATUS_WAITING
logging.debug("Opcode will be retried. Back to waiting.")
def _AppendFeedback(self, timestamp, log_type, log_msgs):
"""Internal feedback append function, with locks
@type timestamp: tuple (int, int)
@param timestamp: timestamp of the log message
@type log_type: string
@param log_type: log type (one of Types.ELogType)
@type log_msgs: any
@param log_msgs: log data to append
"""
# This should be removed once Feedback() has a clean interface.
# Feedback can be called with anything, we interpret ELogMessageList as
# messages that have to be individually added to the log list, but pushed
# in a single update. Other msgtypes are only transparently passed forward.
if log_type == constants.ELOG_MESSAGE_LIST:
log_type = constants.ELOG_MESSAGE
else:
log_msgs = [log_msgs]
for msg in log_msgs:
self._job.log_serial += 1
self._op.log.append((self._job.log_serial, timestamp, log_type, msg))
self._queue.UpdateJobUnlocked(self._job, replicate=False)
# TODO: Cleanup calling conventions, make them explicit
def Feedback(self, *args):
"""Append a log entry.
Calling conventions:
arg[0]: (optional) string, message type (Types.ELogType)
arg[1]: data to be interpreted as a message
"""
assert len(args) < 3
# TODO: Use separate keyword arguments for a single string vs. a list.
if len(args) == 1:
log_type = constants.ELOG_MESSAGE
log_msg = args[0]
else:
(log_type, log_msg) = args
# The time is split to make serialization easier and not lose
# precision.
timestamp = utils.SplitTime(time.time())
self._AppendFeedback(timestamp, log_type, log_msg)
def CurrentPriority(self):
"""Returns current priority for opcode.
"""
assert self._op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# Cancel here if we were asked to
self._CheckCancel()
return self._op.priority
def SubmitManyJobs(self, jobs):
"""Submits jobs for processing.
See L{JobQueue.SubmitManyJobs}.
"""
# Locking is done in job queue
return self._queue.SubmitManyJobs(jobs)
def _EncodeOpError(err):
"""Encodes an error which occurred while processing an opcode.
"""
if isinstance(err, errors.GenericError):
to_encode = err
else:
to_encode = errors.OpExecError(str(err))
return errors.EncodeException(to_encode)
class _TimeoutStrategyWrapper:
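  # Lazily fetches and caches the next timeout from the wrapped strategy so
  # that Peek() can be called repeatedly without consuming a value.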
def __init__(self, fn):
"""Initializes this class.
"""
self._fn = fn
self._next = None
def _Advance(self):
"""Gets the next timeout if necessary.
"""
if self._next is None:
self._next = self._fn()
def Peek(self):
"""Returns the next timeout.
"""
self._Advance()
return self._next
def Next(self):
"""Returns the current timeout and advances the internal state.
"""
self._Advance()
result = self._next
self._next = None
return result
class _OpExecContext:
def __init__(self, op, index, log_prefix, timeout_strategy_factory):
"""Initializes this class.
"""
self.op = op
self.index = index
self.log_prefix = log_prefix
self.summary = op.input.Summary()
# Create local copy to modify
if getattr(op.input, opcodes_base.DEPEND_ATTR, None):
self.jobdeps = op.input.depends[:]
else:
self.jobdeps = None
self._timeout_strategy_factory = timeout_strategy_factory
self._ResetTimeoutStrategy()
def _ResetTimeoutStrategy(self):
"""Creates a new timeout strategy.
"""
self._timeout_strategy = \
_TimeoutStrategyWrapper(self._timeout_strategy_factory().NextAttempt)
def CheckPriorityIncrease(self):
"""Checks whether priority can and should be increased.
Called when locks couldn't be acquired.
"""
op = self.op
# Exhausted all retries and next round should not use blocking acquire
# for locks?
if (self._timeout_strategy.Peek() is None and
op.priority > constants.OP_PRIO_HIGHEST):
logging.debug("Increasing priority")
op.priority -= 1
self._ResetTimeoutStrategy()
return True
return False
def GetNextLockTimeout(self):
"""Returns the next lock acquire timeout.
"""
return self._timeout_strategy.Next()
class _JobProcessor(object):
(DEFER,
WAITDEP,
FINISHED) = range(1, 4)
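  # Return values of __call__: DEFER means the job should be re-queued,
  # WAITDEP means the dependency manager will re-schedule it, FINISHED means
  # the job has been fully processed.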
def __init__(self, queue, opexec_fn, job,
_timeout_strategy_factory=mcpu.LockAttemptTimeoutStrategy):
"""Initializes this class.
"""
self.queue = queue
self.opexec_fn = opexec_fn
self.job = job
self._timeout_strategy_factory = _timeout_strategy_factory
@staticmethod
def _FindNextOpcode(job, timeout_strategy_factory):
"""Locates the next opcode to run.
@type job: L{_QueuedJob}
@param job: Job object
@param timeout_strategy_factory: Callable to create new timeout strategy
"""
# Create some sort of a cache to speed up locating next opcode for future
# lookups
# TODO: Consider splitting _QueuedJob.ops into two separate lists, one for
# pending and one for processed ops.
if job.ops_iter is None:
job.ops_iter = enumerate(job.ops)
# Find next opcode to run
while True:
try:
(idx, op) = job.ops_iter.next()
except StopIteration:
raise errors.ProgrammerError("Called for a finished job")
if op.status == constants.OP_STATUS_RUNNING:
# Found an opcode already marked as running
raise errors.ProgrammerError("Called for job marked as running")
opctx = _OpExecContext(op, idx, "Op %s/%s" % (idx + 1, len(job.ops)),
timeout_strategy_factory)
if op.status not in constants.OPS_FINALIZED:
return opctx
# This is a job that was partially completed before master daemon
# shutdown, so it can be expected that some opcodes are already
# completed successfully (if any did error out, then the whole job
# should have been aborted and not resubmitted for processing).
logging.info("%s: opcode %s already processed, skipping",
opctx.log_prefix, opctx.summary)
@staticmethod
def _MarkWaitlock(job, op):
"""Marks an opcode as waiting for locks.
The job's start timestamp is also set if necessary.
@type job: L{_QueuedJob}
@param job: Job object
@type op: L{_QueuedOpCode}
@param op: Opcode object
"""
assert op in job.ops
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING)
update = False
op.result = None
if op.status == constants.OP_STATUS_QUEUED:
op.status = constants.OP_STATUS_WAITING
update = True
if op.start_timestamp is None:
op.start_timestamp = TimeStampNow()
update = True
if job.start_timestamp is None:
job.start_timestamp = op.start_timestamp
update = True
assert op.status == constants.OP_STATUS_WAITING
return update
@staticmethod
def _CheckDependencies(queue, job, opctx):
"""Checks if an opcode has dependencies and if so, processes them.
@type queue: L{JobQueue}
@param queue: Queue object
@type job: L{_QueuedJob}
@param job: Job object
@type opctx: L{_OpExecContext}
@param opctx: Opcode execution context
@rtype: bool
@return: Whether opcode will be re-scheduled by dependency tracker
"""
op = opctx.op
result = False
while opctx.jobdeps:
(dep_job_id, dep_status) = opctx.jobdeps[0]
(depresult, depmsg) = queue.depmgr.CheckAndRegister(job, dep_job_id,
dep_status)
assert ht.TNonEmptyString(depmsg), "No dependency message"
logging.info("%s: %s", opctx.log_prefix, depmsg)
if depresult == _JobDependencyManager.CONTINUE:
# Remove dependency and continue
opctx.jobdeps.pop(0)
elif depresult == _JobDependencyManager.WAIT:
# Need to wait for notification, dependency tracker will re-add job
# to workerpool
result = True
break
elif depresult == _JobDependencyManager.CANCEL:
# Job was cancelled, cancel this job as well
job.Cancel()
assert op.status == constants.OP_STATUS_CANCELING
break
elif depresult in (_JobDependencyManager.WRONGSTATUS,
_JobDependencyManager.ERROR):
# Job failed or there was an error, this job must fail
op.status = constants.OP_STATUS_ERROR
op.result = _EncodeOpError(errors.OpExecError(depmsg))
break
else:
raise errors.ProgrammerError("Unknown dependency result '%s'" %
depresult)
return result
def _ExecOpCodeUnlocked(self, opctx):
"""Processes one opcode and returns the result.
"""
op = opctx.op
assert op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# The very last check if the job was cancelled before trying to execute
if op.status == constants.OP_STATUS_CANCELING:
return (constants.OP_STATUS_CANCELING, None)
timeout = opctx.GetNextLockTimeout()
try:
# Make sure not to hold queue lock while calling ExecOpCode
result = self.opexec_fn(op.input,
_OpExecCallbacks(self.queue, self.job, op),
timeout=timeout)
except mcpu.LockAcquireTimeout:
assert timeout is not None, "Received timeout for blocking acquire"
logging.debug("Couldn't acquire locks in %0.6fs", timeout)
assert op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
# Was job cancelled while we were waiting for the lock?
if op.status == constants.OP_STATUS_CANCELING:
return (constants.OP_STATUS_CANCELING, None)
# Stay in waitlock while trying to re-acquire lock
return (constants.OP_STATUS_WAITING, None)
except CancelJob:
logging.exception("%s: Canceling job", opctx.log_prefix)
assert op.status == constants.OP_STATUS_CANCELING
return (constants.OP_STATUS_CANCELING, None)
except Exception, err: # pylint: disable=W0703
logging.exception("%s: Caught exception in %s",
opctx.log_prefix, opctx.summary)
return (constants.OP_STATUS_ERROR, _EncodeOpError(err))
else:
logging.debug("%s: %s successful",
opctx.log_prefix, opctx.summary)
return (constants.OP_STATUS_SUCCESS, result)
def __call__(self, _nextop_fn=None):
"""Continues execution of a job.
@param _nextop_fn: Callback function for tests
@return: C{FINISHED} if job is fully processed, C{DEFER} if the job should
be deferred and C{WAITDEP} if the dependency manager
(L{_JobDependencyManager}) will re-schedule the job when appropriate
"""
queue = self.queue
job = self.job
logging.debug("Processing job %s", job.id)
try:
opcount = len(job.ops)
assert job.writable, "Expected writable job"
# Don't do anything for finalized jobs
if job.CalcStatus() in constants.JOBS_FINALIZED:
return self.FINISHED
# Is a previous opcode still pending?
if job.cur_opctx:
opctx = job.cur_opctx
job.cur_opctx = None
else:
if __debug__ and _nextop_fn:
_nextop_fn()
opctx = self._FindNextOpcode(job, self._timeout_strategy_factory)
op = opctx.op
# Consistency check
assert compat.all(i.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_CANCELING)
for i in job.ops[opctx.index + 1:])
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING)
assert (op.priority <= constants.OP_PRIO_LOWEST and
op.priority >= constants.OP_PRIO_HIGHEST)
waitjob = None
if op.status != constants.OP_STATUS_CANCELING:
assert op.status in (constants.OP_STATUS_QUEUED,
constants.OP_STATUS_WAITING)
# Prepare to start opcode
if self._MarkWaitlock(job, op):
# Write to disk
queue.UpdateJobUnlocked(job)
assert op.status == constants.OP_STATUS_WAITING
assert job.CalcStatus() == constants.JOB_STATUS_WAITING
assert job.start_timestamp and op.start_timestamp
assert waitjob is None
# Check if waiting for a job is necessary
waitjob = self._CheckDependencies(queue, job, opctx)
assert op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_CANCELING,
constants.OP_STATUS_ERROR)
if not (waitjob or op.status in (constants.OP_STATUS_CANCELING,
constants.OP_STATUS_ERROR)):
logging.info("%s: opcode %s waiting for locks",
opctx.log_prefix, opctx.summary)
assert not opctx.jobdeps, "Not all dependencies were removed"
(op_status, op_result) = self._ExecOpCodeUnlocked(opctx)
op.status = op_status
op.result = op_result
assert not waitjob
if op.status in (constants.OP_STATUS_WAITING,
constants.OP_STATUS_QUEUED):
# waiting: Couldn't get locks in time
# queued: Queue is shutting down
assert not op.end_timestamp
else:
# Finalize opcode
op.end_timestamp = TimeStampNow()
if op.status == constants.OP_STATUS_CANCELING:
assert not compat.any(i.status != constants.OP_STATUS_CANCELING
for i in job.ops[opctx.index:])
else:
assert op.status in constants.OPS_FINALIZED
if op.status == constants.OP_STATUS_QUEUED:
# Queue is shutting down
assert not waitjob
finalize = False
# Reset context
job.cur_opctx = None
# In no case must the status be finalized here
assert job.CalcStatus() == constants.JOB_STATUS_QUEUED
elif op.status == constants.OP_STATUS_WAITING or waitjob:
finalize = False
if not waitjob and opctx.CheckPriorityIncrease():
# Priority was changed, need to update on-disk file
queue.UpdateJobUnlocked(job)
# Keep around for another round
job.cur_opctx = opctx
assert (op.priority <= constants.OP_PRIO_LOWEST and
op.priority >= constants.OP_PRIO_HIGHEST)
# In no case must the status be finalized here
assert job.CalcStatus() == constants.JOB_STATUS_WAITING
else:
# Ensure all opcodes so far have been successful
assert (opctx.index == 0 or
compat.all(i.status == constants.OP_STATUS_SUCCESS
for i in job.ops[:opctx.index]))
# Reset context
job.cur_opctx = None
if op.status == constants.OP_STATUS_SUCCESS:
finalize = False
elif op.status == constants.OP_STATUS_ERROR:
# If we get here, we cannot afford to check for any consistency
# any more, we just want to clean up.
# TODO: Actually, it wouldn't be a bad idea to start a timer
# here to kill the whole process.
to_encode = errors.OpExecError("Preceding opcode failed")
job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
_EncodeOpError(to_encode))
finalize = True
elif op.status == constants.OP_STATUS_CANCELING:
job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
"Job canceled by request")
finalize = True
else:
raise errors.ProgrammerError("Unknown status '%s'" % op.status)
if opctx.index == (opcount - 1):
# Finalize on last opcode
finalize = True
if finalize:
# All opcodes have been run, finalize job
job.Finalize()
# Write to disk. If the job status is final, this is the final write
# allowed. Once the file has been written, it can be archived anytime.
queue.UpdateJobUnlocked(job)
assert not waitjob
if finalize:
logging.info("Finished job %s, status = %s", job.id, job.CalcStatus())
return self.FINISHED
assert not waitjob or queue.depmgr.JobWaiting(job)
if waitjob:
return self.WAITDEP
else:
return self.DEFER
finally:
assert job.writable, "Job became read-only while being processed"
class _JobDependencyManager:
"""Keeps track of job dependencies.
"""
(WAIT,
ERROR,
CANCEL,
CONTINUE,
WRONGSTATUS) = range(1, 6)
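  # Results of CheckAndRegister: WAIT (dependency not yet finalized), ERROR
  # (lookup failed or a job depends on itself), CANCEL (dependency was
  # cancelled), CONTINUE (dependency finished with a wanted status),
  # WRONGSTATUS (dependency finished with an unwanted status).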
def __init__(self, getstatus_fn):
"""Initializes this class.
"""
self._getstatus_fn = getstatus_fn
self._waiters = {}
def JobWaiting(self, job):
"""Checks if a job is waiting.
"""
return compat.any(job in jobs
for jobs in self._waiters.values())
def CheckAndRegister(self, job, dep_job_id, dep_status):
"""Checks if a dependency job has the requested status.
If the other job is not yet in a finalized status, the calling job will be
notified (re-added to the workerpool) at a later point.
@type job: L{_QueuedJob}
@param job: Job object
@type dep_job_id: int
@param dep_job_id: ID of dependency job
@type dep_status: list
@param dep_status: Required status
"""
assert ht.TJobId(job.id)
assert ht.TJobId(dep_job_id)
assert ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))(dep_status)
if job.id == dep_job_id:
return (self.ERROR, "Job can't depend on itself")
# Get status of dependency job
try:
status = self._getstatus_fn(dep_job_id)
except errors.JobLost, err:
return (self.ERROR, "Dependency error: %s" % err)
assert status in constants.JOB_STATUS_ALL
job_id_waiters = self._waiters.setdefault(dep_job_id, set())
if status not in constants.JOBS_FINALIZED:
# Register for notification and wait for job to finish
job_id_waiters.add(job)
return (self.WAIT,
"Need to wait for job %s, wanted status '%s'" %
(dep_job_id, dep_status))
# Remove from waiters list
if job in job_id_waiters:
job_id_waiters.remove(job)
if (status == constants.JOB_STATUS_CANCELED and
constants.JOB_STATUS_CANCELED not in dep_status):
return (self.CANCEL, "Dependency job %s was cancelled" % dep_job_id)
elif not dep_status or status in dep_status:
return (self.CONTINUE,
"Dependency job %s finished with status '%s'" %
(dep_job_id, status))
else:
return (self.WRONGSTATUS,
"Dependency job %s finished with status '%s',"
" not one of '%s' as required" %
(dep_job_id, status, utils.CommaJoin(dep_status)))
def _RemoveEmptyWaitersUnlocked(self):
"""Remove all jobs without actual waiters.
"""
for job_id in [job_id for (job_id, waiters) in self._waiters.items()
if not waiters]:
del self._waiters[job_id]
class JobQueue(object):
"""Queue used to manage the jobs.
"""
def __init__(self, context, cfg):
"""Constructor for JobQueue.
The constructor will initialize the job queue object and then
start loading the current jobs from disk, either for starting them
    (if they were queued) or for aborting them (if they were already
running).
@type context: GanetiContext
@param context: the context object for access to the configuration
data and other ganeti objects
"""
self.context = context
self._memcache = weakref.WeakValueDictionary()
self._my_hostname = netutils.Hostname.GetSysName()
# Get initial list of nodes
self._nodes = dict((n.name, n.primary_ip)
for n in cfg.GetAllNodesInfo().values()
if n.master_candidate)
# Remove master node
self._nodes.pop(self._my_hostname, None)
# Job dependencies
self.depmgr = _JobDependencyManager(self._GetJobStatusForDependencies)
def _GetRpc(self, address_list):
"""Gets RPC runner with context.
"""
return rpc.JobQueueRunner(self.context, address_list)
@staticmethod
def _CheckRpcResult(result, nodes, failmsg):
"""Verifies the status of an RPC call.
Since we aim to keep consistency should this node (the current
    master) fail, we will log errors if our rpc calls fail, and especially
    log the case when more than half of the nodes fail.
@param result: the data as returned from the rpc call
@type nodes: list
@param nodes: the list of nodes we made the call to
@type failmsg: str
@param failmsg: the identifier to be used for logging
"""
failed = []
success = []
for node in nodes:
msg = result[node].fail_msg
if msg:
failed.append(node)
logging.error("RPC call %s (%s) failed on node %s: %s",
result[node].call, failmsg, node, msg)
else:
success.append(node)
# +1 for the master node
if (len(success) + 1) < len(failed):
# TODO: Handle failing nodes
logging.error("More than half of the nodes failed")
def _GetNodeIp(self):
"""Helper for returning the node name/ip list.
@rtype: (list, list)
@return: a tuple of two lists, the first one with the node
names and the second one with the node addresses
"""
# TODO: Change to "tuple(map(list, zip(*self._nodes.items())))"?
name_list = self._nodes.keys()
addr_list = [self._nodes[name] for name in name_list]
return name_list, addr_list
def _UpdateJobQueueFile(self, file_name, data, replicate):
"""Writes a file locally and then replicates it to all nodes.
This function will replace the contents of a file on the local
node and then replicate it to all the other nodes we have.
@type file_name: str
@param file_name: the path of the file to be replicated
@type data: str
@param data: the new contents of the file
@type replicate: boolean
@param replicate: whether to spread the changes to the remote nodes
"""
getents = runtime.GetEnts()
utils.WriteFile(file_name, data=data, uid=getents.masterd_uid,
gid=getents.daemons_gid,
mode=constants.JOB_QUEUE_FILES_PERMS)
if replicate:
names, addrs = self._GetNodeIp()
result = _CallJqUpdate(self._GetRpc(addrs), names, file_name, data)
self._CheckRpcResult(result, self._nodes, "Updating %s" % file_name)
def _RenameFilesUnlocked(self, rename):
"""Renames a file locally and then replicate the change.
This function will rename a file in the local queue directory
and then replicate this rename to all the other nodes we have.
@type rename: list of (old, new)
@param rename: List containing tuples mapping old to new names
"""
# Rename them locally
for old, new in rename:
utils.RenameFile(old, new, mkdir=True)
# ... and on all nodes
names, addrs = self._GetNodeIp()
result = self._GetRpc(addrs).call_jobqueue_rename(names, rename)
self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename)
@staticmethod
def _GetJobPath(job_id):
"""Returns the job file for a given job id.
@type job_id: str
@param job_id: the job identifier
@rtype: str
@return: the path to the job file
"""
return utils.PathJoin(pathutils.QUEUE_DIR, "job-%s" % job_id)
@staticmethod
def _GetArchivedJobPath(job_id):
"""Returns the archived job file for a give job id.
@type job_id: str
@param job_id: the job identifier
@rtype: str
@return: the path to the archived job file
"""
return utils.PathJoin(pathutils.JOB_QUEUE_ARCHIVE_DIR,
jstore.GetArchiveDirectory(job_id),
"job-%s" % job_id)
@staticmethod
def _DetermineJobDirectories(archived):
"""Build list of directories containing job files.
@type archived: bool
@param archived: Whether to include directories for archived jobs
@rtype: list
"""
result = [pathutils.QUEUE_DIR]
if archived:
archive_path = pathutils.JOB_QUEUE_ARCHIVE_DIR
result.extend(map(compat.partial(utils.PathJoin, archive_path),
utils.ListVisibleFiles(archive_path)))
return result
@classmethod
def _GetJobIDsUnlocked(cls, sort=True, archived=False):
"""Return all known job IDs.
The method only looks at disk because it's a requirement that all
jobs are present on disk (so in the _memcache we don't have any
extra IDs).
@type sort: boolean
@param sort: perform sorting on the returned job ids
@rtype: list
@return: the list of job IDs
"""
jlist = []
for path in cls._DetermineJobDirectories(archived):
for filename in utils.ListVisibleFiles(path):
m = constants.JOB_FILE_RE.match(filename)
if m:
jlist.append(int(m.group(1)))
if sort:
jlist.sort()
return jlist
def _LoadJobUnlocked(self, job_id):
"""Loads a job from the disk or memory.
Given a job id, this will return the cached job object if
existing, or try to load the job from the disk. If loading from
disk, it will also add the job to the cache.
@type job_id: int
@param job_id: the job id
@rtype: L{_QueuedJob} or None
@return: either None or the job object
"""
assert isinstance(job_id, int), "Job queue: Supplied job id is not an int!"
job = self._memcache.get(job_id, None)
if job:
logging.debug("Found job %s in memcache", job_id)
assert job.writable, "Found read-only job in memcache"
return job
try:
job = JobQueue._LoadJobFromDisk(self, job_id, False)
if job is None:
return job
except errors.JobFileCorrupted:
old_path = self._GetJobPath(job_id)
new_path = self._GetArchivedJobPath(job_id)
if old_path == new_path:
# job already archived (future case)
logging.exception("Can't parse job %s", job_id)
else:
# non-archived case
logging.exception("Can't parse job %s, will archive.", job_id)
self._RenameFilesUnlocked([(old_path, new_path)])
return None
assert job.writable, "Job just loaded is not writable"
self._memcache[job_id] = job
logging.debug("Added job %s to the cache", job_id)
return job
@staticmethod
def _LoadJobFromDisk(queue, job_id, try_archived, writable=None):
"""Load the given job file from disk.
Given a job file, read, load and restore it in a _QueuedJob format.
@type job_id: int
@param job_id: job identifier
@type try_archived: bool
@param try_archived: Whether to try loading an archived job
@rtype: L{_QueuedJob} or None
@return: either None or the job object
"""
path_functions = [(JobQueue._GetJobPath, False)]
if try_archived:
path_functions.append((JobQueue._GetArchivedJobPath, True))
raw_data = None
archived = None
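    # Try the live queue directory first and fall back to the archive
    # directory if the caller allowed it.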
for (fn, archived) in path_functions:
filepath = fn(job_id)
logging.debug("Loading job from %s", filepath)
try:
raw_data = utils.ReadFile(filepath)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
else:
break
if not raw_data:
logging.debug("No data available for job %s", job_id)
return None
if writable is None:
writable = not archived
try:
data = serializer.LoadJson(raw_data)
job = _QueuedJob.Restore(queue, data, writable, archived)
except Exception, err: # pylint: disable=W0703
raise errors.JobFileCorrupted(err)
return job
@staticmethod
def SafeLoadJobFromDisk(queue, job_id, try_archived, writable=None):
"""Load the given job file from disk.
Given a job file, read, load and restore it in a _QueuedJob format.
In case of error reading the job, it gets returned as None, and the
exception is logged.
@type job_id: int
@param job_id: job identifier
@type try_archived: bool
@param try_archived: Whether to try loading an archived job
@rtype: L{_QueuedJob} or None
@return: either None or the job object
"""
try:
return JobQueue._LoadJobFromDisk(queue, job_id, try_archived,
writable=writable)
except (errors.JobFileCorrupted, EnvironmentError):
logging.exception("Can't load/parse job %s", job_id)
return None
@classmethod
def SubmitManyJobs(cls, jobs):
"""Create and store multiple jobs.
"""
return luxi.Client(address=pathutils.QUERY_SOCKET).SubmitManyJobs(jobs)
@staticmethod
def _ResolveJobDependencies(resolve_fn, deps):
"""Resolves relative job IDs in dependencies.
@type resolve_fn: callable
@param resolve_fn: Function to resolve a relative job ID
@type deps: list
@param deps: Dependencies
@rtype: tuple; (boolean, string or list)
@return: If successful (first tuple item), the returned list contains
resolved job IDs along with the requested status; if not successful,
the second element is an error message
"""
result = []
for (dep_job_id, dep_status) in deps:
if ht.TRelativeJobId(dep_job_id):
assert ht.TInt(dep_job_id) and dep_job_id < 0
try:
job_id = resolve_fn(dep_job_id)
except IndexError:
# Abort
return (False, "Unable to resolve relative job ID %s" % dep_job_id)
else:
job_id = dep_job_id
result.append((job_id, dep_status))
return (True, result)
def _GetJobStatusForDependencies(self, job_id):
"""Gets the status of a job for dependencies.
@type job_id: int
@param job_id: Job ID
@raise errors.JobLost: If job can't be found
"""
# Not using in-memory cache as doing so would require an exclusive lock
# Try to load from disk
job = JobQueue.SafeLoadJobFromDisk(self, job_id, True, writable=False)
if job:
assert not job.writable, "Got writable job" # pylint: disable=E1101
if job:
return job.CalcStatus()
raise errors.JobLost("Job %s not found" % job_id)
def UpdateJobUnlocked(self, job, replicate=True):
"""Update a job's on disk storage.
After a job has been modified, this function needs to be called in
order to write the changes to disk and replicate them to the other
nodes.
@type job: L{_QueuedJob}
@param job: the changed job
@type replicate: boolean
@param replicate: whether to replicate the change to remote nodes
"""
if __debug__:
finalized = job.CalcStatus() in constants.JOBS_FINALIZED
assert (finalized ^ (job.end_timestamp is None))
assert job.writable, "Can't update read-only job"
assert not job.archived, "Can't update archived job"
filename = self._GetJobPath(job.id)
data = serializer.DumpJson(job.Serialize())
logging.debug("Writing job %s to %s", job.id, filename)
self._UpdateJobQueueFile(filename, data, replicate)
def HasJobBeenFinalized(self, job_id):
"""Checks if a job has been finalized.
@type job_id: int
@param job_id: Job identifier
@rtype: boolean
@return: True if the job has been finalized,
False if the timeout has been reached,
None if the job doesn't exist
"""
job = JobQueue.SafeLoadJobFromDisk(self, job_id, True, writable=False)
if job is not None:
return job.CalcStatus() in constants.JOBS_FINALIZED
elif cluster.LUClusterDestroy.clusterHasBeenDestroyed:
# FIXME: The above variable is a temporary workaround until the Python job
# queue is completely removed. When removing the job queue, also remove
# the variable from LUClusterDestroy.
return True
else:
return None
def CancelJob(self, job_id):
"""Cancels a job.
This will only succeed if the job has not started yet.
@type job_id: int
@param job_id: job ID of job to be cancelled.
"""
logging.info("Cancelling job %s", job_id)
return self._ModifyJobUnlocked(job_id, lambda job: job.Cancel())
def ChangeJobPriority(self, job_id, priority):
"""Changes a job's priority.
@type job_id: int
@param job_id: ID of the job whose priority should be changed
@type priority: int
@param priority: New priority
"""
logging.info("Changing priority of job %s to %s", job_id, priority)
if priority not in constants.OP_PRIO_SUBMIT_VALID:
allowed = utils.CommaJoin(constants.OP_PRIO_SUBMIT_VALID)
raise errors.GenericError("Invalid priority %s, allowed are %s" %
(priority, allowed))
def fn(job):
(success, msg) = job.ChangePriority(priority)
return (success, msg)
return self._ModifyJobUnlocked(job_id, fn)
def _ModifyJobUnlocked(self, job_id, mod_fn):
"""Modifies a job.
@type job_id: int
@param job_id: Job ID
@type mod_fn: callable
@param mod_fn: Modifying function, receiving job object as parameter,
returning tuple of (status boolean, message string)
"""
job = self._LoadJobUnlocked(job_id)
if not job:
logging.debug("Job %s not found", job_id)
return (False, "Job %s not found" % job_id)
assert job.writable, "Can't modify read-only job"
assert not job.archived, "Can't modify archived job"
(success, msg) = mod_fn(job)
if success:
# If the job was finalized (e.g. cancelled), this is the final write
# allowed. The job can be archived anytime.
self.UpdateJobUnlocked(job)
return (success, msg)
| bsd-2-clause | -2,647,619,656,363,776,500 | 29.403734 | 80 | 0.644055 | false |
duke605/RunePy | tasks/configuration.py | 1 | 5798 | from secret import DISCORD_BOTS_TOKEN
from db.models import objects, Configuration
from datetime import datetime, timedelta
from secret import MIXPANEL_SECRET
import asyncio
import base64
import discord
import json
import util
class Config:
def __init__(self, bot):
self.bot = bot
self.prune_servers_task = bot.loop.create_task(self.prune_servers())
@util.ignore_exceptions(exception_handler=lambda info: None)
def __unload(self):
self.prune_servers_task.cancel()
async def _send_parting_message(self, server):
"""
Sends a message to the server owner and bot log telling the owner why the bot left the server
:param server: The server the bot is leaving
"""
owner = server.owner
bots = sum(m.bot for m in server.members)
users = sum(not m.bot for m in server.members)
message = '{0}, your server, **{1}**, has not used {2} in 14 or more days. To keep the bot statistics accurate and reduce memory ' \
'usage {2} will leave your server. If you wish to use {2}, but don\'t plan on using it at least once every 14 days, ' \
'please join the official development server <http://bit.ly/Duke605Development>. If you do plan on using {2} more ' \
'than every 14 days, use the following link to invite {2} back to your server <http://bit.ly/Rune-Py>.'
e = discord.Embed()
e.title = server.name
e.timestamp = server.me.joined_at
e.description = 'Did not use the bot for 14 or more days.'
e.add_field(name='Bots', value='{:,}'.format(bots))
e.add_field(name='Users', value='{:,}'.format(users))
e.set_author(name=str(owner), icon_url=owner.avatar_url or owner.default_avatar_url)
e.set_thumbnail(url=server.icon_url or owner.avatar_url or owner.default_avatar_url)
await self.bot.send_message(owner, message.format(owner.mention, server.name, server.me.mention))
await self.bot.send_message(discord.Object(id=241984924616359936), embed=e)
@util.ignore_exceptions_async()
async def _get_command_usages(self):
"""
Uses the Mixpanel API to call a script and get the past 2 nights of command usages
"""
# Getting script
with open('assets/scripts/command_usages.js') as file:
script = file.read()
data = {
'url': 'https://mixpanel.com/api/2.0/jql',
'headers': {
'authorization': 'Basic %s' % base64.b64encode(bytes(MIXPANEL_SECRET + ':', encoding='UTF-8')).decode('utf-8')
},
'params': {
'script': script,
'params': json.dumps({
'from_date': (datetime.utcnow() - timedelta(days=14)).strftime('%Y-%m-%d'),
'to_date': (datetime.utcnow() + timedelta(days=1)).strftime('%Y-%m-%d')
})
}
}
# Getting usages
async with self.bot.whttp.get(**data) as r:
if r.status != 200:
return None
data = {usage['key'][0]: usage['value'] for usage in await r.json()}
return data
@util.ignore_exceptions_async()
async def _evaluate_server(self, server, usages):
"""
Evaluates a server to see if it should be left
:param server: The server to evaluate
:param usages: The command usages for the past 2 weeks
"""
commands_used = usages.get(server.id, 0)
if commands_used <= 0 and (server.me.joined_at + timedelta(days=14)) <= datetime.utcnow():
await self._send_parting_message(server)
await self.bot.leave_server(server)
async def prune_servers(self):
"""
Leaves servers that have not used the bot in 2 weeks or 1 fortnight or 14 days
"""
await self.bot.wait_until_ready()
while not self.bot.is_closed:
# Getting command usages for the last 2 weeks
usages = await self._get_command_usages()
if not usages:
continue
# Evaluating servers
servers = [s for s in self.bot.servers]
for server in servers:
# Skipping my server
if server.id in ('240109767970783233', '110373943822540800'):
continue
# Evaluating if server should be left
await self._evaluate_server(server, usages)
# Waiting 1 hour
del usages, servers
await asyncio.sleep(60 * 60)
@util.ignore_exceptions_async()
async def _update_server_count(self):
"""
Updates bot's server count on bots.discord.pw
"""
data = {
'url': 'https://bots.discord.pw/api/bots/%s/stats' % self.bot.user.id,
'headers': {
'Authorization': DISCORD_BOTS_TOKEN,
'User-Agent': 'Python:RunePy:v%s (by /u/duke605)' % self.bot.cogs['About'].version,
'Content-Type': 'application/json'
},
'data': json.dumps({
'server_count': len(self.bot.servers)
})
}
async with self.bot.whttp.post(**data):
pass
async def on_server_join(self, server):
await objects.create(Configuration, server_id=server.id)
await self._update_server_count()
self.bot.configurations[server.id] = {'prefix': None}
async def on_server_remove(self, server):
await self._update_server_count()
await objects.execute(Configuration.raw('DELETE FROM configurations WHERE server_id = %s', server.id))
del self.bot.configurations[server.id]
def setup(bot):
bot.add_cog(Config(bot))
| mit | -5,977,478,621,429,887,000 | 35.929936 | 140 | 0.583994 | false |
sonus89/FIPER | client/direct.py | 1 | 6293 | from __future__ import print_function, unicode_literals, absolute_import
import sys
import time
from FIPER.generic.interface import InterfaceFactory
from FIPER.generic.abstract import AbstractListener
from FIPER.generic.subsystem import StreamDisplayer
from FIPER.generic.probeclient import Probe
class DirectConnection(object):
"""
Abstraction of a direct connection with a car
(no server or centralized controller present).
Methods used for interfacing with this class:
- connect(ip) initiates and builds a connection with a
TCPCar instance at the supplied IP address.
- probe(ip) probes the given IP address. If there is a
TCPCar instance there, the method returns its a list
containing its [IP, ID]. This method can accept multiple
addresses or an address range e.g.
probe("192.168.0.0-100") or probe("192.168.1.1", "192.168.1.5")
- get_stream() is a generator function, yielding the video
frames as numpy arrays.
- display_stream() displays the frames in a cv2 window.
- stop_stream() tears down the streaming thread.
- rc_command() sends remote control commands to the car.
- teardown() disassembles the communacion channels. After calling
this method, the DirectConnection instance is ready for deletion.
"""
def __init__(self, myIP):
"""
:param myIP: the local IP address
"""
self.target = None
self.interface = None
self.streamer = None
self.streaming = False
self.listener = self._OneTimeListener(self, myIP)
self.probeobj = Probe()
def probe(self, ip):
_, ID = self.probeobj.probe(ip)[0]
print("DC: probed {} response: {}".format(ip, ID))
return ID
def sweep(self, *ips):
responses = self.probeobj.probe(*ips)
print("PROBE RESPONSES:")
for ip, response in responses:
print("{}: {}".format(ip, response))
return responses
def connect(self, ip):
"""
Initiates via the probe protocol, then bootstraps the connection
via AbstractListener.mainloop()
"""
rIP, rID = Probe.initiate(ip)
if rIP == ip and rID is not None:
# Enter AbstractListener's mainloop and bootstrap the connetion
try:
self.listener.mainloop()
except Exception as E:
print("DC: exception in AbstractListener mainloop:", E)
return False
else:
return True
else:
print("DC: invalid response on initiation from {}: {}".format(rIP, rID))
return False
def get_stream(self, bytestream=False):
"""
Generator function used to create an infinite stream of
A/V data from the CarInterface.
:param bytestream: if set, raw bytes are yielded
instead of processed frames (numpy arrays)
"""
if self.interface is None:
raise RuntimeError("No connection available!")
stream = (self.interface.bytestream()
if bytestream else
self.interface.framestream())
for d in stream:
yield d
def display_stream(self):
if self.interface is None:
print("DC: no interface! Build a connection first!")
return
self.interface.send(b"stream on")
self.streaming = True
self.streamer = StreamDisplayer(self.interface) # launches the thread!
def stop_stream(self):
self.interface.send(b"stream off")
if self.streamer is not None:
self.streamer.teardown(0)
self.streamer = None
self.streaming = False
def teardown(self, sleep=0):
if self.streaming:
self.stop_stream()
if self.interface is not None:
self.interface.teardown()
time.sleep(sleep)
def rc_command(self, *commands):
self.interface.rcsocket.sendall(b"".join(commands))
class _OneTimeListener(AbstractListener):
def __init__(self, master, myIP):
super(DirectConnection._OneTimeListener, self).__init__(myIP)
self.master = master
def callback(self, msock):
self.master.interface = InterfaceFactory(
msock, self.dlistener, self.rclistener
).get()
self.running = False # Break the mainloop in AbstractListener
def run():
from random import choice
def probe_and_connect(dc, IP):
# Probe car up to 3 times
for probe in range(3, -1, -1):
ID = dc.probe(IP)
print("PROBE-{}: reponse: {} from {}"
.format(probe, IP, ID))
time.sleep(3)
if ID is not None:
break
else:
# If noone answers, return False success code
return False
# Try to build connection, return the success code
return dc.connect(IP)
def test_stream(dc):
print("STREAM TEST online...")
dc.display_stream()
while 1:
# noinspection PyUnboundLocalVariable
raw_input("Hit <enter> to stop! ")
dc.stop_stream()
break
print("STREAM TEST offline...")
def test_rc(dc):
msgs = b">", b"<", b"A", b"V"
choices = []
print("RC TEST online...")
while 1:
try:
chc = choice(msgs)
choices.append(chc)
dc.rc_command(chc)
time.sleep(0.1)
except KeyboardInterrupt:
break
except Exception as E:
print("RC Test caught exception:", E)
break
if len(choices) >= 10:
print("".join(choices))
choices = []
print("RC TEST offline...")
car_IP = ("127.0.0.1" if len(sys.argv) == 1 else sys.argv[1])
connection = DirectConnection(car_IP)
success = probe_and_connect(connection, car_IP)
if not success:
return
test_stream(connection)
test_rc(connection)
print(" -- TEARING DOWN -- ")
connection.teardown(3)
print(" -- END PROGRAM -- ")
if __name__ == '__main__':
run()
| mit | 2,546,170,463,574,380,000 | 31.107143 | 84 | 0.578262 | false |
tmoer/multimodal_varinf | networks/network_rl.py | 1 | 7832 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 14:48:24 2017
@author: thomas
"""
#from layers import Latent_Layer
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from tfutils.helpers import repeat_v2
from tfutils.distributions import logsumexp, discretized_logistic
from layers import Latent_Layer
class Network(object):
''' VAE & RL template '''
def __init__(self,hps,state_dim,binsize=6):
# binsize = the number of discrete categories per dimension of the state (x and y below).
# Input and output are normalized over this quantity.
# placeholders
self.x = x = tf.placeholder("float32", shape=[None,state_dim])
self.y = y = tf.placeholder("float32", shape=[None,state_dim])
self.a = a = tf.placeholder("float32", shape=[None,1])
self.Qtarget = Qtarget = tf.placeholder("float32", shape=[None,1])
self.is_training = is_training = tf.placeholder("bool") # if True: sample from q, else sample from p
self.k = k = tf.placeholder('int32') # number of importance samples
self.temp = temp = tf.Variable(5.0,name='temperature',trainable=False) # Temperature for discrete latents
self.lamb = lamb = tf.Variable(1.0,name="lambda",trainable=False) # Lambda for KL annealing
xa = tf.concat([x/binsize,a],axis=1)
# Importance sampling: repeats along second dimension
xa_rep = repeat_v2(xa,k)
y_rep = repeat_v2(y/binsize,k)
# RL part of the graph
with tf.variable_scope('q_net'):
rl1 = slim.fully_connected(x,50,tf.nn.relu)
rl2 = slim.fully_connected(rl1,50,tf.nn.relu)
rl3 = slim.fully_connected(rl2,50,activation_fn=None)
self.Qsa = Qsa = slim.fully_connected(rl3,4,activation_fn=None)
if hps.use_target_net:
with tf.variable_scope('target_net'):
rl1_t = slim.fully_connected(x,50,tf.nn.relu)
rl2_t = slim.fully_connected(rl1_t,50,tf.nn.relu)
rl3_t = slim.fully_connected(rl2_t,50,activation_fn=None)
self.Qsa_t = slim.fully_connected(rl3_t,4,activation_fn=None)
copy_ops = []
q_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_net')
tar_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')
for tar,q in zip(q_var,tar_var):
copy_op = q.assign(tar)
copy_ops.append(copy_op)
self.copy_op = tf.group(*copy_ops, name='copy_op')
a_onehot = tf.one_hot(tf.to_int32(tf.squeeze(a,axis=1)),4,1.0,0.0)
Qs = tf.reduce_sum(a_onehot*Qsa,reduction_indices=1) ## identify Qsa based on a
self.rl_cost = rl_cost = tf.nn.l2_loss(Qs - Qtarget)
# Batch norm: skip for now
# Encoder x,y --> h
xy = tf.concat([xa_rep,y_rep],1) # concatenate along last dim
h_up = slim.fully_connected(xy,hps.h_size,tf.nn.relu)
# Initialize ladders
layers = []
for i in range(hps.depth):
layers.append(Latent_Layer(hps,hps.var_type[i],i))
# Ladder up
for i,layer in enumerate(layers):
h_up = layer.up(h_up)
# Ladder down
# Prior x --> p_z
h_down = slim.fully_connected(xa_rep,hps.h_size,tf.nn.relu)
kl_sum = 0.0
kl_sample = 0.0
for i,layer in reversed(list(enumerate(layers))):
h_down, kl_cur, kl_sam = layer.down(h_down,is_training,temp,lamb)
kl_sum += kl_cur
kl_sample += kl_sam
# Decoder: x,z --> y
xz = tf.concat([slim.flatten(h_down),xa_rep],1)
dec1 = slim.fully_connected(xz,250,tf.nn.relu)
dec2 = slim.fully_connected(dec1,250,tf.nn.relu)
dec3 = slim.fully_connected(dec2,250,activation_fn=None)
mu_y = slim.fully_connected(dec3,state_dim,activation_fn=None)
if hps.ignore_sigma_outcome:
log_dec_noise = tf.zeros(tf.shape(mu_y))
else:
log_dec_noise = slim.fully_connected(dec3,1,activation_fn=None)
# p(y|x,z)
if hps.out_lik == 'normal':
dec_noise = tf.exp(tf.clip_by_value(log_dec_noise,-10,10))
outdist = tf.contrib.distributions.Normal(mu_y,dec_noise)
self.log_py_x = log_py_x = tf.reduce_sum(outdist.log_prob(y_rep),axis=1)
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(log_py_x - kl_sample,[-1,k])) - tf.log(tf.to_float(k)))
y_sample = outdist.sample() if not hps.ignore_sigma_outcome else mu_y
self.y_sample = tf.to_int32(tf.round(tf.clip_by_value(y_sample,0,1)*binsize))
elif hps.out_lik == 'discretized_logistic':
self.log_py_x = log_py_x = tf.reduce_sum(discretized_logistic(mu_y,log_dec_noise,binsize=1,sample=y_rep),axis=1)
outdist = tf.contrib.distributions.Logistic(loc=mu_y,scale = tf.exp(log_dec_noise))
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(tf.reduce_sum(outdist.log_prob(y_rep),axis=1) - kl_sample,[-1,k]))- tf.log(tf.to_float(k)))
y_sample = outdist.sample() if not hps.ignore_sigma_outcome else mu_y
self.y_sample = tf.to_int32(tf.round(tf.clip_by_value(y_sample,0,1)*binsize))
elif hps.out_lik == 'discrete':
logits_y = slim.fully_connected(dec3,state_dim*(binsize+1),activation_fn=None)
logits_y = tf.reshape(logits_y,[-1,state_dim,binsize+1])
disc_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_y,labels=tf.to_int32(tf.round(y_rep*6)))
self.log_py_x = log_py_x = -tf.reduce_sum(disc_loss,[1])
self.nats = -1*tf.reduce_mean(logsumexp(tf.reshape(log_py_x - kl_sample,[-1,k])) - tf.log(tf.to_float(k)))
outdist = tf.contrib.distributions.Categorical(logits=logits_y)
self.y_sample = outdist.sample() if not hps.ignore_sigma_outcome else tf.argmax(logits_y,axis=2)
# To display
self.kl = tf.reduce_mean(kl_sum)
# ELBO
log_divergence = tf.reshape(log_py_x - kl_sum,[-1,k]) # shape [batch_size,k]
if np.abs(hps.alpha-1.0)>1e-3: # use Renyi alpha-divergence
log_divergence = log_divergence * (1-hps.alpha)
logF = logsumexp(log_divergence)
self.elbo = elbo = tf.reduce_mean(logF - tf.log(tf.to_float(k)))/ (1-hps.alpha)
else:
# use KL divergence
self.elbo = elbo = tf.reduce_mean(log_divergence)
self.loss = loss = -elbo
### Optimizer
self.lr = lr = tf.Variable(0.001,name="learning_rate",trainable=False)
global_step = tf.Variable(0,name='global_step',trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
if hps.max_grad != None:
grads_and_vars = optimizer.compute_gradients(loss)
for idx, (grad, var) in enumerate(grads_and_vars):
if grad is not None:
grads_and_vars[idx] = (tf.clip_by_norm(grad, hps.max_grad), var)
self.train_op = optimizer.apply_gradients(grads_and_vars)
self.grads_and_vars = grads_and_vars
else:
self.train_op = optimizer.minimize(loss,global_step=global_step)
self.grads_and_vars = tf.constant(0)
self.train_op_rl = optimizer.minimize(rl_cost)
self.init_op=tf.global_variables_initializer() | mit | 8,419,598,953,240,772,000 | 47.652174 | 154 | 0.588994 | false |
KaiserAndres/kaiserBot | bot_executables.py | 1 | 4751 | import roller
import random
DEFAULT_CARD_AMOUNT = 1
MAX_CARDS = 15
CARD_SEPARATOR = "||"
def ping_exec(irc, message):
pong = 'PONG ' + message.text.split(" ")[1] + '\r\n'
irc.send(pong.encode("utf-8"))
def roll_exec(irc, message):
'''
A !roll comand has the following structure:
!roll diceAmount+d+diceSize+"+"+modifier
* Dice amount is an integer up to 20000
* Dice Size is an integer
* Modifier is an integer that is added onto the roll after
The !Roll command can also have this structure:
!!roll d+diceAmount+d+diceSize+"+"+modifier
* Dice amount is the result of a roll of said size and then proceeds
to roll that many of the following dice
* Dice Size is an integer
* Modifier is an integer that is added onto the roll after
'''
diceNumbers = roller.getRolledNumbers(message.text)
messageToSend = ''
# -------------------------------------------------------------------
# Hard limits on the dice sizes
# -------------------------------------------------------------------
if diceNumbers[0] > 10:
diceNumbers[0] = 10
if diceNumbers[0] < 1:
diceNumbers[0] = 1
if diceNumbers[1] > 2000:
diceNumbers[1] = 2000
if diceNumbers[1] < 1:
diceNumbers[1] = 1
if diceNumbers[2] < 1:
diceNumbers[2] = 1
rolledArray = roller.roll(diceNumbers[0],
diceNumbers[1],
diceNumbers[2])
for rollNum in rolledArray:
# REMINDER: make a message maker function cause this is ugly!
if (diceNumbers[3] == 0):
messageToSend = (messageToSend +
"\x0312,15(" + str(diceNumbers[1]) +
"d" + str(diceNumbers[2]) + ") \x032,15[" +
str(rollNum) + "]\x031,15 : \x034,15{" +
str(rollNum + diceNumbers[3]) + "} ")
else:
messageToSend = (messageToSend + "\x0312,15(" +
str(diceNumbers[1]) + "d" +
str(diceNumbers[2]) + "+" +
str(diceNumbers[3]) + ") \x032,15[" +
str(rollNum) + "+" +
str(diceNumbers[3]) +
"]\x031,15 : \x034,15{" +
str(rollNum + diceNumbers[3]) + "} ")
irc.send(message.reply(messageToSend))
def join_exec(irc, message):
'''
A join command has the following structure:
!JOIN #CHANNEL
A message is sent to the irc server requesting to join #CHANNEL
'''
chann = ""
foundLink = False
for char in message.text:
if char == "#":
foundLink = True
if foundLink:
chann = chann + char
if chann != "":
join_message = "JOIN " + chann + "\n"
irc.send(join_message.encode("utf-8"))
else:
irc.send(message.reply("Error 02: bad channel."))
def tarot_exec(irc, message):
'''
Tarot command asks for the number of cards to be drawn and returns them.
A tarot command has the following structure:
!tarot <NUMBER OF CARDS>
'''
card_amount = get_card_amount(message)
card_spread = spread_cards(card_amount)
output_message = "You got these cards: " + CARD_SEPARATOR.join(card_spread)
irc.send(message.reply(output_message))
def spread_cards(card_amount):
card_spread = []
local_deck = load_deck("deck")
for time in range(0, card_amount):
card_index = random.randint(0, len(local_deck) - 1)
is_reversed = random.randint(0, 1) == 1
card_text = local_deck[card_index]
if is_reversed:
card_text = card_text + "(reversed)"
card_spread.append(card_text)
local_deck.remove(local_deck[card_index])
return card_spread
def get_card_amount(message):
number_buffer = ""
number_end = 9
for characterIndex in range(0, len(message.text)):
try:
int(message.text[characterIndex])
if characterIndex < number_end:
number_buffer = number_buffer + message.text[characterIndex]
except ValueError:
continue
try:
card_amount = int(number_buffer)
except ValueError:
card_amount = DEFAULT_CARD_AMOUNT
if card_amount > MAX_CARDS:
card_amount = MAX_CARDS
return card_amount
def load_deck(deck_file_name):
deck_file = open(deck_file_name, "r")
deck_text = deck_file.readlines()
deck = []
deck_file.close()
for card in deck_text:
deck.append(card[:-1])
return deck
| mit | -1,279,902,818,268,667,600 | 29.455128 | 80 | 0.53273 | false |
thermokarst/advent-of-code-2015 | day20.py | 1 | 2724 | # Matthew Ryan Dillon
# github.com/thermokarst
#
# --- Day 20: Infinite Elves and Infinite Houses ---
#
# To keep the Elves busy, Santa has them deliver some presents by hand,
# door-to-door. He sends them down a street with infinite houses numbered
# sequentially: 1, 2, 3, 4, 5, and so on.
#
# Each Elf is assigned a number, too, and delivers presents to houses based on
# that number:
#
# - The first Elf (number 1) delivers presents to every house: 1, 2, 3, 4, 5, ....
# - The second Elf (number 2) delivers presents to every second house: 2, 4, 6,
# 8, 10, ....
# - Elf number 3 delivers presents to every third house: 3, 6, 9, 12, 15, ....
#
# There are infinitely many Elves, numbered starting with 1. Each Elf delivers
# presents equal to ten times his or her number at each house.
#
# So, the first nine houses on the street end up like this:
#
# House 1 got 10 presents.
# House 2 got 30 presents.
# House 3 got 40 presents.
# House 4 got 70 presents.
# House 5 got 60 presents.
# House 6 got 120 presents.
# House 7 got 80 presents.
# House 8 got 150 presents.
# House 9 got 130 presents.
#
# The first house gets 10 presents: it is visited only by Elf 1, which delivers 1
# * 10 = 10 presents. The fourth house gets 70 presents, because it is visited by
# Elves 1, 2, and 4, for a total of 10 + 20 + 40 = 70 presents.
#
# What is the lowest house number of the house to get at least as many presents
# as the number in your puzzle input?
#
# --- Part Two ---
#
# The Elves decide they don't want to visit an infinite number of houses.
# Instead, each Elf will stop after delivering presents to 50 houses. To make up
# for it, they decide to deliver presents equal to eleven times their number at
# each house.
#
# With these changes, what is the new lowest house number of the house to get at
# least as many presents as the number in your puzzle input?
INPUT = 34000000
def visit_homes(pph, max_visit=None):
homes = [0 for x in range(int(INPUT/pph))]
for elf in range(1, len(homes)+1):
house = elf
count = 0
while house < len(homes):
if max_visit and count >= max_visit:
break
homes[house] += elf*pph
house += elf
count += 1
return homes
def check_homes(homes):
for house, presents in enumerate(homes):
if presents >= INPUT:
return (house, presents)
homes = visit_homes(10)
house, presents = check_homes(homes)
print("pt 1: house {}, presents {}".format(house, presents))
homes = visit_homes(11, max_visit=50)
house, presents = check_homes(homes)
print("pt 2: house {}, presents {}".format(house, presents))
| mit | -2,547,104,543,869,876,000 | 34.842105 | 82 | 0.656021 | false |
ifuding/Kaggle | PMRCN/Code/siamese_net.py | 1 | 22230 |
from sklearn import *
import sklearn
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
from time import gmtime, strftime
import numpy.random as rng
from multiprocessing.dummy import Pool
import h5py
import concurrent.futures
import tensorflow as tf
import multiprocessing as mp
from sklearn.cross_validation import KFold
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.layers.embeddings import Embedding
from keras.layers import Input, concatenate, merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import backend as K
from sklearn.metrics import log_loss
from keras import __version__ as keras_version
graph = tf.get_default_graph()
HIDDEN_UNITS = [64, 16, 8]
DNN_EPOCHS = 40
BATCH_SIZE = 5
DNN_BN = True
DROPOUT_RATE = 0.5
SIAMESE_PAIR_SIZE = 100000
MAX_WORKERS = 8
EMBEDDING_SIZE = 6
full_feature = True
data_folder = '../Data/'
train = pd.read_csv(data_folder + 'training_variants')
#print train.dtypes
test = pd.read_csv(data_folder + 'test_variants')
trainx = pd.read_csv(data_folder + 'training_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
#print trainx.dtypes
testx = pd.read_csv(data_folder + 'test_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
train = pd.merge(train, trainx, how='left', on='ID').fillna('')
#train = train.iloc[1:1000]
y = train['Class'].values
train = train.drop(['Class'], axis=1)
test = pd.merge(test, testx, how='left', on='ID').fillna('')
pid = test['ID'].values
#df_all = pd.concat((train, test), axis=0, ignore_index=True)
#df_all['Gene_Share'] = df_all.apply(lambda r: sum([1 for w in r['Gene'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#df_all['Variation_Share'] = df_all.apply(lambda r: sum([1 for w in r['Variation'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#
#print df_all[['Gene_Share', 'Variation_Share']].max()
## exit(0)
#if full_feature:
# #commented for Kaggle Limits
# for i in range(5):
# df_all['Gene_'+str(i)] = df_all['Gene'].map(lambda x: str(x[i]) if len(x)>i else '')
# df_all['Variation'+str(i)] = df_all['Variation'].map(lambda x: str(x[i]) if len(x)>i else '')
# print df_all.dtypes
#
# gen_var_lst = sorted(list(train.Gene.unique()) + list(train.Variation.unique()))
# print(len(gen_var_lst))
# gen_var_lst = [x for x in gen_var_lst if len(x.split(' '))==1]
# print(len(gen_var_lst))
# i_ = 0
# #commented for Kaggle Limits
# for gen_var_lst_itm in gen_var_lst:
# if i_ % 100 == 0: print(i_)
# df_all['GV_'+str(gen_var_lst_itm)] = df_all['Text'].map(lambda x: str(x).count(str(gen_var_lst_itm))).astype(np.int8)
# i_ += 1
# if i_ == 5:
# break
#
#for c in df_all.columns:
# if df_all[c].dtype == 'object':
# if c in ['Gene','Variation']:
# lbl = preprocessing.LabelEncoder()
# df_all[c+'_lbl_enc'] = lbl.fit_transform(df_all[c].values)
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
# elif c != 'Text':
# lbl = preprocessing.LabelEncoder()
# df_all[c] = lbl.fit_transform(df_all[c].values)
# if c=='Text':
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
#
#train = df_all.iloc[:len(train)]
#print "... train dtypes before svd ..."
#print train.dtypes
#print train.head()
#exit(0)
#test = df_all.iloc[len(train):]
#
#class cust_regression_vals(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# x = x.drop(['Gene', 'Variation','ID','Text'],axis=1).values
# return x
#
#class cust_txt_col(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def __init__(self, key):
# self.key = key
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# return x[self.key].apply(str)
#
#print('Pipeline...')
#fp = pipeline.Pipeline([
# ('union', pipeline.FeatureUnion(
# n_jobs = -1,
# transformer_list = [
# ('standard', cust_regression_vals()),
# ('pi1', pipeline.Pipeline([('Gene', cust_txt_col('Gene')), ('count_Gene', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd1', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# ('pi2', pipeline.Pipeline([('Variation', cust_txt_col('Variation')), ('count_Variation', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd2', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# #commented for Kaggle Limits
# ('pi3', pipeline.Pipeline([('Text', cust_txt_col('Text')), ('tfidf_Text', feature_extraction.text.TfidfVectorizer(ngram_range=(1, 2))), ('tsvd3', decomposition.TruncatedSVD(n_components=50, n_iter=25, random_state=12))]))
# ])
# )])
#
#train = fp.fit_transform(train);
#print type(train)
#print(train.shape)
#print (train.nbytes)
#np.save("train_array", train)
## print(df.dtypes)
## print(df.memory_usage())
#test = fp.transform(test); print(test.shape)
#np.save("test_array", test)
#exit(0)
train = np.load("./train_array.npy")
test = np.load("./test_array.npy")
# siamese_features_array = np.load("./siamese_features_array_2017_09_15_07_57_44.npy")
y = y - 1 #fix for zero bound array
CONTINUOUS_INDICES = []
SPARSE_INDICES = []
for i in range((train.shape)[1]):
if (i >= 3205 and i <= 3212):
pass
elif (i >= 2 and i <= 113): # or (i >= 114 and i <= 3204):
SPARSE_INDICES.append(i)
else:
CONTINUOUS_INDICES.append(i)
#train = train[:, CONTINUOUS_INDICES]
#test = test[:, CONTINUOUS_INDICES]
print('train shape after loading and selecting trainging columns: %s' % str(train.shape))
siamese_train_len = len(train) // 3
print('siamese_train_len is %d' % (siamese_train_len))
siamese_train_data = train[:siamese_train_len]
siamese_train_label = y[:siamese_train_len]
lgbm_train_data = train[siamese_train_len:]
lgbm_train_label = y[siamese_train_len:]
#train = train[:200]
#y = y[:200]
#test = test[:200]
#pid = pid[:200]
def xgbTrain(train_data, train_label, flod = 5):
"""
"""
denom = 0
fold = 5 #Change to 5, 1 for Kaggle Limits
models = []
for i in range(fold):
params = {
'eta': 0.03333,
'max_depth': 4,
'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'num_class': 9,
'seed': i,
'silent': True
}
x1, x2, y1, y2 = model_selection.train_test_split(train_data, train_label, test_size=0.18, random_state=i)
watchlist = [(xgb.DMatrix(x1, y1), 'train'), (xgb.DMatrix(x2, y2), 'valid')]
model = xgb.train(params, xgb.DMatrix(x1, y1), 1000, watchlist, verbose_eval=50, early_stopping_rounds=100)
score1 = metrics.log_loss(y2, model.predict(xgb.DMatrix(x2), ntree_limit=model.best_ntree_limit), labels = list(range(9)))
#print(score1)
models.append((model, 'x'))
return models
def lgbm_train(train_data, train_label, fold = 5):
"""
LGB Training
"""
# print train.shape
# print siamese_features_array.shape
# train_merge = siamese_features_array #np.concatenate((train, siamese_features_array), axis = 1)
# print train_merge.shape
# # exit(0)
print("Over all training size:")
print(train_data.shape)
# train_data = train_merge#[:train_len * 3 / 10]
# train_label = lgbm_train_label#[:train_len * 3 / 10]
#valide_data = train_merge[train_len * 9 / 10:]
#valide_label = y[train_len * 9 / 10:]
models = []
for i in range(fold):
d_train = lgb.Dataset(train_data, train_label) #, categorical_feature = SPARCE_INDICES)
#d_valide = lgb.Dataset(valide_data, valide_label)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'multiclass',
'metric': {'multi_logloss'},
'num_class': 9,
# 'num_leaves': 256,
# 'max_depth': 12,
# 'feature_fraction': 0.9,
# 'bagging_fraction': 0.95,
# 'bagging_freq': 5,
'num_leaves': 60, # 60,
# 'min_sum_hessian_in_leaf': 20,
'max_depth': 10, # 10,
'learning_rate': 0.02, # 0.02,
'feature_fraction': 0.5,
'verbose': 0,
# 'valid_sets': [d_valide],
'num_boost_round': 327,
'feature_fraction_seed': i,
# 'bagging_fraction': 0.9,
# 'bagging_freq': 15,
# 'bagging_seed': i,
# 'early_stopping_round': 10
# 'random_state': 10
# 'verbose_eval': 20
#'min_data_in_leaf': 665
}
# ROUNDS = 1
print('fold: %d th light GBM train :-)' % (i))
# params['feature_fraction_seed'] = i
#bst = lgb.train(
# params ,
# d_train,
# verbose_eval = False
# # valid_sets = [d_valide]
# #num_boost_round = 1
# )
cv_result = lgb.cv(params, d_train, nfold=10)
pd.DataFrame(cv_result).to_csv('cv_result', index = False)
exit(0)
# pred = model_eval(bst, 'l', test)
#print pred.shape
#print pred[0, :]
models.append((bst, 'l'))
return models
def create_model(input_len):
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_len))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
# model.add(Dropout(0.1))
#model.add(Dense(32, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(9, activation='softmax'))
# optimizer = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics = ['accuracy'])
return model
def create_embedding_model(CONTINUE_SIZE, SPARSE_SIZE):
"""
"""
print('CONTINUOUS_SIZE = %d' % CONTINUE_SIZE)
print('SPARSE_SIZE = %d' % SPARSE_SIZE)
sparse_feature = Input(shape=(SPARSE_SIZE,))
sparse_embedding = Embedding(55, EMBEDDING_SIZE, input_length = SPARSE_SIZE)(sparse_feature)
sparse_embedding = Reshape((EMBEDDING_SIZE * SPARSE_SIZE,))(sparse_embedding)
# print "model input size: %d" % CONTINUOUS_COLUMNS
dense_input = Input(shape=(CONTINUE_SIZE,))
merge_input = concatenate([dense_input, sparse_embedding], axis = 1)
merge_len = CONTINUE_SIZE + EMBEDDING_SIZE * SPARSE_SIZE
output = create_model(merge_len)(merge_input)
model = Model([dense_input, sparse_feature], output)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
# optimizer = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer = Adam(),
loss='categorical_crossentropy', metrics = ['accuracy'])
return model
def keras_train(train_data, train_target, nfolds = 10):
"""
Detect Fish or noFish
"""
print("Start gen training data, shuffle and normalize!")
#train_data = train
train_target = np_utils.to_categorical(train_target)
# train_data, train_target, siamese_data_loader = siamese_train(siamese_train_data, siamese_train_label)
kf = KFold(len(train_target), n_folds=nfolds, shuffle=True)
num_fold = 0
models = []
for train_index, test_index in kf:
# model = create_model(classes = 2)
model = create_embedding_model(len(CONTINUOUS_INDICES), len(SPARSE_INDICES))
# model = create_siamese_net((train.shape)[1])
X_train = train_data[train_index]
Y_train = train_target[train_index]
print('Positive samples in train: %d' % np.sum(Y_train))
print('Negative samples in train: %d' % (len(Y_train) - np.sum(Y_train)))
X_valid = train_data[test_index]
Y_valid = train_target[test_index]
print('Positive samples in valide: %d' % np.sum(Y_valid))
print('Negative samples in valide: %d' % (len(Y_valid) - np.sum(Y_valid)))
num_fold += 1
print('Start KFold number {} from {}'.format(num_fold, nfolds))
print('Split train: ', len(X_train), len(Y_train))
print('Split valid: ', len(X_valid), len(Y_valid))
callbacks = [
EarlyStopping(monitor='val_loss', patience=5, verbose=0),
]
model.fit([X_train[:, CONTINUOUS_INDICES], X_train[:, SPARSE_INDICES]],
Y_train, batch_size=BATCH_SIZE, epochs=DNN_EPOCHS,
shuffle=True, verbose=2,
validation_data=([X_valid[:, CONTINUOUS_INDICES], X_valid[:, SPARSE_INDICES]], Y_valid)
, callbacks=callbacks)
model_name = 'keras' + strftime('_%Y_%m_%d_%H_%M_%S', gmtime())
#model.save_weights(model_name)
#siamese_features_array = gen_siamese_features(model, lgbm_train_data, siamese_train_data, siamese_train_label)
models.append((model, 'k'))
break
return models #, siamese_features_array
def model_eval(model, model_type, data_frame):
"""
"""
if model_type == 'l':
preds = model.predict(data_frame)
elif model_type == 'k':
preds = model.predict(data_frame, batch_size=BATCH_SIZE, verbose=2)
elif model_type == 't':
print("ToDO")
elif model_type == 'x':
preds = model.predict(xgb.DMatrix(data_frame), ntree_limit=model.best_ntree_limit+80)
return preds
def gen_sub(models, merge_features):
"""
Evaluate single Type model
"""
print('Start generate submission!')
preds = None
for (model, model_type) in models:
pred = model_eval(model, model_type, merge_features)
#print pred.shape
#print pred[0, :]
if preds is None:
preds = pred.copy()
else:
preds += pred
preds /= len(models)
submission = pd.DataFrame(preds, columns=['class'+str(c+1) for c in range(9)])
submission['ID'] = pid
sub_name = "submission" + strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) + ".csv"
print('Output to ' + sub_name)
submission.to_csv(sub_name, index=False)
def create_siamese_net(input_size):
"""
"""
input_shape = (input_size, )
left_input = Input(input_shape)
right_input = Input(input_shape)
#build model to use in each siamese 'leg'
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_size))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
#encode each of the two inputs into a vector with the convnet
encoded_l = model(left_input)
encoded_r = model(right_input)
#merge two encoded inputs with the l1 distance between them
L1_distance = lambda x: K.abs(x[0]-x[1])
both = merge([encoded_l,encoded_r], mode = L1_distance, output_shape=lambda x: x[0])
merge_layer = Dense(HIDDEN_UNITS[2],activation='sigmoid')(both)
prediction = Dense(1,activation='sigmoid')(merge_layer)
siamese_net = Model(input=[left_input,right_input],output=prediction)
#optimizer = SGD(0.0004,momentum=0.6,nesterov=True,decay=0.0003)
optimizer = Adam()
#//TODO: get layerwise learning rates and momentum annealing scheme described in paperworking
siamese_net.compile(loss="binary_crossentropy",optimizer=optimizer)
# print siamese_net.count_params()
return siamese_net
class Siamese_Loader:
#For loading batches and testing tasks to a siamese net
def __init__(self,Xtrain,Xval = None):
self.Xval = Xval
self.Xtrain = Xtrain
self.n_classes = Xtrain.shape[0]
self.feature_size = (Xtrain[0].shape)[1]
self.n_examples = np.array([x.shape[0] for x in Xtrain])
self.n_tot_examples = np.sum(self.n_examples)
print('examples of different classes: %s' % str(self.n_examples))
# self.n_val,self.n_ex_val,_,_ = Xval.shape
def get_batch(self,n):
#Create batch of pairs, half same class, half different class
categories = rng.choice(self.n_classes,size=(n,),replace=True)
pairs=np.zeros((2, n, self.feature_size))
targets=np.zeros((n,))
positive_begin_pos = n * 1 // 2
targets[positive_begin_pos:] = 1
categories_list = []
for i in range(n):
category = categories[i]
idx_1 = rng.randint(0, self.n_examples[category])
pairs[0][i] = self.Xtrain[category][idx_1] #.reshape(self.feature_size)
#pick images of same class for 1st half, different for 2nd
category_2 = category if i >= positive_begin_pos else (category + rng.randint(1,self.n_classes)) % self.n_classes
idx_2 = rng.randint(0,self.n_examples[category_2])
while i >= positive_begin_pos and idx_2 == idx_1:
idx_2 = rng.randint(0,self.n_examples[category_2])
pairs[1][i] = self.Xtrain[category_2][idx_2] #.reshape(self.w,self.h,1)
categories_list.append((category, category_2))
#pd.DataFrame(categories_list).to_csv('categories', index=False)
#exit(0)
# shuflle pairs to mix positive and negative
rng.shuffle(pairs)
return pairs, targets
def gen_test_on_support_data(Xsupport, Xtest):
"""
"""
n_support, feature_size = Xsupport.shape
pairs = np.zeros((2, n_support, feature_size))
pairs[0] = Xtest
pairs[1] = Xsupport
return list(pairs)
def siamese_train(siamese_train_data, siamese_train_label):
"""
"""
train_data = [[] for i in range(9)]
label_ind = 0
for feature in siamese_train_data:
train_data[siamese_train_label[label_ind]].append(feature)
label_ind += 1
train_data = np.array([np.array(xi) for xi in train_data])
print("train data shape before gen pair")
print(train_data.shape)
siamese_data_loader = Siamese_Loader(train_data, test)
pairs, targets = siamese_data_loader.get_batch(SIAMESE_PAIR_SIZE)
return pairs, targets, siamese_data_loader
def gen_siamese_features_meta(model, Xsupport_label, Xsupport, Xtest):
"""
"""
siamese_pair = gen_test_on_support_data(Xsupport, Xtest)
global graph
with graph.as_default():
preds = model.predict(siamese_pair, batch_size=BATCH_SIZE, verbose=2)
preds = np.insert(preds, 1, Xsupport_label, axis = 1)
preds = pd.DataFrame(preds, columns = ['sim', 'class'])
siamese_features = preds.groupby('class', sort = False) \
.agg({'sim': ['max', 'min', 'median', 'mean', 'std']})
max_class = siamese_features['sim']['max'].idxmax()
siamese_features = np.insert(siamese_features.values.flatten(), 0, max_class, axis = 0)
return siamese_features
def gen_siamese_features(siamese_model, Xtest, Xsupport, Xsupport_label):
"""
"""
if MAX_WORKERS <= 0:
print("MAX_WORKERS should >= 1", file=sys.stderr)
exit(1)
siamese_features_array = list(range(len(Xtest)))
test_begin = 0
while test_begin < len(Xtest):
test_end = min(test_begin + MAX_WORKERS, len(Xtest))
with concurrent.futures.ThreadPoolExecutor(max_workers = MAX_WORKERS) as executor:
future_predict = {executor.submit(gen_siamese_features_meta, siamese_model,
Xsupport_label,
Xsupport,
Xtest[ind]): ind for ind in range(test_begin, test_end)}
for future in concurrent.futures.as_completed(future_predict):
ind = future_predict[future]
try:
siamese_features = future.result()
siamese_features_array[ind] = siamese_features
except Exception as exc:
print('%dth feature generated an exception: %s' % (ind, exc))
test_begin = test_end
if test_begin % 100 == 0:
print('Gen %d siamsese features' % test_begin)
if test_begin != len(Xtest):
print("Only gen %d siamese features" % test_begin, file=sys.stderr)
exit(1)
siamese_features_array = np.array(siamese_features_array)
pd.DataFrame(siamese_features_array[:, 0]).astype(np.int8).to_csv('pred_label', index = False)
return siamese_features_array
if __name__ == "__main__":
model_k = keras_train(train, y, 10)
#np.save("siamese_features_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_array)
# gen_sub(model_k, 'k', th, F1)
# ind = np.array([i * 5 for i in range(9)])
# xgbTrain(siamese_features_array[:, ind], lgbm_train_label);
#lgbm_features = siamese_features_array #np.concatenate((lgbm_train_data, siamese_features_array),
# model_l = lgbm_train(train, y, 10) #lgbm_features, lgbm_train_label, 10)#model_k)
# siamese_features_test_array = siamese_test(model_k[0][0], test)
#np.save("siamese_features_test_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_test_array)
##model_x = xgbTrain(5)#model_k)
#gen_sub(model_l, siamese_features_test_array) #model_k)
| apache-2.0 | 3,541,480,684,479,684,600 | 37.66087 | 268 | 0.609852 | false |
emmettk/pvrsex | tiff_file_folder_divider.py | 1 | 5268 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 28 16:14:22 2017
@author: ekrupczak on LaVision
Divide tiffs up into subfolders with maximum number of files per folder
This allows them to be imported into DaVis which seems unwilling to import more than about 10000 files at a time.
Or pull all tiffs from specified folder between start and end file numbers
"""
import os
import math
import re
def divide_tiffs(path, max_per_folder = 10000):
"""
Divides all tiffs in path into subfolders with up to max_per_folder files per subfolder
"""
tiffs = [file for file in os.listdir(path) if ".tif" in file]
num_sub_dir = math.ceil(len(tiffs)/max_per_folder)
print("Dividing", len(tiffs), "tiffs into", num_sub_dir, "directories")
for i in range(1,num_sub_dir+1):
os.mkdir(path+r"\tiffs_pt"+str(i).zfill(2))
if i < num_sub_dir:
for file in tiffs[max_per_folder*(i-1):max_per_folder*i]:
os.rename(path+r"\\"+file, path+r"\tiffs_pt"+str(i)+r"\\"+file)
elif i == num_sub_dir:
for file in tiffs[max_per_folder*(i-1):]:
os.rename(path+r"\\"+file, path+r"\tiffs_pt"+str(i)+r"\\"+file)
print("Directory", "tiffs_pt"+str(i), "populated")
def unpack_folders(path):
"""
Undoes divide_tiffs by unpacking all files in subfolders into the main folder
"""
for folder in [f for f in os.listdir(path) if "." not in f]:
print("unpacking", folder)
for file in os.listdir(path+"/"+folder):
os.rename(path+"/"+folder+"/"+file, path+"/"+file)
print("deleting empty folder", folder)
os.rmdir(path+"/"+folder)
def pull_nth_tiff(path, n):
"""
Pulls every nth tiff into a separate folder
Designed for reading into DaVis
Run 'unpack folders' after to undo this action
"""
tiffs = [file for file in os.listdir(path) if '.tif' in file.lower()]
print(len(tiffs), "tiffs in ", path)
newdirname = r"\every_"+str(n)+"th_tiff"
os.mkdir(path+newdirname)
for tiff in tiffs[0::n]:
os.rename(path+r"\\"+tiff, path+newdirname+"\\"+tiff)
print("Every", str(n)+"th tiff put in ", path+newdirname)
print("Folder contains ", len(os.listdir(path+newdirname)), "files")
def pull_tiffs_in_range(path, start, stop):
"""
Pull all tiffs between file number start and file number stop.
Assumes tiff names are formatted as follows: tiff_name_00001.tif
"""
tiffs = [file for file in os.listdir(path) if '.tif' in file.lower()]
print(len(tiffs), "tiffs in ", path)
newdirname = r"\tiffs_in_range"+str(start)+"_"+str(stop)
os.mkdir(path+newdirname)
for tiff in tiffs:
filenum = int(re.findall("(?:_)([0-9]+)(?:_grayscale\.tif|\.TIF)", tiff)[0])
# print(filenum, filenum > start, filenum < stop)
if start<=filenum<=stop:
# print(filenum, tiff)
os.rename(path+r"\\"+tiff, path+newdirname+"\\"+tiff)
print("Files placed in",path+newdirname)
print("Folder contains", len(os.listdir(path+newdirname)))
if __name__ == "__main__":
##2.5 hz
# n = 2 ##tower EO
# n = 6 ##pier EO
## ~2hz
# n = 8 ##pier EO (1.875hz)
##1.5 hz
# n = 3 #tower EO (1.66 hz)
##1 hz
# n = 15 ##pier EO
# n = 30 ##tower IR / pier IR
# n = 5 ## tower EO
## 0.66 Hz
# n = 8 ##tower EO (0.625 Hz)
##0.5 Hz
# n = 30 #pier EO
##0.33 Hz
n = 15 #tower EO
##0.166 Hz
# n = 30 #tower EO
#
# camera = "tower_EO_12mm"
# camera = "pier_EO_08mm"
# camera = "tower_IR_16mm"
# camera = "pier_IR_09mm"
# run = r"20170926_1000_towerEO_pierEO/"
# run = r"20170926_1100_pierIR_pierEO/"
# run = r"20170926_1200_towerIR_pierIR/"
# run = r"20170926_1300_towerIR_towerEO/"
#
# path = r"D:/RSEX17_TIFF/0926/"+run+camera
# path = r'D:/RSEX17_TIFF/1005/201710051000/'+camera+"/tiffs_in_range4488_7488"
#path = r'D:\RSEX17_TIFF\1015\201710151610\201710151610_tower_color'
# path = r'D:\RSEX17_TIFF\1005\201710051000\tower_1015_1025'
# path = r'D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm_range_4488_7488_grayscale'
# path = r'D:\RSEX17_TIFF\1005\201710051000\pier_EO_08mm'
path = r'D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm'
# path = r'E:\RSEX17_TIFF\1005\201710051000\pier_EO_08mm\tiffs_in_range13464_22464'
# path = r'D:\RSEX17_TIFF\1015\201710151610\201710151610_tower_grayscale'
# path = r"D:/RSEX17_TIFF/1013/201710131200/"+camera
# divide_tiffs(path, max_per_folder = 20*10**3)
# print("Tiffs divided")
# path = r'D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm_range_4488_7488_grayscale'
unpack_folders(path)
# pull_nth_tiff(path, n)
# path = r'D:/RSEX17_TIFF/1005/201710051000/tower_EO_12mm'
# unpack_folders(path)
#
# path = r"D:\RSEX17_TIFF\1005\201710051000\tower_EO_12mm_every_2th_tiff_grayscale"
# pull_tiffs_in_range(path, 4488, 7488)
# pull_tiffs_in_range(path, 13464, 22464)
# path = path+"\every_"+str(n)+"th_tiff"
# unpack_folders(path)
# path = r'E:\RSEX17_TIFF\1005\201710051000\pier_EO_08mm\tiffs_in_range13464_22464'
# pull_nth_tiff(path, n) | mit | -5,883,737,634,653,318,000 | 33.664474 | 113 | 0.610478 | false |
czcorpus/kontext | lib/bgcalc/csv_cache.py | 1 | 1387 | # Copyright (c) 2021 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2021 Tomas Machalek <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import csv
def load_cached_partial(path, offset, limit):
with open(path, 'r') as fr:
csv_reader = csv.reader(fr)
_, total_str = next(csv_reader)
for i in range(0, offset):
next(csv_reader)
ans = []
i = offset
for row in csv_reader:
if i == offset + limit:
break
ans.append((row[0], ) + tuple(int(x) for x in row[1:]))
i += 1
return int(total_str), ans
def load_cached_full(path):
ans = []
with open(path, 'r') as fr:
csv_reader = csv.reader(fr)
_, total_str = next(csv_reader)
for row in csv_reader:
ans.append((row[0], ) + tuple(int(x) for x in row[1:]))
return int(total_str), ans
| gpl-2.0 | 5,040,845,778,632,978,000 | 32.829268 | 67 | 0.618601 | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/demos/smt2011.py | 1 | 5140 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: smt2011.py
# Purpose: Demonstrations for the SMT 2011 demo
#
# Authors: Christopher Ariza
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2011 Michael Scott Cuthbert and the music21 Project
# License: BSD or LGPL, see license.txt
#-------------------------------------------------------------------------------
import copy
from music21 import environment, corpus
_MOD = 'demo/smt2011.py'
environLocal = environment.Environment(_MOD)
def ex01():
# beethoven
#s1 = corpus.parse('opus18no1/movement3.xml')
#s1.show()
# has lots of triplets toward end
# viola not coming in as alto clef
# s2 = corpus.parse('haydn/opus17no1/movement3.zip')
# s2.show()
s2 = corpus.parse('haydn/opus17no2/movement3.zip')
# works well; some triplets are missing but playback correctly
s2Chordified = s2.measures(1, 25).chordify()
s2Chordified.show()
#-------------------------------------------------------------------------------
def chordsToAnalysis(chordStream, manifest, scale):
'''
manifest is a list of tuples in the following form:
(measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay, durationTypeDisplay)
'''
from music21 import note, bar
chordMeasures = chordStream.getElementsByClass('Measure')
measureTemplate = copy.deepcopy(chordMeasures)
for i, m in enumerate(measureTemplate):
m.removeByClass(['GeneralNote'])
# assuming we have measure numbers
for (measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay,
durationTypeDisplay, textDisplay) in manifest:
# assume measures are in order; replace with different method
m = chordMeasures[measureNumber-1]
mPost = measureTemplate[measureNumber-1]
if chordNumberOrNone is None:
c = m.notes[0]
else:
c = m.notes[chordNumberOrNone-1] # assume counting from 1
pTarget = scale.pitchFromDegree(scaleDegree)
match = False
p = None
for p in c.pitches:
if p.name == pTarget.name:
match = True
break
if not match:
print('no scale degree found in specified chord', p, pTarget)
pTarget.octave = octaveDisplay
n = note.Note(pTarget)
if durationTypeDisplay in ['whole']:
n.noteheadFill = False
else:
n.noteheadFill = True
n.stemDirection = 'noStem'
n.addLyric(textDisplay)
mPost.insert(c.getOffsetBySite(m), n)
# fill with rests
for m in measureTemplate:
m.rightBarline = bar.Barline('none')
# need to hide rests
if len(m.notes) == 0:
r = note.Rest(quarterLength=4)
r.hideObjectOnPrint = True
m.append(r)
return measureTemplate
def exShenker():
from music21 import stream, scale, bar
# wtc no 1
src = corpus.parse('bwv846')
#src.show()
melodicSrc = src.parts[0]
measureTemplate = copy.deepcopy(melodicSrc.getElementsByClass('Measure'))
for i, m in enumerate(measureTemplate):
m.removeByClass(['GeneralNote'])
m.number = i + 1
# this stream has triple bar lines, clefs, etc
unused_chords = src.flat.makeChords(minimumWindowSize=2)
analysis = stream.Score()
chordReduction = copy.deepcopy(measureTemplate)
for i, m in enumerate(chordReduction.getElementsByClass('Measure')):
mNotes = src.flat.getElementsByOffset(m.offset,
m.offset+m.barDuration.quarterLength, includeEndBoundary=False)
mNotes.makeChords(minimumWindowSize=4, inPlace=True)
c = mNotes.flat.notes[0]
c.duration.type = 'whole'
m.append(c)
m.rightBarline = bar.Barline('regular')
# add parts
scaleCMajor = scale.MajorScale('c')
#measureNumber, chordNumberOrNone, scaleDegree, octaveDisplay,
# durationTypeDisplay, textDisplay
manifest = [(1, None, 3, 5, 'whole', '3'),
(24, None, 2, 5, 'whole', '2'),
(35, None, 1, 5, 'whole', '1'),
]
analysis1 = chordsToAnalysis(chordReduction, manifest, scaleCMajor)
manifest = [(1, None, 1, 4, 'whole', 'I'),
(24, None, 5, 3, 'whole', 'V'),
(31, None, 4, 4, 'quarter', '--7'),
(35, None, 1, 4, 'whole', 'I'),
]
analysis2 = chordsToAnalysis(chordReduction, manifest, scaleCMajor)
analysis.insert(0, analysis1)
analysis.insert(0, analysis2)
analysis.insert(0, chordReduction)
analysis.show()
def demoMakeChords():
# wtc no 1
#src = corpus.parse('bwv65.2').measures(0, 5)
src = corpus.parse('opus18no1/movement3.xml').measures(0, 10)
src.flattenParts().makeChords(minimumWindowSize=3).show()
src = corpus.parse('opus18no1/movement3.xml').measures(0, 10)
src.chordify().show()
if __name__ == '__main__':
#ex01()
#exShenker()
demoMakeChords()
| mit | -2,055,209,700,920,989,400 | 30.527607 | 87 | 0.590971 | false |
jaeddy/bripipetools | bripipetools/dbification/flowcellrun.py | 1 | 8314 | """
Class for importing data from a sequencing run into GenLIMS and the
Research DB as new objects.
"""
import logging
import os
import re
from .. import parsing
from .. import database
from .. import annotation
logger = logging.getLogger(__name__)
class FlowcellRunImporter(object):
"""
Collects FlowcellRun and SequencedLibrary objects from a sequencing run,
converts to documents, inserts into database.
"""
def __init__(self, path, db, run_opts):
logger.debug("creating `SequencingImporter` instance")
logger.debug("...with arguments (path: '{}', db: '{}')"
.format(path, db.name))
self.path = path
self.db = db
self.run_opts = run_opts
def _collect_flowcellrun(self):
"""
Collect FlowcellRun object for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
logger.info("collecting info for flowcell run {}"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_flowcell_run()
def _collect_sequencedlibraries(self):
"""
Collect list of SequencedLibrary objects for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
logger.info("Collecting sequenced libraries for flowcell run '{}'"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_sequenced_libraries()
def _collect_librarygenecounts(self):
"""
Collect list of library gene count objects for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
# print("path: {}, items: {}".format(self.path, path_items))
logger.info("Collecting library gene counts for flowcell run '{}'"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_library_gene_counts()
def _collect_librarymetrics(self):
"""
Collect list of library metrics objects for flowcell run.
"""
path_items = parsing.parse_flowcell_path(self.path)
# print("path: {}, items: {}".format(self.path, path_items))
logger.info("Collecting library metrics for flowcell run '{}'"
.format(path_items['run_id']))
return annotation.FlowcellRunAnnotator(
run_id=path_items['run_id'],
pipeline_root=path_items['pipeline_root'],
db=self.db
).get_library_metrics()
def _insert_flowcellrun(self, collection='all'):
"""
Convert FlowcellRun object and insert into GenLIMS database.
"""
flowcellrun = self._collect_flowcellrun()
logger.debug("inserting flowcell run {} into {}"
.format(flowcellrun, self.db.name))
database.put_runs(self.db, flowcellrun.to_json())
def _insert_sequencedlibraries(self):
"""
Convert SequencedLibrary objects and insert into GenLIMS database.
"""
sequencedlibraries = self._collect_sequencedlibraries()
for sl in sequencedlibraries:
logger.debug("inserting sequenced library {}".format(sl))
database.put_samples(self.db, sl.to_json())
def _insert_genomicsSequencedlibraries(self):
"""
Convert SequencedLibrary objects and insert into Research database.
"""
sequencedlibraries = self._collect_sequencedlibraries()
for sl in sequencedlibraries:
logger.debug("inserting sequenced library {}".format(sl))
database.put_genomicsSamples(self.db, sl.to_json())
def _insert_librarygenecounts(self):
"""
Convert Library Results objects and insert into Research database.
"""
librarygenecounts = self._collect_librarygenecounts()
for lgc in librarygenecounts:
logger.debug("inserting library gene counts '{}'".format(lgc))
database.put_genomicsCounts(self.db, lgc.to_json())
def _insert_librarymetrics(self):
"""
Convert Library Results objects and insert into GenLIMS database.
"""
librarymetrics = self._collect_librarymetrics()
for lgc in librarymetrics:
logger.debug("inserting library metrics '{}'".format(lgc))
database.put_metrics(self.db, lgc.to_json())
def _insert_genomicsLibrarymetrics(self):
"""
Convert Library Results objects and insert into Research database.
"""
librarymetrics = self._collect_librarymetrics()
for lgc in librarymetrics:
logger.debug("inserting library metrics '{}'".format(lgc))
database.put_genomicsMetrics(self.db, lgc.to_json())
def _insert_genomicsWorkflowbatches(self):
"""
Collect WorkflowBatch objects and insert them into database.
"""
path_items = parsing.parse_flowcell_path(self.path)
batchfile_dir = os.path.join(self.path, "globus_batch_submission")
logger.info("collecting info for workflow batch files in '{}'"
.format(batchfile_dir))
batchfile_list = [batchfile for batchfile in os.listdir(batchfile_dir)
if not re.search('DS_Store', batchfile)]
for curr_batchfile in batchfile_list:
workflowbatch = annotation.WorkflowBatchAnnotator(
workflowbatch_file=os.path.join(batchfile_dir, curr_batchfile),
pipeline_root=path_items['pipeline_root'],
db=self.db,
                run_opts=self.run_opts
).get_workflow_batch()
logger.debug("inserting workflow batch '{}'".format(workflowbatch))
database.put_genomicsWorkflowbatches(self.db, workflowbatch.to_json())
def insert(self, collection='genlims'):
"""
Insert documents into GenLIMS or ResearchDB databases.
Note that ResearchDB collections are prepended by 'genomics'
to indicate the data origin.
"""
# Sample information into ResDB/GenLIMS
if collection in ['all', 'researchdb', 'genomicsSamples']:
logger.info(("Inserting sequenced libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_genomicsSequencedlibraries()
if collection in ['all', 'genlims', 'samples']:
logger.info(("Inserting sequenced libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_sequencedlibraries()
# Gene counts - only into ResDB
if collection in ['all', 'researchdb', 'genomicsCounts']:
logger.info(("Inserting gene counts for libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_librarygenecounts()
# Metrics information - only into ResDB
if collection in ['all', 'researchdb', 'genomicsMetrics']:
logger.info(("Inserting metrics for libraries for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_genomicsLibrarymetrics()
# Workflow Batch files - only into ResDB
if collection in ['all', 'researchdb', 'genomicsWorkflowbatches']:
logger.info(("Inserting workflow batches for flowcell '{}' "
"into '{}'").format(self.path, self.db.name))
self._insert_genomicsWorkflowbatches()
# Run information into GenLIMS
if collection in ['all', 'genlims', 'flowcell', 'runs']:
logger.info("Inserting flowcell run '{}' into '{}'"
.format(self.path, self.db.name))
self._insert_flowcellrun()
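# Example usage (illustrative sketch only; the flowcell path, database handle,
# and run options below are assumptions, not values defined in this module):
#
#     importer = FlowcellRunImporter(
#         path='/mnt/genomics/Illumina/150101_D00000_0000_AC00000XX',
#         db=genomics_db,
#         run_opts={},
#     )
#     importer.insert(collection='all')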
| mit | 3,064,618,017,734,528,000 | 39.955665 | 82 | 0.595141 | false |
hidashun/django-typed-models | typedmodels/tests.py | 1 | 8182 | from django.utils import unittest
try:
import yaml # NOQA
PYYAML_AVAILABLE = True
except ImportError:
PYYAML_AVAILABLE = False
from django.core import serializers
from django.test import TestCase
from django.db.models.query_utils import DeferredAttribute
from .test_models import AngryBigCat, Animal, BigCat, Canine, Feline, Parrot, AbstractVegetable, Vegetable, Fruit
class SetupStuff(TestCase):
def setUp(self):
Feline.objects.create(name="kitteh")
Feline.objects.create(name="cheetah")
Canine.objects.create(name="fido")
BigCat.objects.create(name="simba")
AngryBigCat.objects.create(name="mufasa")
Parrot.objects.create(name="Kajtek")
class TestTypedModels(SetupStuff):
def test_cant_instantiate_base_model(self):
# direct instantiation shouldn't work
self.assertRaises(RuntimeError, Animal.objects.create, name="uhoh")
# ... unless a type is specified
Animal.objects.create(name="dingo", type="typedmodels.canine")
# ... unless that type is stupid
try:
Animal.objects.create(name="dingo", type="macaroni.buffaloes")
except ValueError:
pass
def test_get_types(self):
self.assertEqual(set(Animal.get_types()), set(['typedmodels.canine', 'typedmodels.bigcat', 'typedmodels.parrot', 'typedmodels.angrybigcat', 'typedmodels.feline']))
self.assertEqual(set(Canine.get_types()), set(['typedmodels.canine']))
self.assertEqual(set(Feline.get_types()), set(['typedmodels.bigcat', 'typedmodels.angrybigcat', 'typedmodels.feline']))
def test_get_type_classes(self):
self.assertEqual(set(Animal.get_type_classes()), set([Canine, BigCat, Parrot, AngryBigCat, Feline]))
self.assertEqual(set(Canine.get_type_classes()), set([Canine]))
self.assertEqual(set(Feline.get_type_classes()), set([BigCat, AngryBigCat, Feline]))
def test_base_model_queryset(self):
# all objects returned
qs = Animal.objects.all().order_by('type')
self.assertEqual(len(qs), 6)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat', 'typedmodels.bigcat', 'typedmodels.canine', 'typedmodels.feline', 'typedmodels.feline', 'typedmodels.parrot'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat, BigCat, Canine, Feline, Feline, Parrot])
def test_proxy_model_queryset(self):
qs = Canine.objects.all().order_by('type')
self.assertEqual(qs.count(), 1)
self.assertEqual(len(qs), 1)
self.assertEqual([obj.type for obj in qs], ['typedmodels.canine'])
self.assertEqual([type(obj) for obj in qs], [Canine])
qs = Feline.objects.all().order_by('type')
self.assertEqual(qs.count(), 4)
self.assertEqual(len(qs), 4)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat', 'typedmodels.bigcat', 'typedmodels.feline', 'typedmodels.feline'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat, BigCat, Feline, Feline])
def test_doubly_proxied_model_queryset(self):
qs = BigCat.objects.all().order_by('type')
self.assertEqual(qs.count(), 2)
self.assertEqual(len(qs), 2)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat', 'typedmodels.bigcat'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat, BigCat])
def test_triply_proxied_model_queryset(self):
qs = AngryBigCat.objects.all().order_by('type')
self.assertEqual(qs.count(), 1)
self.assertEqual(len(qs), 1)
self.assertEqual([obj.type for obj in qs], ['typedmodels.angrybigcat'])
self.assertEqual([type(obj) for obj in qs], [AngryBigCat])
def test_recast_auto(self):
cat = Feline.objects.get(name='kitteh')
cat.type = 'typedmodels.bigcat'
cat.recast()
self.assertEqual(cat.type, 'typedmodels.bigcat')
self.assertEqual(type(cat), BigCat)
def test_recast_string(self):
cat = Feline.objects.get(name='kitteh')
cat.recast('typedmodels.bigcat')
self.assertEqual(cat.type, 'typedmodels.bigcat')
self.assertEqual(type(cat), BigCat)
def test_recast_modelclass(self):
cat = Feline.objects.get(name='kitteh')
cat.recast(BigCat)
self.assertEqual(cat.type, 'typedmodels.bigcat')
self.assertEqual(type(cat), BigCat)
def test_recast_fail(self):
cat = Feline.objects.get(name='kitteh')
self.assertRaises(ValueError, cat.recast, AbstractVegetable)
self.assertRaises(ValueError, cat.recast, 'typedmodels.abstractvegetable')
self.assertRaises(ValueError, cat.recast, Vegetable)
self.assertRaises(ValueError, cat.recast, 'typedmodels.vegetable')
def test_fields_in_subclasses(self):
canine = Canine.objects.all()[0]
angry = AngryBigCat.objects.all()[0]
angry.mice_eaten = 5
angry.save()
self.assertEqual(AngryBigCat.objects.get(pk=angry.pk).mice_eaten, 5)
angry.canines_eaten.add(canine)
self.assertEqual(list(angry.canines_eaten.all()), [canine])
# Feline class was created before Parrot and has mice_eaten field which is non-m2m, so it may break accessing
# known_words field in Parrot instances (since Django 1.5).
parrot = Parrot.objects.all()[0]
parrot.known_words = 500
parrot.save()
self.assertEqual(Parrot.objects.get(pk=parrot.pk).known_words, 500)
def test_fields_cache(self):
mice_eaten = Feline._meta.get_field('mice_eaten')
known_words = Parrot._meta.get_field('known_words')
self.assertIn(mice_eaten, AngryBigCat._meta.fields)
self.assertIn(mice_eaten, Feline._meta.fields)
self.assertNotIn(mice_eaten, Parrot._meta.fields)
self.assertIn(known_words, Parrot._meta.fields)
self.assertNotIn(known_words, AngryBigCat._meta.fields)
self.assertNotIn(known_words, Feline._meta.fields)
def test_m2m_cache(self):
canines_eaten = AngryBigCat._meta.get_field_by_name('canines_eaten')[0]
self.assertIn(canines_eaten, AngryBigCat._meta.many_to_many)
self.assertNotIn(canines_eaten, Feline._meta.many_to_many)
self.assertNotIn(canines_eaten, Parrot._meta.many_to_many)
def test_related_names(self):
'''Ensure that accessor names for reverse relations are generated properly.'''
canine = Canine.objects.all()[0]
self.assertTrue(hasattr(canine, 'angrybigcat_set'))
def test_queryset_defer(self):
"""
Ensure that qs.defer() works correctly
"""
Vegetable.objects.create(name='cauliflower', color='white', yumness=1)
Vegetable.objects.create(name='spinach', color='green', yumness=5)
Vegetable.objects.create(name='sweetcorn', color='yellow', yumness=10)
Fruit.objects.create(name='Apple', color='red', yumness=7)
qs = AbstractVegetable.objects.defer('yumness')
objs = set(qs)
for o in objs:
print(o)
self.assertIsInstance(o, AbstractVegetable)
self.assertTrue(o._deferred)
self.assertIsInstance(o.__class__.__dict__['yumness'], DeferredAttribute)
# does a query, since this field was deferred
self.assertIsInstance(o.yumness, float)
def _check_serialization(self, serialization_format):
"""Helper function used to check serialization and deserialization for concrete format."""
animals = Animal.objects.order_by('pk')
serialized_animals = serializers.serialize(serialization_format, animals)
deserialized_animals = [wrapper.object for wrapper in serializers.deserialize(serialization_format, serialized_animals)]
self.assertEqual(set(deserialized_animals), set(animals))
def test_xml_serialization(self):
self._check_serialization('xml')
def test_json_serialization(self):
self._check_serialization('json')
@unittest.skipUnless(PYYAML_AVAILABLE, 'PyYAML is not available.')
def test_yaml_serialization(self):
self._check_serialization('yaml')
| bsd-3-clause | -235,403,817,046,583,680 | 43.467391 | 189 | 0.666585 | false |
Zogg/Tiltai | tiltai/sdn/docker.py | 1 | 1700 | from tiltai.utils import tiltai_logs_format
import socket
from logbook import Logger, StderrHandler
err = StderrHandler(format_string=tiltai_logs_format)
log = Logger("sdn[docker]")
def dockersdn(queue_name, resolver, storage):
"""
  Get the addresses and the type of the socket from within a Docker container.
  The hostname of the container is used as the identifier to receive the
  network links definition.
Parameters
----------
queue_name : string
Name of the queue, for which to get network settings
resolver : callable
A `name` -> `network address` mapper. More than likely one of resolvers
provided by `tiltai.sdn` modules
storage : callable
A data backend which provides network mapping: definition of links
between gates. More than likely one of the methods provided by
`tiltai.sdn` modules
Returns
-------
network : dict
A dict of shape `{'endpoints': [], 'type': value}`
"""
with err.applicationbound():
hostname = socket.gethostname()
log.debug('My hostname is: ' + hostname)
links = storage(hostname)
if links:
for link in links['links']:
if link['queue'] == queue_name:
if link.get('outgate', None):
protocolized_nodes = ['tcp://' + address for address in resolver(link['outgate'])]
endpoints = {'endpoints': protocolized_nodes}
else:
endpoints = {'endpoints': link.get('addresses', [])}
if link.get('type', None):
endpoints['type'] = link['type']
log.debug('Topology resolved to ip addresses: ' + str(endpoints))
return endpoints
return {'endpoints': []}
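# Example usage (illustrative sketch; `etcd_resolver` and `etcd_storage` are
# assumed stand-ins for the resolver/storage callables supplied by the other
# `tiltai.sdn` modules, not names defined here):
#
#     network = dockersdn('input', resolver=etcd_resolver, storage=etcd_storage)
#     endpoints = network['endpoints']
#     socket_type = network.get('type')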
| gpl-3.0 | -5,203,554,288,579,424,000 | 29.357143 | 94 | 0.63 | false |
BFH-BTI7301-project1/ClockAlarm | _clockalarm/UI/NotificationWidget.py | 1 | 3442 | # ClockAlarm is a cross-platform alarm manager
# Copyright (C) 2017 Loïc Charrière, Samuel Gauthier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import join, abspath, dirname
from PyQt5.QtCore import Qt, pyqtSignal, QRect
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget, QLabel
from _clockalarm.utils.importExportUtils import get_default_config
class NotificationWidget(QWidget):
"""Notification widget
Attributes:
geometry: The position and size of the widget on the screen
notification: The notification
"""
popup_close = pyqtSignal('PyQt_PyObject')
def __init__(self, geometry, notification, parent=None):
"""Inits the NotificationWidget with a position and a notification.
Note:
By default, the NotificationWidget has no parent
"""
super(NotificationWidget, self).__init__(parent=parent,
flags=Qt.Tool | Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
self.parent = parent
self.notification = notification
self.init_ui(geometry)
def init_ui(self, geom):
"""Helper method that sets the style of the NotificationWidget.
        Args:
            geom: The position and size of the widget on the screen
"""
self.setGeometry(geom)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setAttribute(Qt.WA_ShowWithoutActivating)
"""Background Image"""
im_name = get_default_config("WIDGET_FILE_NAME")
im_path = join(dirname(dirname(abspath(__file__))), 'resources', 'images',
im_name)
lbl_im = QLabel(self)
lbl_im.setPixmap(QPixmap(im_path))
"""Notification message"""
color = self.notification.get_color()
alpha = get_default_config("WIDGET_TRANSPARENCY", "int")
rgba = "{r}, {g}, {b}, {a}".format(r=color.red(), g=color.green(), b=color.blue(), a=alpha)
lbl = QLabel(self.notification.message, self)
lbl.setAlignment(Qt.AlignVCenter)
lbl.setWordWrap(True)
padding_top = get_default_config("WIDGET_TEXT_PADDING_TOP", "int")
padding_left = get_default_config("WIDGET_TEXT_PADDING_LEFT", "int")
text_width = get_default_config("WIDGET_TEXT_WIDTH", "int")
text_height = get_default_config("WIDGET_TEXT_HEIGHT", "int")
lbl.setGeometry(QRect(padding_left, padding_top, text_width, text_height))
lbl.setFont(self.notification.get_font())
lbl.setStyleSheet(
"QLabel { color : rgba(" + rgba + ")}")
def mousePressEvent(self, event):
"""Override of :class:~PyQt5.QtWidgets.QWidget.mousePressEvent method"""
if self.underMouse():
self.close()
self.popup_close.emit(self)
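# Example usage (illustrative sketch; the geometry values, the `notification`
# object, and the `handle_close` slot are assumptions, not part of this module):
#
#     from PyQt5.QtCore import QRect
#     widget = NotificationWidget(QRect(0, 0, 300, 100), notification)
#     widget.popup_close.connect(handle_close)
#     widget.show()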
| gpl-3.0 | 9,079,899,574,267,566,000 | 39.952381 | 114 | 0.65843 | false |
takus/dd-agent | tests/checks/integration/test_php_fpm.py | 1 | 2958 | # 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
# sample from /status?json
# {
# "accepted conn": 350,
# "active processes": 1,
# "idle processes": 2,
# "listen queue": 0,
# "listen queue len": 0,
# "max active processes": 2,
# "max children reached": 0,
# "max listen queue": 0,
# "pool": "www",
# "process manager": "dynamic",
# "slow requests": 0,
# "start since": 4758,
# "start time": 1426601833,
# "total processes": 3
# }
@attr(requires='phpfpm')
class PHPFPMCheckTest(AgentCheckTest):
CHECK_NAME = 'php_fpm'
def test_bad_status(self):
instance = {
'status_url': 'http://localhost:9001/status',
'tags': ['expectedbroken']
}
self.assertRaises(Exception, self.run_check, {'instances': [instance]})
def test_bad_ping(self):
instance = {
'ping_url': 'http://localhost:9001/status',
'tags': ['expectedbroken']
}
self.run_check({'instances': [instance]})
self.assertServiceCheck(
'php_fpm.can_ping',
status=AgentCheck.CRITICAL,
tags=['ping_url:http://localhost:9001/status', 'expectedbroken'],
count=1
)
self.coverage_report()
def test_bad_ping_reply(self):
instance = {
'ping_url': 'http://localhost:42424/ping',
'ping_reply': 'blah',
'tags': ['expectedbroken']
}
self.run_check({'instances': [instance]})
self.assertServiceCheck(
'php_fpm.can_ping',
status=AgentCheck.CRITICAL,
tags=['ping_url:http://localhost:42424/ping', 'expectedbroken'],
count=1
)
self.coverage_report()
def test_status(self):
instance = {
'status_url': 'http://localhost:42424/status',
'ping_url': 'http://localhost:42424/ping',
'tags': ['cluster:forums']
}
self.run_check_twice({'instances': [instance]})
metrics = [
'php_fpm.listen_queue.size',
'php_fpm.processes.idle',
'php_fpm.processes.active',
'php_fpm.processes.total',
'php_fpm.requests.slow',
'php_fpm.requests.accepted',
]
expected_tags = ['cluster:forums', 'pool:www']
for mname in metrics:
self.assertMetric(mname, count=1, tags=expected_tags)
self.assertMetric('php_fpm.processes.idle', count=1, value=1)
self.assertMetric('php_fpm.processes.total', count=1, value=2)
self.assertServiceCheck('php_fpm.can_ping', status=AgentCheck.OK,
count=1,
tags=['ping_url:http://localhost:42424/ping', 'cluster:forums'])
self.assertMetric('php_fpm.processes.max_reached', count=1)
| bsd-3-clause | -3,591,520,637,008,432,600 | 28 | 96 | 0.55071 | false |
AdamsLee/mongo-connector | mongo_connector/connector.py | 1 | 31437 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discovers the mongo cluster and starts the connector.
"""
import json
import logging
import logging.handlers
import optparse
import os
import pymongo
import re
import shutil
import sys
import threading
import time
import imp
from mongo_connector import constants, errors, util
from mongo_connector.locking_dict import LockingDict
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.doc_managers import doc_manager_simulator as simulator
from pymongo import MongoClient
class Connector(threading.Thread):
"""Checks the cluster for shards to tail.
"""
def __init__(self, address, oplog_checkpoint, target_url, ns_set,
u_key, auth_key, doc_manager=None, auth_username=None,
collection_dump=True, batch_size=constants.DEFAULT_BATCH_SIZE,
fields=None, dest_mapping={},
auto_commit_interval=constants.DEFAULT_COMMIT_INTERVAL):
if target_url and not doc_manager:
raise errors.ConnectorError("Cannot create a Connector with a "
"target URL but no doc manager!")
def is_string(s):
try:
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
def load_doc_manager(path):
name, _ = os.path.splitext(os.path.basename(path))
try:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(name, path)
module = loader.load_module(name)
except ImportError:
module = imp.load_source(name, path)
return module
doc_manager_modules = None
if doc_manager is not None:
            # backwards compatibility: doc_manager may be a string
if is_string(doc_manager):
doc_manager_modules = [load_doc_manager(doc_manager)]
# doc_manager is a list
else:
doc_manager_modules = []
for dm in doc_manager:
doc_manager_modules.append(load_doc_manager(dm))
super(Connector, self).__init__()
#can_run is set to false when we join the thread
self.can_run = True
#The name of the file that stores the progress of the OplogThreads
self.oplog_checkpoint = oplog_checkpoint
#main address - either mongos for sharded setups or a primary otherwise
self.address = address
#The URLs of each target system, respectively
if is_string(target_url):
self.target_urls = [target_url]
elif target_url:
self.target_urls = list(target_url)
else:
self.target_urls = None
#The set of relevant namespaces to consider
self.ns_set = ns_set
#The dict of source namespace to destination namespace
self.dest_mapping = dest_mapping
#The key that is a unique document identifier for the target system.
#Not necessarily the mongo unique key.
self.u_key = u_key
#Password for authentication
self.auth_key = auth_key
#Username for authentication
self.auth_username = auth_username
#The set of OplogThreads created
self.shard_set = {}
#Boolean chooses whether to dump the entire collection if no timestamp
# is present in the config file
self.collection_dump = collection_dump
#Num entries to process before updating config file with current pos
self.batch_size = batch_size
#Dict of OplogThread/timestamp pairs to record progress
self.oplog_progress = LockingDict()
# List of fields to export
self.fields = fields
try:
docman_kwargs = {"unique_key": u_key,
"namespace_set": ns_set,
"auto_commit_interval": auto_commit_interval}
# No doc managers specified, using simulator
if doc_manager is None:
self.doc_managers = [simulator.DocManager(**docman_kwargs)]
else:
self.doc_managers = []
for i, d in enumerate(doc_manager_modules):
# self.target_urls may be shorter than
# self.doc_managers, or left as None
if self.target_urls and i < len(self.target_urls):
target_url = self.target_urls[i]
else:
target_url = None
if target_url:
self.doc_managers.append(
d.DocManager(self.target_urls[i],
**docman_kwargs))
else:
self.doc_managers.append(
d.DocManager(**docman_kwargs))
# If more target URLs were given than doc managers, may need
# to create additional doc managers
for url in self.target_urls[i + 1:]:
self.doc_managers.append(
doc_manager_modules[-1].DocManager(url,
**docman_kwargs))
except errors.ConnectionFailed:
err_msg = "MongoConnector: Could not connect to target system"
logging.critical(err_msg)
self.can_run = False
return
if self.oplog_checkpoint is not None:
if not os.path.exists(self.oplog_checkpoint):
info_str = ("MongoConnector: Can't find %s, "
"attempting to create an empty progress log" %
self.oplog_checkpoint)
logging.info(info_str)
try:
# Create oplog progress file
open(self.oplog_checkpoint, "w").close()
except IOError as e:
logging.critical("MongoConnector: Could not "
"create a progress log: %s" %
str(e))
sys.exit(2)
else:
if (not os.access(self.oplog_checkpoint, os.W_OK)
and not os.access(self.oplog_checkpoint, os.R_OK)):
logging.critical("Invalid permissions on %s! Exiting" %
(self.oplog_checkpoint))
sys.exit(2)
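    # Example construction (illustrative sketch; the address, target URL, and
    # doc manager path are assumptions, not values required by this class):
    #
    #     connector = Connector(
    #         address='localhost:27017',
    #         oplog_checkpoint='config.txt',
    #         target_url='http://localhost:8983/solr',
    #         ns_set=['test.test'],
    #         u_key='_id',
    #         auth_key=None,
    #         doc_manager='doc_managers/solr_doc_manager.py',
    #     )
    #     connector.start()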
def join(self):
""" Joins thread, stops it from running
"""
self.can_run = False
for dm in self.doc_managers:
dm.stop()
threading.Thread.join(self)
def write_oplog_progress(self):
""" Writes oplog progress to file provided by user
"""
if self.oplog_checkpoint is None:
return None
# write to temp file
backup_file = self.oplog_checkpoint + '.backup'
os.rename(self.oplog_checkpoint, backup_file)
# for each of the threads write to file
with open(self.oplog_checkpoint, 'w') as dest:
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
for oplog, time_stamp in oplog_dict.items():
oplog_str = str(oplog)
timestamp = util.bson_ts_to_long(time_stamp)
json_str = json.dumps([oplog_str, timestamp])
try:
dest.write(json_str)
except IOError:
# Basically wipe the file, copy from backup
dest.truncate()
with open(backup_file, 'r') as backup:
shutil.copyfile(backup, dest)
break
os.remove(self.oplog_checkpoint + '.backup')
def read_oplog_progress(self):
"""Reads oplog progress from file provided by user.
        This method is only called once before any threads are spawned.
"""
if self.oplog_checkpoint is None:
return None
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
logging.info("MongoConnector: Empty oplog progress file.")
return None
except OSError:
return None
source = open(self.oplog_checkpoint, 'r')
try:
data = json.load(source)
except ValueError: # empty file
reason = "It may be empty or corrupt."
logging.info("MongoConnector: Can't read oplog progress file. %s" %
(reason))
source.close()
return None
source.close()
count = 0
oplog_dict = self.oplog_progress.get_dict()
for count in range(0, len(data), 2):
oplog_str = data[count]
time_stamp = data[count + 1]
oplog_dict[oplog_str] = util.long_to_bson_ts(time_stamp)
#stored as bson_ts
def run(self):
"""Discovers the mongo cluster and creates a thread for each primary.
"""
main_conn = MongoClient(self.address)
if self.auth_key is not None:
main_conn['admin'].authenticate(self.auth_username, self.auth_key)
self.read_oplog_progress()
conn_type = None
try:
main_conn.admin.command("isdbgrid")
except pymongo.errors.OperationFailure:
conn_type = "REPLSET"
if conn_type == "REPLSET":
# Make sure we are connected to a replica set
is_master = main_conn.admin.command("isMaster")
if not "setName" in is_master:
logging.error(
'No replica set at "%s"! A replica set is required '
'to run mongo-connector. Shutting down...' % self.address
)
return
# Establish a connection to the replica set as a whole
main_conn.disconnect()
main_conn = MongoClient(self.address,
replicaSet=is_master['setName'])
if self.auth_key is not None:
main_conn.admin.authenticate(self.auth_username, self.auth_key)
#non sharded configuration
oplog_coll = main_conn['local']['oplog.rs']
oplog = OplogThread(
primary_conn=main_conn,
main_address=self.address,
oplog_coll=oplog_coll,
is_sharded=False,
doc_manager=self.doc_managers,
oplog_progress_dict=self.oplog_progress,
namespace_set=self.ns_set,
auth_key=self.auth_key,
auth_username=self.auth_username,
repl_set=is_master['setName'],
collection_dump=self.collection_dump,
batch_size=self.batch_size,
fields=self.fields,
dest_mapping=self.dest_mapping
)
self.shard_set[0] = oplog
logging.info('MongoConnector: Starting connection thread %s' %
main_conn)
oplog.start()
while self.can_run:
if not self.shard_set[0].running:
logging.error("MongoConnector: OplogThread"
" %s unexpectedly stopped! Shutting down" %
(str(self.shard_set[0])))
self.oplog_thread_join()
for dm in self.doc_managers:
dm.stop()
return
self.write_oplog_progress()
time.sleep(1)
else: # sharded cluster
while self.can_run is True:
for shard_doc in main_conn['config']['shards'].find():
shard_id = shard_doc['_id']
if shard_id in self.shard_set:
if not self.shard_set[shard_id].running:
logging.error("MongoConnector: OplogThread "
"%s unexpectedly stopped! Shutting "
"down" %
(str(self.shard_set[shard_id])))
self.oplog_thread_join()
for dm in self.doc_managers:
dm.stop()
return
self.write_oplog_progress()
time.sleep(1)
continue
try:
repl_set, hosts = shard_doc['host'].split('/')
except ValueError:
cause = "The system only uses replica sets!"
logging.error("MongoConnector: %s", cause)
self.oplog_thread_join()
for dm in self.doc_managers:
dm.stop()
return
shard_conn = MongoClient(hosts, replicaSet=repl_set)
oplog_coll = shard_conn['local']['oplog.rs']
oplog = OplogThread(
primary_conn=shard_conn,
main_address=self.address,
oplog_coll=oplog_coll,
is_sharded=True,
doc_manager=self.doc_managers,
oplog_progress_dict=self.oplog_progress,
namespace_set=self.ns_set,
auth_key=self.auth_key,
auth_username=self.auth_username,
collection_dump=self.collection_dump,
batch_size=self.batch_size,
fields=self.fields,
dest_mapping=self.dest_mapping
)
self.shard_set[shard_id] = oplog
msg = "Starting connection thread"
logging.info("MongoConnector: %s %s" % (msg, shard_conn))
oplog.start()
self.oplog_thread_join()
self.write_oplog_progress()
def oplog_thread_join(self):
"""Stops all the OplogThreads
"""
logging.info('MongoConnector: Stopping all OplogThreads')
for thread in self.shard_set.values():
thread.join()
def main():
""" Starts the mongo connector (assuming CLI)
"""
parser = optparse.OptionParser()
#-m is for the main address, which is a host:port pair, ideally of the
#mongos. For non sharded clusters, it can be the primary.
parser.add_option("-m", "--main", action="store", type="string",
dest="main_addr", default="localhost:27217",
help="""Specify the main address, which is a"""
""" host:port pair. For sharded clusters, this"""
""" should be the mongos address. For individual"""
""" replica sets, supply the address of the"""
""" primary. For example, `-m localhost:27217`"""
""" would be a valid argument to `-m`. Don't use"""
""" quotes around the address.""")
#-o is to specify the oplog-config file. This file is used by the system
#to store the last timestamp read on a specific oplog. This allows for
#quick recovery from failure.
parser.add_option("-o", "--oplog-ts", action="store", type="string",
dest="oplog_config", default="config.txt",
help="""Specify the name of the file that stores the """
"""oplog progress timestamps. """
"""This file is used by the system to store the last """
"""timestamp read on a specific oplog. This allows """
"""for quick recovery from failure. By default this """
"""is `config.txt`, which starts off empty. An empty """
"""file causes the system to go through all the mongo """
"""oplog and sync all the documents. Whenever the """
"""cluster is restarted, it is essential that the """
"""oplog-timestamp config file be emptied - otherwise """
"""the connector will miss some documents and behave """
"""incorrectly.""")
#--no-dump specifies whether we should read an entire collection from
#scratch if no timestamp is found in the oplog_config.
parser.add_option("--no-dump", action="store_true", default=False, help=
"If specified, this flag will ensure that "
"mongo_connector won't read the entire contents of a "
"namespace iff --oplog-ts points to an empty file.")
#--batch-size specifies num docs to read from oplog before updating the
#--oplog-ts config file with current oplog position
parser.add_option("--batch-size", action="store",
default=constants.DEFAULT_BATCH_SIZE, type="int",
help="Specify an int to update the --oplog-ts "
"config file with latest position of oplog every "
"N documents. By default, the oplog config isn't "
"updated until we've read through the entire oplog. "
"You may want more frequent updates if you are at risk "
"of falling behind the earliest timestamp in the oplog")
#-t is to specify the URL to the target system being used.
parser.add_option("-t", "--target-url", "--target-urls", action="store",
type="string", dest="urls", default=None, help=
"""Specify the URL to each target system being """
"""used. For example, if you were using Solr out of """
"""the box, you could use '-t """
"""http://localhost:8080/solr' with the """
"""SolrDocManager to establish a proper connection. """
"""URLs should be specified in the same order as """
"""their respective doc managers in the """
"""--doc-managers option. URLs are assigned to doc """
"""managers respectively. Additional doc managers """
"""are implied to have no target URL. Additional """
"""URLs are implied to have the same doc manager """
"""type as the last doc manager for which a URL was """
"""specified. """
"""Don't use quotes around addresses. """)
#-n is to specify the namespaces we want to consider. The default
#considers all the namespaces
parser.add_option("-n", "--namespace-set", action="store", type="string",
dest="ns_set", default=None, help=
"""Used to specify the namespaces we want to """
"""consider. For example, if we wished to store all """
"""documents from the test.test and alpha.foo """
"""namespaces, we could use `-n test.test,alpha.foo`. """
"""The default is to consider all the namespaces, """
"""excluding the system and config databases, and """
"""also ignoring the "system.indexes" collection in """
"""any database.""")
#-u is to specify the mongoDB field that will serve as the unique key
#for the target system,
parser.add_option("-u", "--unique-key", action="store", type="string",
dest="u_key", default="_id", help=
"""The name of the MongoDB field that will serve """
"""as the unique key for the target system. """
"""Note that this option does not apply """
"""when targeting another MongoDB cluster. """
"""Defaults to "_id".""")
#-f is to specify the authentication key file. This file is used by mongos
#to authenticate connections to the shards, and we'll use it in the oplog
#threads.
parser.add_option("-f", "--password-file", action="store", type="string",
dest="auth_file", default=None, help=
"""Used to store the password for authentication."""
""" Use this option if you wish to specify a"""
""" username and password but don't want to"""
""" type in the password. The contents of this"""
""" file should be the password for the admin user.""")
#-p is to specify the password used for authentication.
parser.add_option("-p", "--password", action="store", type="string",
dest="password", default=None, help=
"""Used to specify the password."""
""" This is used by mongos to authenticate"""
""" connections to the shards, and in the"""
""" oplog threads. If authentication is not used, then"""
""" this field can be left empty as the default """)
#-a is to specify the username for authentication.
parser.add_option("-a", "--admin-username", action="store", type="string",
dest="admin_name", default="__system", help=
"""Used to specify the username of an admin user to """
"""authenticate with. To use authentication, the user """
"""must specify both an admin username and a keyFile. """
"""The default username is '__system'""")
#-d is to specify the doc manager file.
parser.add_option("-d", "--docManager", "--doc-managers", action="store",
type="string", dest="doc_managers", default=None, help=
"""Used to specify the path to each doc manager """
"""file that will be used. DocManagers should be """
"""specified in the same order as their respective """
"""target addresses in the --target-urls option. """
"""URLs are assigned to doc managers """
"""respectively. Additional doc managers are """
"""implied to have no target URL. Additional URLs """
"""are implied to have the same doc manager type as """
"""the last doc manager for which a URL was """
"""specified. By default, Mongo Connector will use """
"""'doc_manager_simulator.py'. It is recommended """
"""that all doc manager files be kept in the """
"""doc_managers folder in mongo-connector. For """
"""more information about making your own doc """
"""manager, see 'Writing Your Own DocManager' """
"""section of the wiki""")
#-g is the destination namespace
parser.add_option("-g", "--dest-namespace-set", action="store",
type="string", dest="dest_ns_set", default=None, help=
"""Specify a destination namespace mapping. Each """
"""namespace provided in the --namespace-set option """
"""will be mapped respectively according to this """
"""comma-separated list. These lists must have """
"""equal length. The default is to use the identity """
"""mapping. This is currently only implemented """
"""for mongo-to-mongo connections.""")
#-s is to enable syslog logging.
parser.add_option("-s", "--enable-syslog", action="store_true",
dest="enable_syslog", default=False, help=
"""Used to enable logging to syslog."""
""" Use -l to specify syslog host.""")
#--syslog-host is to specify the syslog host.
parser.add_option("--syslog-host", action="store", type="string",
dest="syslog_host", default="localhost:514", help=
"""Used to specify the syslog host."""
""" The default is 'localhost:514'""")
#--syslog-facility is to specify the syslog facility.
parser.add_option("--syslog-facility", action="store", type="string",
dest="syslog_facility", default="user", help=
"""Used to specify the syslog facility."""
""" The default is 'user'""")
#-i to specify the list of fields to export
parser.add_option("-i", "--fields", action="store", type="string",
dest="fields", default=None, help=
"""Used to specify the list of fields to export. """
"""Specify a field or fields to include in the export. """
"""Use a comma separated list of fields to specify multiple """
"""fields. The '_id', 'ns' and '_ts' fields are always """
"""exported.""")
#--auto-commit-interval to specify auto commit time interval
parser.add_option("--auto-commit-interval", action="store",
dest="commit_interval", type="int",
default=constants.DEFAULT_COMMIT_INTERVAL,
help="""Seconds in-between calls for the Doc Manager"""
""" to commit changes to the target system. A value of"""
""" 0 means to commit after every write operation."""
""" When left unset, Mongo Connector will not make"""
""" explicit commits. Some systems have"""
""" their own mechanism for adjusting a commit"""
""" interval, which should be preferred to this"""
""" option.""")
#-v enables vebose logging
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="Sets verbose logging to be on.")
#-w enable logging to a file
parser.add_option("-w", "--logfile", dest="logfile",
help=("Log all output to a file rather than stream to "
"stderr. Omit to stream to stderr."))
(options, args) = parser.parse_args()
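    # Example invocation (illustrative sketch; the addresses and the doc
    # manager path are assumptions, not defaults enforced by this script):
    #
    #     python mongo_connector/connector.py -m localhost:27017 \
    #         -t http://localhost:8983/solr \
    #         -d ./doc_managers/solr_doc_manager.py \
    #         -n test.test -o oplog_progress.txt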
logger = logging.getLogger()
loglevel = logging.INFO
if options.verbose:
loglevel = logging.DEBUG
logger.setLevel(loglevel)
if options.enable_syslog and options.logfile:
print ("You cannot specify syslog and a logfile simultaneously, please"
" choose the logging method you would prefer.")
sys.exit(1)
if options.enable_syslog:
syslog_info = options.syslog_host.split(":")
syslog_host = logging.handlers.SysLogHandler(
address=(syslog_info[0], int(syslog_info[1])),
facility=options.syslog_facility
)
syslog_host.setLevel(loglevel)
logger.addHandler(syslog_host)
elif options.logfile is not None:
log_out = logging.FileHandler(options.logfile)
log_out.setLevel(loglevel)
log_out.setFormatter(logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(log_out)
else:
log_out = logging.StreamHandler()
log_out.setLevel(loglevel)
log_out.setFormatter(logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(log_out)
logger.info('Beginning Mongo Connector')
# Get DocManagers and target URLs
# Each DocManager is assigned the respective (same-index) target URL
# Additional DocManagers may be specified that take no target URL
doc_managers = options.doc_managers
doc_managers = doc_managers.split(",") if doc_managers else doc_managers
target_urls = options.urls.split(",") if options.urls else None
if options.doc_managers is None:
logger.info('No doc managers specified, using simulator.')
if options.ns_set is None:
ns_set = []
else:
ns_set = options.ns_set.split(',')
if options.dest_ns_set is None:
dest_ns_set = ns_set
else:
dest_ns_set = options.dest_ns_set.split(',')
if len(dest_ns_set) != len(ns_set):
logger.error("Destination namespace must be the same length as the "
"origin namespace!")
sys.exit(1)
elif len(set(ns_set)) + len(set(dest_ns_set)) != 2 * len(ns_set):
logger.error("Namespace set and destination namespace set should not "
"contain any duplicates!")
sys.exit(1)
else:
## Create a mapping of source ns to dest ns as a dict
dest_mapping = dict(zip(ns_set, dest_ns_set))
fields = options.fields
if fields is not None:
fields = options.fields.split(',')
key = None
if options.auth_file is not None:
try:
key = open(options.auth_file).read()
            key = re.sub(r'\s', '', key)
except IOError:
logger.error('Could not parse password authentication file!')
sys.exit(1)
if options.password is not None:
key = options.password
if key is None and options.admin_name != "__system":
logger.error("Admin username specified without password!")
sys.exit(1)
if options.commit_interval is not None and options.commit_interval < 0:
raise ValueError("--auto-commit-interval must be non-negative")
connector = Connector(
address=options.main_addr,
oplog_checkpoint=options.oplog_config,
target_url=target_urls,
ns_set=ns_set,
u_key=options.u_key,
auth_key=key,
doc_manager=doc_managers,
auth_username=options.admin_name,
collection_dump=(not options.no_dump),
batch_size=options.batch_size,
fields=fields,
dest_mapping=dest_mapping,
auto_commit_interval=options.commit_interval
)
connector.start()
while True:
try:
time.sleep(3)
if not connector.is_alive():
break
except KeyboardInterrupt:
logging.info("Caught keyboard interrupt, exiting!")
connector.join()
break
if __name__ == '__main__':
main()
| apache-2.0 | -3,153,926,013,907,341,300 | 42.967832 | 85 | 0.533193 | false |
willzfarmer/HadoopInspector | hadoopinspector/tests/test_registry.py | 1 | 12740 | #!/usr/bin/env python2
"""
This source code is protected by the BSD license. See the file "LICENSE"
in the source code root directory for the full language or refer to it here:
http://opensource.org/licenses/BSD-3-Clause
Copyright 2015, 2016 Will Farmer and Ken Farmer
"""
from __future__ import division
import sys, os, shutil
import logging
import tempfile, json
from pprint import pprint as pp
from os.path import exists, isdir, isfile
from os.path import join as pjoin
from os.path import dirname
import pytest
sys.path.insert(0, dirname(dirname(dirname(os.path.abspath(__file__)))))
sys.path.insert(0, dirname(dirname(os.path.abspath(__file__))))
import hadoopinspector.registry as mod
logging.basicConfig()
class TestRegistry(object):
def setup_method(self, method):
self.temp_dir = tempfile.mkdtemp(prefix='hadinsp_')
def teardown_method(self, method):
shutil.rmtree(self.temp_dir)
def test_loading_good_registry(self):
good_data = { "asset": {
"rule_pk1": {
"check_type": "rule",
"check_name": "rule_uniqueness",
"check_mode": "full",
"check_scope": "row",
"check_status": "active",
"hapinsp_checkcustom_cols": 999
}
},
"cust": {
"rule_pk1": {
"check_type": "rule",
"check_name": "rule_uniqueness",
"check_mode": "full",
"check_scope": "row",
"check_status": "active",
"hapinsp_checkcustom_cols": 999
}
}
}
with open(pjoin(self.temp_dir, 'registry.json'), 'w') as f:
json.dump(good_data, f)
reg = mod.Registry()
reg.load_registry(pjoin(self.temp_dir, 'registry.json'))
#reg.validate_file(pjoin(self.temp_dir, 'registry.json'))
reg.validate_file(pjoin(self.temp_dir))
def test_loading_bad_registry_extra_comma(self):
# extra comma before last field in registry
bad_data = ("""{"asset": {"rule_pk1": {"check_type": "rule", """
""" "check_name": "rule_uniqueness", "check_mode": "full", "check_scope": "row", """
""" "hapinsp_checkcustom_cols": 999, "check_status": "active"}}, """
""" "cust": {"rule_pk1": {"check_type": "rule", "check_name": "rule_uniqueness", """
""" "check_mode": "full", "check_scope": "row", "hapinsp_checkcustom_cols": 999,, """
""" "check_status": "active"}}} """)
with open(pjoin(self.temp_dir, 'registry.json'), 'w') as f:
f.write(bad_data)
reg = mod.Registry()
with pytest.raises(SystemExit):
reg.load_registry(pjoin(self.temp_dir, 'registry.json'))
def test_loading_bad_registry_unquoted_key(self):
# check_status at end of registry is unquoted
bad_data = ("""{"asset": {"rule_pk1": {"check_type": "rule", """
""" "check_name": "rule_uniqueness", "check_mode": "full", "check_scope": "row", """
""" "hapinsp_checkcustom_cols": 999, "check_status": "active"}}, """
""" "cust": {"rule_pk1": {"check_type": "rule", "check_name": "rule_uniqueness", """
""" "check_mode": "full", "check_scope": "row", "hapinsp_checkcustom_cols": 999, """
""" check_status: "active"}}} """)
with open(pjoin(self.temp_dir, 'registry.json'), 'w') as f:
f.write(bad_data)
reg = mod.Registry()
with pytest.raises(SystemExit):
reg.load_registry(pjoin(self.temp_dir, 'registry.json'))
def test_loading_bad_registry_missing_brace(self):
# check_status at end of registry is unquoted
bad_data = ("""{"asset": {"rule_pk1": {"check_type": "rule", """
""" "check_name": "rule_uniqueness", "check_mode": "full", "check_scope": "row", """
""" "hapinsp_checkcustom_cols": 999, "check_status": "active"}}, """
""" "cust": {"rule_pk1": {"check_type": "rule", "check_name": "rule_uniqueness", """
""" "check_mode": "full", "check_scope": "row", "hapinsp_checkcustom_cols": 999, """
""" "check_status": "active"}} """)
with open(pjoin(self.temp_dir, 'registry.json'), 'w') as f:
f.write(bad_data)
reg = mod.Registry()
with pytest.raises(SystemExit):
reg.load_registry(pjoin(self.temp_dir, 'registry.json'))
def test_creating_then_writing(self):
reg = mod.Registry()
reg.add_table('asset')
reg.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness',
check_status='active',
check_type='rule',
check_mode='full',
check_scope='row')
reg.write(pjoin(self.temp_dir, 'registry.json'))
reg.validate_file(pjoin(self.temp_dir, 'registry.json'))
#with open(pjoin(self.temp_dir, 'registry.json')) as f:
# pp(f.read())
def test_creating_then_loading(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness',
check_status='active',
check_type='rule',
check_mode='full',
check_scope='row')
reg1.write(pjoin(self.temp_dir, 'registry.json'))
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
reg2 = mod.Registry()
reg2.load_registry(pjoin(self.temp_dir, 'registry.json'))
assert reg1.registry == reg2.registry
def test_validating_bad_check(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness',
check_status='active',
check_type='rule',
check_mode='fullish', # this is bad!
check_scope='row')
reg1.write(pjoin(self.temp_dir, 'registry.json'))
with pytest.raises(SystemExit):
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
def test_validating_bad_setup(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness',
check_status='active',
check_type='setup',
check_mode='full', # this is bad! should be None
check_scope='row') # this is bad! should be None
reg1.write(pjoin(self.temp_dir, 'registry.json'))
with pytest.raises(SystemExit):
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
def test_creating_then_loading_with_setup_and_teardown(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'asset_setup',
check_name='asset_setup', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.add_check('asset', 'teardown',
check_name='asset_teardown', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.write(pjoin(self.temp_dir, 'registry.json'))
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
reg2 = mod.Registry()
reg2.load_registry(pjoin(self.temp_dir, 'registry.json'))
assert reg1.registry == reg2.registry
def test_filter_registry_no_args(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'asset_setup',
check_name='asset_setup', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.add_check('asset', 'teardown',
check_name='asset_teardown', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
reg1.filter_registry()
assert 'check_name' in reg1.registry['asset']['rule_pk1']
def test_filter_registry_with_table(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.add_table('cust')
reg1.add_check('cust', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
reg1.filter_registry('cust')
assert 'cust' in reg1.registry
assert 'rule_pk1' in reg1.registry['cust']
assert 'asset' not in reg1.registry
def test_filter_registry_with_check(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'asset_setup',
check_name='asset_setup', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.add_check('asset', 'rule_fk1',
check_name='rule_foreign_key', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.add_check('asset', 'teardown',
check_name='asset_teardown', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.add_table('cust')
reg1.add_check('cust', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
reg1.filter_registry(None, 'rule_pk1')
assert 'cust' in reg1.registry
assert 'rule_pk1' in reg1.registry['cust']
assert 'asset' in reg1.registry
assert 'rule_pk1' in reg1.registry['asset']
assert 'rule_fk1' not in reg1.registry['asset']
def test_filter_registry_with_table_and_check(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'asset_setup',
check_name='asset_setup', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.add_check('asset', 'rule_fk1',
check_name='rule_foreign_key', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.add_check('asset', 'teardown',
check_name='asset_teardown', check_status='active',
check_type='setup', check_mode=None, check_scope=None)
reg1.add_table('cust')
reg1.add_check('cust', 'rule_pk1',
check_name='rule_uniqueness', check_status='active',
check_type='rule', check_mode='full', check_scope='row')
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
reg1.filter_registry('cust', 'rule_pk1')
assert 'cust' in reg1.registry
assert 'rule_pk1' in reg1.registry['cust']
assert 'asset' not in reg1.registry
def test_validation(self):
reg1 = mod.Registry()
with open(pjoin(self.temp_dir, 'registry.json'), 'w') as f:
f.write('im an invalid data structure')
with pytest.raises(SystemExit):
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
def test_validating_checkvars(self):
reg1 = mod.Registry()
reg1.add_table('asset')
reg1.add_check('asset', 'rule_pk1',
check_name='rule_uniqueness',
check_status='active',
check_type='setup',
                       check_mode=None,   # None is required for 'setup' checks
                       check_scope=None,  # None is required for 'setup' checks
hapinsp_checkcustom_foo='bar')
reg1.write(pjoin(self.temp_dir, 'registry.json'))
reg1.validate_file(pjoin(self.temp_dir, 'registry.json'))
| bsd-3-clause | 901,230,051,959,237,400 | 42.040541 | 105 | 0.569937 | false |
v4hn/ecto | test/scripts/test_tendrils.py | 1 | 2868 | #!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto.ecto_test as ecto_test
def test_tendrils():
t = ecto.Tendrils()
t.declare("Hello","doc str",6)
assert t.Hello == 6
assert t["Hello"] == 6
t.declare("x","a number", "str")
assert len(t) == 2
assert t["x"] == "str"
assert t.x == "str"
#test the redeclare
try:
t.declare("Hello","new doc", "you")
        assert False, "expected TendrilRedeclaration"
except ecto.TendrilRedeclaration, e:
print str(e)
assert('TendrilRedeclaration' in str(e))
try:
#read error
t.nonexistant = 1
        assert False, 'expected ecto.NonExistant'
except ecto.NonExistant, e:
print str(e)
assert "tendril_key nonexistant" in str(e)
try:
#index error
print t["nonexistant"]
        assert False, 'expected ecto.NonExistant'
except ecto.NonExistant, e:
print str(e)
assert "tendril_key nonexistant" in str(e)
assert len(t.keys()) == 2
assert len(t.values()) == 2
print t
#by value
_x = t.x
_x = 10
assert t.x != 10
x = t.x
t.x = 11
assert x != 11
#by reference
x = t.at("x")
t.x = 13
assert x.val == 13
t.x = 17
assert t.x == 17
t.x = 199
t.x = 15
print t.x
assert t.x == 15
if __name__ == '__main__':
test_tendrils()
| bsd-3-clause | 3,189,837,938,713,107,000 | 31.590909 | 77 | 0.656206 | false |
lazuxd/teste-admitere-snpap | slidingpanel.py | 1 | 3425 | # -*- coding: utf-8 -*-
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import OptionProperty, NumericProperty, StringProperty, \
BooleanProperty, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
Builder.load_string("""
#: import Window kivy.core.window.Window
<SlidingPanel>
orientation: 'vertical'
size_hint_x: None
width: Window.width - dp(56) if Window.width - dp(56) < dp(320) else dp(320)
x: -1 * self.width if self.side == 'left' else Window.width
<PanelShadow>
canvas:
Color:
rgba: root.color
Rectangle:
size: root.size
""")
class PanelShadow(BoxLayout):
color = ListProperty([0, 0, 0, 0])
class SlidingPanel(BoxLayout):
anim_length_close = NumericProperty(0.3)
anim_length_open = NumericProperty(0.3)
animation_t_open = StringProperty('out_sine')
animation_t_close = StringProperty('out_sine')
side = OptionProperty('left', options=['left', 'right'])
_open = False
def __init__(self, **kwargs):
super(SlidingPanel, self).__init__(**kwargs)
self.width = Window.width - dp(56) if Window.width - dp(56) < dp(320) else dp(320)
self.shadow = PanelShadow()
Clock.schedule_once(lambda x: Window.add_widget(self.shadow,89), 0)
Clock.schedule_once(lambda x: Window.add_widget(self,90), 0)
def toggle(self):
Animation.stop_all(self, 'x')
Animation.stop_all(self.shadow, 'color')
if self._open:
if self.side == 'left':
target_x = -1 * self.width
else:
target_x = Window.width
sh_anim = Animation(duration=self.anim_length_open,
t=self.animation_t_open,
color=[0, 0, 0, 0])
sh_anim.start(self.shadow)
self._get_main_animation(duration=self.anim_length_close,
t=self.animation_t_close,
x=target_x,
is_closing=True).start(self)
self._open = False
else:
if self.side == 'left':
target_x = 0
else:
target_x = Window.width - self.width
Animation(duration=self.anim_length_open, t=self.animation_t_open,
color=[0, 0, 0, 0.5]).start(self.shadow)
self._get_main_animation(duration=self.anim_length_open,
t=self.animation_t_open,
x=target_x,
is_closing=False).start(self)
self._open = True
def _get_main_animation(self, duration, t, x, is_closing):
return Animation(duration=duration, t=t, x=x)
def on_touch_down(self, touch):
# Prevents touch events from propagating to anything below the widget.
super(SlidingPanel, self).on_touch_down(touch)
if self.collide_point(*touch.pos) or self._open:
return True
def on_touch_up(self, touch):
super(SlidingPanel, self).on_touch_up(touch)
if not self.collide_point(touch.x, touch.y) and self._open:
self.toggle()
return True
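# Illustrative usage sketch (not part of the original module; the app and
# widget names below are hypothetical). The panel adds itself to the Window
# when constructed, so an app only needs to keep a reference and call
# toggle():
#
#     from kivy.app import App
#     from kivy.uix.button import Button
#
#     class DemoApp(App):
#         def build(self):
#             self.panel = SlidingPanel(side='left')
#             return Button(text='menu',
#                           on_release=lambda *args: self.panel.toggle())
#
#     DemoApp().run()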
| mit | -6,720,822,278,794,531,000 | 35.827957 | 90 | 0.574015 | false |
sophilabs/py101 | py101/lists/__init__.py | 1 | 1763 | """"
Introduction Adventure
Author: Ignacio Avas ([email protected])
"""
import codecs
import io
import sys
import unittest
from story.adventures import AdventureVerificationError, BaseAdventure
from story.translation import gettext as _
class TestOutput(unittest.TestCase):
"""Variables Adventure test"""
def __init__(self, candidate_code, file_name='<inline>'):
"""Init the test"""
super(TestOutput, self).__init__()
self.candidate_code = candidate_code
self.file_name = file_name
def setUp(self):
self.__old_stdout = sys.stdout
sys.stdout = self.__mockstdout = io.StringIO()
def tearDown(self):
sys.stdout = self.__old_stdout
self.__mockstdout.close()
def runTest(self):
"""Makes a simple test of the output"""
code = compile(self.candidate_code, self.file_name, 'exec', optimize=0)
self.assertIn('languages',
code.co_names,
'Should have defined languages variable')
exec(code)
lines = self.__mockstdout.getvalue().split('\n')
self.assertEqual([str(["ADA", "Pascal", "Fortran", "Smalltalk"]), ''],
lines,
'Should have same output'
)
class Adventure(BaseAdventure):
"""Lists Adventure"""
title = _('Lists')
@classmethod
def test(cls, sourcefile):
"""Test against the provided file"""
suite = unittest.TestSuite()
raw_program = codecs.open(sourcefile).read()
suite.addTest(TestOutput(raw_program, sourcefile))
result = unittest.TextTestRunner().run(suite)
if not result.wasSuccessful():
raise AdventureVerificationError()
| mit | 1,598,433,818,799,941,000 | 29.396552 | 79 | 0.601815 | false |
ZeitOnline/zeit.push | src/zeit/push/browser/mobile.py | 1 | 1348 | from zope.cachedescriptors.property import Lazy as cachedproperty
import logging
import sys
import zeit.push.interfaces
import zope.security.proxy
log = logging.getLogger(__name__)
class FindTitle(object):
def __call__(self):
name = self.request.form.get('q')
if not name:
return ''
source = zeit.push.interfaces.PAYLOAD_TEMPLATE_SOURCE.factory
template = source.find(name)
return source.getDefaultTitle(template)
class PreviewPayload(object):
@cachedproperty
def message(self):
# We need to talk to private API
push = zope.security.proxy.getObject(
zeit.push.interfaces.IPushMessages(self.context))
return push._create_message(
'mobile', self.context, push.get(type='mobile'))
@cachedproperty
def rendered(self):
return self.message._render()
def rendered_linenumbers(self):
result = []
for i, line in enumerate(self.rendered.split('\n')):
result.append(u'%03d %s' % (i, line))
return '\n'.join(result)
@cachedproperty
def error(self):
try:
self.message.validate_template(self.rendered)
except Exception, e:
e.traceback = zeit.cms.browser.error.getFormattedException(
sys.exc_info())
return e
| bsd-3-clause | 2,564,557,959,152,968,000 | 27.083333 | 71 | 0.626855 | false |
gitoffdabus/inf1340_2015_asst1 | exercise3.py | 1 | 5527 | #!/usr/bin/env python
""" Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.
This module contains one function diagnose_car(). It is an expert system to
interactive diagnose car issues.
"""
__author__ = 'Susan Sim'
__email__ = "[email protected]"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
def diagnose_car():
    # ans1, ans2, ans3, ans4 and ans5 are the variables used to collect the user's responses to the queries
ans1 = raw_input("Is the car silent when you turn the key? Press [Y]Yes or [N]No")
    # The if/elif chains either give the user a final answer or ask a further question
if ans1 == "Y":
ans2 = raw_input("Are the battery terminals corroded? Press [Y]Yes or [N]No")
if ans2 == "Y":
print("Clean terminals and try starting again.")
elif ans2 == "N":
print("Replace cables and try again.")
else:
print("Error")
elif ans1 == "N":
ans2 = raw_input("Does the car make a clicking noise? Press [Y]Yes or [N]No")
if ans2 == "Y":
print("Replace the battery.")
elif ans2 == "N":
ans3 = raw_input("Does the car crank up but fail to start? Press [Y]Yes or [N]No")
if ans3 == "Y":
print("Check spark plug connections.")
elif ans3 == "N":
ans4 = raw_input("Does the engine start and then die? Press [Y]Yes or [N]No" + " ")
if ans4 == "Y":
ans5 = raw_input("Does your car have fuel injection?Press [Y]Yes or [N]No" + " ")
if ans5 == "Y":
print("Get it in for service.")
elif ans5 == "N":
print("Check to ensure the choke is opening and closing.")
else:
print("Error")
elif ans4 == "N":
print("Engine is not getting enough fuel. Clean fuel pump.")
else:
print("Error")
else:
print("Error")
else:
print("Error")
# Error is printed when the user does not press either "Y" or "N"
#diagnose_car()
"""
Interactively queries the user with yes/no questions to identify a
possible issue with a car.
Test Case 1
Input_1: Y
Expected Output_1: Are the battery terminals corroded?
Errors: None
Input_2: Y
Expected Output_2: Clean terminals and try starting again.
Errors: None
Test Case 2
Input_1: Y
Expected Output_1: Are the battery terminals corroded?
Error: None
Input_2: N
Expected Output_2: Replace cables and try again.
Error: None
Test Case 3
Input_1: N
Expected Output_1: Does the car make a clicking noise?
Error: None
Input_2: Y
Expected Output_2: Replace the battery.
Error: None
Test Case 4
Input_1: N
Expected Output_1: Does the car make a clicking noise?
Error: None
Input_2: N
Expected Output_2: Does the car crank up but fail to start?
Error: None
Input_3: Y
Expected Output_3: Check spark plug connections.
Error: None
Test Case 5
Input_1: N
Expected Output_1: Does the car make a clicking noise?
Error: None
Input_2: N
Expected Output_2: Does the car crank up but fail to start?
Error: None
Input_3:N
Expected Output_3: Does the engine start and then die?
Error: None
Input_4: N
Expected Output_4: Engine is not getting enough fuel. Clean fuel pump.
Error: None
Test Case 6
Input_1: N
Expected Output_1: Does the car make a clicking noise?
Error: None
Input_2: N
Expected Output_2: Does the car crank up but fail to start?
Error: None
Input_3: N
Expected Output_3: Does the engine start and then die?
Error: None
Input_4: Y
Expected Output_4: Does your car have fuel injection?
Error: None
Input_5: N
Expected Output_5: Check to ensure the choke is opening and closing.
Error: Get it in service
Solution: Yes and No are reversed on last two outcome nodes. Switch order and problem fixed.
Test Case 7
Input_1: N
Expected Output_1: Does the car make a clicking noise?
Error: None
Input_2: N
Expected Output_2: Does the car crank up but fail to start?
Error: None
Input_3: N
Expected Output_3: Does the engine start and then die?
Error: None
Input_4: Y
Expected Output_4: Does your car have fuel injection?
Error: None
Input_5: N
Expected Output_5: Check to ensure the choke is opening and closing.
Error: None
Test Case 8
Input_1: N
Expected Output_1: Does the car make a clicking noise?
Error: None
Input_2: N
Expected Output_2: Does the car crank up but fail to start?
Error: None
Input_3: N
Expected Output_3: Does the engine start and then die?
Error: None
Input_4: Y
Expected Output_4: Does your car have fuel injection?
Error: None
Input_5: Y
Expected Output_5: Get it in for service.
Error: None
"""
| mit | 1,235,066,256,256,096,000 | 32.70122 | 121 | 0.562692 | false |
grafgustav/accessmail | src/Service/ImapReceiver.py | 1 | 1024 | __author__ = 'phillip'
from .MailReceiver import MailReceiver
import poplib
class IMAPReceiver(MailReceiver):
def __init__(self, config):
self._conn = None
def connect(self, config):
self._server = poplib.POP3_SSL()
self._server.apop()
def delete_mail(self, n):
self._server.dele(n)
def list_folders(self):
pass
def create_folder(self, name):
pass
def get_number_of_mails(self):
count, size = self._server.stat()
return count
def change_folder(self, path):
pass
def get_header(self, n):
return self._server.top(n,0)
def can_create_folder(self):
return False
def delete_folder(self, name):
pass
def get_total_mails(self):
return self.get_number_of_mails()
def get_mail(self, n):
return self._server.retr(n)
def get_mailbox_size(self):
count, size = self._server.stat()
return size
def quit(self):
self._server.quit() | mit | -1,807,214,119,877,362,200 | 18.711538 | 41 | 0.584961 | false |
kurennon/misc-tools | find_validator/find_validator.py | 1 | 1259 | #!/usr/bin/env python3
DIG_CHARS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def find_validator(dig_string, old_base):
dig_sum = sum_digits(dig_string, old_base)
return dig_sum[-1:].upper()
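# For illustration (these examples are not part of the original script):
#   find_validator('18', 16) -> '9'   (1 + 8 == 9)
#   find_validator('FF', 16) -> 'E'   (15 + 15 == 30 == 0x1E; last digit 'E')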
def sum_digits(dig_string, old_base):
int_sum = 0
while dig_string:
int_sum += int(dig_string[:1], base=old_base)
dig_string = dig_string[1:]
dig_sum = unint(int_sum, old_base)
return dig_sum
def unint(int_val, new_base):
if int_val < new_base:
return DIG_CHARS[int_val]
else:
return unint(int_val//new_base, new_base) + DIG_CHARS[int_val%new_base]
if __name__ == "__main__":
print("Welcome to find_validator.py!\nPlease enter an invalid base to quit" +
"\nor q at the validator to choose a new base.")
work_base = 1
while 0 < work_base < 35:
dig_string = ""
work_base = int(input("\nEnter the base of the number(s) you would like to validate: "))
if work_base <= 0 or work_base > 35:
break
while dig_string.lower() != "q":
dig_string = input("Enter a number to validate: ")
if dig_string.lower() == "q":
break
print("The validator is:", find_validator(dig_string, work_base))
| gpl-3.0 | 3,645,768,671,179,305,500 | 36.029412 | 96 | 0.590151 | false |
yggi49/wtforms-polyglot | wtf_polyglot/meta.py | 1 | 2876 | from __future__ import unicode_literals
try:
from html import escape
from html.parser import HTMLParser
except ImportError:
from cgi import escape
from HTMLParser import HTMLParser
from wtforms.meta import DefaultMeta
from wtforms.widgets.core import HTMLString
class PolyglotHTMLParser(HTMLParser):
"""This simplified ``HTMLParser`` converts its input to polyglot HTML.
It works by making sure that stand-alone tags like ``<input>`` have a
slash before the closing angle bracket, that attribute values are always
quoted, and that boolean attributes have their value set to the attribute
name (e.g., ``checked="checked"``).
Note: boolean attributes are simply identified as attributes with no value
at all. Specifically, an attribute with an empty string (e.g.,
``checked=""``) will *not* be identified as boolean attribute, i.e., there
is no semantic intelligence involved.
>>> parser = PolyglotHTMLParser()
>>> parser.feed('''<input type=checkbox name=foo value=y checked>''')
>>> print(parser.get_output())
<input type="checkbox" name="foo" value="y" checked="checked" />
"""
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.output = []
def html_params(self, attrs):
output = []
for key, value in attrs:
if value is None:
value = key
output.append(' {}="{}"'.format(key, escape(value, quote=True)))
return ''.join(output)
def handle_starttag(self, tag, attrs):
if tag == 'input':
return self.handle_startendtag(tag, attrs)
self.output.append('<{}{}>'.format(tag, self.html_params(attrs)))
def handle_endtag(self, tag):
self.output.append('</{}>'.format(tag))
def handle_startendtag(self, tag, attrs):
self.output.append('<{}{} />'.format(tag, self.html_params(attrs)))
def handle_data(self, data):
self.output.append(data)
def handle_entityref(self, name):
self.output.append('&{};'.format(name))
def handle_charref(self, name):
self.output.append('&#{};'.format(name))
def get_output(self):
return ''.join(self.output)
class PolyglotMeta(DefaultMeta):
"""
This meta class works exactly like ``DefaultMeta``, except that fields of
forms using this meta class will output polyglot markup.
"""
def render_field(self, field, render_kw):
"""
Render a widget, and convert its output to polyglot HTML.
"""
other_kw = getattr(field, 'render_kw', None)
if other_kw is not None:
render_kw = dict(other_kw, **render_kw)
html = field.widget(field, **render_kw)
parser = PolyglotHTMLParser()
parser.feed(html)
output = HTMLString(parser.get_output())
return output
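# A minimal usage sketch (an assumption for illustration, not taken from this
# module): a form opts in to polyglot rendering by using PolyglotMeta as its
# Meta.
#
#     from wtforms import Form, StringField
#
#     class MyForm(Form):
#         Meta = PolyglotMeta
#         name = StringField('Name')
#
# MyForm().name() would then render as e.g.
# <input id="name" name="name" type="text" value="" />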
| bsd-3-clause | -4,385,307,373,991,363,000 | 31.681818 | 78 | 0.631085 | false |
torchingloom/edx-platform | common/lib/xmodule/xmodule/master_class_module.py | 1 | 15766 | # -*- coding: utf-8 -*-
"""Word cloud is ungraded xblock used by students to
generate and view word cloud.
On the client side we show:
If student does not yet answered - `num_inputs` numbers of text inputs.
If student have answered - words he entered and cloud.
"""
import json
import logging
import datetime
import csv
import StringIO
from pkg_resources import resource_string
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.x_module import XModule
from django.contrib.auth.models import User
from django.utils.timezone import UTC
from xblock.fields import Scope, Dict, Boolean, List, Integer, String
from xmodule.modulestore import Location
log = logging.getLogger(__name__)
from django.utils.translation import ugettext as _
from django.conf import settings
def pretty_bool(value):
"""Check value for possible `True` value.
Using this function we can manage different type of Boolean value
in xml files.
"""
bool_dict = [True, "True", "true", "T", "t", "1"]
return value in bool_dict
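# For illustration (not part of the original module):
#   pretty_bool("T") -> True, pretty_bool("1") -> True, pretty_bool("false") -> False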
class MasterClassFields(object):
"""XFields for word cloud."""
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_("Master Class")
)
total_places = Integer(
display_name=_("Max places"),
help=_("Number of places available for students to register for masterclass."),
scope=Scope.settings,
default=30,
values={"min": 1}
)
autopass_score = Integer(
display_name=_("Autopass score"),
help=_("Autopass score to automaticly pass registration for masterclass."),
scope=Scope.settings,
default=250,
values={"min": 1}
)
problem_id = String(
display_name=_("Masterclass problem id"),
help=_("Full id of the problem which is to be acomplished to pass registration for masterclass."),
scope=Scope.settings,
#default=_("Master Class") # no default
)
auto_register_if_passed = Boolean(
display_name=_("Auto registration"),
help=_("Auto registration for masterclass if a user passed the test"),
scope=Scope.settings,
default=False,
)
# Fields for descriptor.
submitted = Boolean(
help=_("Whether this student has been register for this master class."),
scope=Scope.user_state,
default=False
)
all_registrations = List(
help=_("All registrations from all students."),
scope=Scope.user_state_summary
)
passed_registrations = List(
help=_("Passed registrations."),
scope=Scope.user_state_summary
)
passed_masterclass_test = Boolean(
help=_("Whether this student has passed the task to register for the masterclass."),
scope=Scope.user_state,
default=False
)
class MasterClassModule(MasterClassFields, XModule):
"""MasterClass Xmodule"""
js = {
'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
'js': [resource_string(__name__, 'js/src/word_cloud/d3.min.js'),
resource_string(__name__, 'js/src/word_cloud/d3.layout.cloud.js'),
resource_string(__name__, 'js/src/master_class/master_class.js'),
resource_string(__name__, 'js/src/master_class/master_class_main.js')]
}
css = {'scss': [resource_string(__name__, 'css/master_class/display.scss')]}
js_module_name = "MasterClass"
def get_state(self):
"""Return success json answer for client."""
total_register = len(self.passed_registrations)
message = ""
message2 = ""
if self.runtime.user.email in self.passed_registrations:
message = _("You have been registered for this master class. We will provide addition information soon.")
elif self.runtime.user.email in self.all_registrations:
message = _("You are pending for registration for this master class. Please visit this page later for result.")
else:
message2 = _("You have not been registered for this master class. Probably you have to pass a test first or there is not enough places.")
if (total_register is None):
total_register = 0
additional_data = {}
allreg = []
passreg = []
for email in self.all_registrations:
try:
user = User.objects.get(email=email)
allreg += [{'email': email, 'name': user.profile.lastname + ' ' + user.profile.firstname + ' ' + user.profile.middlename}]
except:
pass
for email in self.passed_registrations:
try:
user = User.objects.get(email=email)
passreg += [{'email': email, 'name': user.profile.lastname + ' ' + user.profile.firstname + ' ' + user.profile.middlename}]
except:
pass
if self.runtime.user_is_staff:
additional_data['all_registrations'] = allreg
additional_data['passed_registrations'] = passreg
additional_data['is_staff'] = self.runtime.user_is_staff
additional_data['csv_name'] = self.runtime.course_id + " " + self.display_name
if self.submitted and self.runtime.user.email not in self.all_registrations and self.runtime.user.email not in self.passed_registrations:
self.submitted = False
if self.submitted:
data = {
'status': 'success',
'submitted': True,
'is_closed': self.is_past_due(),
'total_places': self.total_places,
'total_register': total_register,
'message': message,
'problem_id': self.problem_id,
'auto_register_if_passed': self.auto_register_if_passed,
}
data.update(additional_data)
return json.dumps(data)
else:
data = {
'status': 'success',
'submitted': False,
'is_closed': self.is_past_due(),
'total_places': self.total_places,
'total_register': total_register,
'problem_id': self.problem_id,
'message': message2,
'auto_register_if_passed': self.auto_register_if_passed,
}
data.update(additional_data)
return json.dumps(data)
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request get parameters
Returns:
json string
"""
if dispatch == 'submit':
if self.is_past_due():
return json.dumps({
'status': 'fail',
'error': 'Registration is closed due to date.'
})
if self.submitted:
return json.dumps({
'status': 'fail',
'error': 'You have already posted your data.'
})
            # Student registration data from the client.
            # FIXME: we must use raw JSON, not post data (multipart/form-data)
master_class = data.getall('master_class[]')
if self.problem_id is None:
self.all_registrations.append(self.runtime.user.email)
self.submitted = True
return self.get_state()
problem_location = Location(self.problem_id)
problem_descriptor = self.runtime.descriptor_runtime.modulestore.get_item(problem_location)
problem_score = self.runtime.get_score(self.runtime.course_id, self.runtime.user, problem_descriptor, self.runtime.get_module)
self.passed_masterclass_test = problem_score is not None and len(problem_score) >= 2 and problem_score[0] >= self.autopass_score
if self.passed_masterclass_test:
if self.auto_register_if_passed:
if len(self.passed_registrations) < self.total_places:
self.passed_registrations.append(self.runtime.user.email)
self.submitted = True
else:
self.all_registrations.append(self.runtime.user.email)
self.submitted = True
return self.get_state()
elif dispatch == 'get_state':
return self.get_state()
elif dispatch == 'register':
logging.error(data)
if self.runtime.user_is_staff:
for email in data.getall('emails[]'):
if (len(self.passed_registrations) < self.total_places):
if (self.all_registrations.count(email) > 0):
self.passed_registrations.append(email)
self.all_registrations.remove(email)
subject = u"Подтверждение регистрации на {masterclass}".format(masterclass=self.display_name)
body = u"Уважаемый(ая) {fullname}!\nВаша заявка на {masterclass} была одобрена. Подробности Вы можете узнать по ссылке: {url}.\nС уважением, Команда ГБОУ ЦПМ.".format(
fullname=User.objects.get(email=email).profile.name,
masterclass=self.display_name,
url='https://' + settings.SITE_NAME + '/courses/' + self.course_id + '/jump_to/{}'.format(Location(self.location))
)
mail = self.runtime.bulkmail.create(self.course_id,
self.runtime.user,
'list',
subject,
body,
location=self.id,
to_list=[email]
)
try:
mail.send()
return self.get_state()
except:
return json.dumps({
'status': 'fail',
'msg': _('Your email can not be sent.')
})
else:
return json.dumps({
'status': 'fail',
'error': _("Not enough places for this master class.")
})
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'unregister':
logging.error(data)
if self.runtime.user_is_staff:
for email in data.getall('emails[]'):
if (self.passed_registrations.count(email) > 0):
self.passed_registrations.remove(email)
self.all_registrations.append(email)
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'remove':
logging.error(data)
if self.runtime.user_is_staff:
for email in data.getall('emails[]'):
if (self.passed_registrations.count(email) > 0):
self.passed_registrations.remove(email)
if (self.all_registrations.count(email) > 0):
self.all_registrations.remove(email)
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'csv':
if self.runtime.user_is_staff:
header = [u'Email', u'Фамилия', u'Имя', u'Отчество',]
datatable = {'header': header, 'students': []}
data = []
for email in self.passed_registrations:
datarow = []
user = User.objects.get(email=email)
datarow += [user.email, user.profile.lastname, user.profile.firstname, user.profile.middlename]
data += [datarow]
datatable['data'] = data
return self.return_csv(" ", datatable, encoding="cp1251", dialect="excel-tab")
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'email':
subject = data.get('subject')
body = data.get('body')
mail = self.runtime.bulkmail.create(self.course_id, self.runtime.user, 'list', subject, body, location=self.id, to_list=self.passed_registrations)
mail.send()
return json.dumps({
'status': 'success',
'msg': _('Your email was successfully queued for sending.')
})
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
def is_past_due(self):
"""
Is it now past this problem's due date, including grace period?
"""
return (self.due is not None and
datetime.datetime.now(UTC()) > self.due)
def get_html(self):
"""Template rendering."""
logging.info(type(self.location))
logging.info(self.get_progress())
logging.info(self.runtime.seed)
logging.info(self.runtime.anonymous_student_id)
logging.info(self.runtime)
context = {
'display_name': self.display_name,
'due': self.due,
'element_id': self.location.html_id(),
'element_class': self.location.category,
'ajax_url': self.system.ajax_url,
'submitted': self.submitted,
'is_staff': self.runtime.user_is_staff,
'all_registrations': self.all_registrations,
'passed_registrations': self.passed_registrations
}
self.content = self.system.render_template('master_class.html', context)
return self.content
def return_csv(self, func, datatable, file_pointer=None, encoding="utf-8", dialect="excel"):
"""Outputs a CSV file from the contents of a datatable."""
if file_pointer is None:
response = StringIO.StringIO()
else:
response = file_pointer
writer = csv.writer(response, dialect=dialect, quotechar='"', quoting=csv.QUOTE_ALL)
encoded_row = [unicode(s).encode(encoding) for s in datatable['header']]
writer.writerow(encoded_row)
for datarow in datatable['data']:
encoded_row = [unicode(s).encode(encoding) for s in datarow]
writer.writerow(encoded_row)
if file_pointer is None:
return response.getvalue()
else:
return response
class MasterClassDescriptor(MasterClassFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
"""Descriptor for MasterClass Xmodule."""
module_class = MasterClassModule
template_dir_name = 'master_class'
| agpl-3.0 | -7,321,570,871,203,777,000 | 39.703125 | 195 | 0.540179 | false |
EmanueleCannizzaro/scons | test/DVIPDF/DVIPDFCOM.py | 1 | 2061 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/DVIPDF/DVIPDFCOM.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the ability to configure the $DVIPDFCOM construction variable.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mypdf.py', """
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in [l for l in infile.readlines() if l != '/*pdf*/\\n']:
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(tools=['default', 'dvipdf'],
DVIPDFCOM = r'%(_python_)s mypdf.py $TARGET $SOURCES')
env.PDF(target = 'aaa', source = 'aaa.dvi')
""" % locals())
test.write('aaa.dvi', "aaa.dvi\n/*pdf*/\n")
test.run()
test.must_match('aaa.pdf', "aaa.dvi\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -6,095,781,751,735,169,000 | 28.869565 | 98 | 0.70985 | false |
googleapis/googleapis-gen | google/cloud/aiplatform/v1/aiplatform-v1-py/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py | 1 | 18872 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import PipelineServiceGrpcTransport
class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport):
"""gRPC AsyncIO backend transport for PipelineService.
A service for creating and managing Vertex AI's pipelines. This
includes both ``TrainingPipeline`` resources (used for AutoML and
custom training) and ``PipelineJob`` resources (used for Vertex
Pipelines).
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'aiplatform.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'aiplatform.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_training_pipeline(self) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
Awaitable[gca_training_pipeline.TrainingPipeline]]:
r"""Return a callable for the create training pipeline method over gRPC.
        Creates a TrainingPipeline. A created
        TrainingPipeline will immediately be attempted to be run.
Returns:
Callable[[~.CreateTrainingPipelineRequest],
Awaitable[~.TrainingPipeline]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_training_pipeline' not in self._stubs:
self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline',
request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
)
return self._stubs['create_training_pipeline']
@property
def get_training_pipeline(self) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
Awaitable[training_pipeline.TrainingPipeline]]:
r"""Return a callable for the get training pipeline method over gRPC.
Gets a TrainingPipeline.
Returns:
Callable[[~.GetTrainingPipelineRequest],
Awaitable[~.TrainingPipeline]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_training_pipeline' not in self._stubs:
self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline',
request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
response_deserializer=training_pipeline.TrainingPipeline.deserialize,
)
return self._stubs['get_training_pipeline']
@property
def list_training_pipelines(self) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
Awaitable[pipeline_service.ListTrainingPipelinesResponse]]:
r"""Return a callable for the list training pipelines method over gRPC.
Lists TrainingPipelines in a Location.
Returns:
Callable[[~.ListTrainingPipelinesRequest],
Awaitable[~.ListTrainingPipelinesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_training_pipelines' not in self._stubs:
self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines',
request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize,
response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize,
)
return self._stubs['list_training_pipelines']
@property
def delete_training_pipeline(self) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete training pipeline method over gRPC.
Deletes a TrainingPipeline.
Returns:
Callable[[~.DeleteTrainingPipelineRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_training_pipeline' not in self._stubs:
self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline',
request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_training_pipeline']
@property
def cancel_training_pipeline(self) -> Callable[
[pipeline_service.CancelTrainingPipelineRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the cancel training pipeline method over gRPC.
Cancels a TrainingPipeline. Starts asynchronous cancellation on
the TrainingPipeline. The server makes a best effort to cancel
the pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]
or other methods to check whether the cancellation succeeded or
whether the pipeline completed despite cancellation. On
successful cancellation, the TrainingPipeline is not deleted;
instead it becomes a pipeline with a
[TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state]
is set to ``CANCELLED``.
Returns:
Callable[[~.CancelTrainingPipelineRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'cancel_training_pipeline' not in self._stubs:
self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary(
'/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline',
request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['cancel_training_pipeline']
__all__ = (
'PipelineServiceGrpcAsyncIOTransport',
)
| apache-2.0 | 227,159,472,070,711,040 | 46.417085 | 109 | 0.632524 | false |
puttarajubr/commcare-hq | custom/ilsgateway/tests/test_locations_sync.py | 1 | 4004 | from datetime import datetime
import json
import os
from django.test import TestCase
from corehq.apps.commtrack.models import CommtrackConfig
from corehq.apps.commtrack.tests.util import bootstrap_domain as initial_bootstrap
from corehq.apps.locations.models import Location, SQLLocation
from custom.ilsgateway.api import Location as Loc, ILSGatewayAPI
from custom.ilsgateway.tests.mock_endpoint import MockEndpoint
from custom.logistics.api import ApiSyncObject
from custom.logistics.commtrack import synchronization
from custom.logistics.models import MigrationCheckpoint
TEST_DOMAIN = 'ilsgateway-commtrack-locations-test'
class LocationSyncTest(TestCase):
def setUp(self):
self.endpoint = MockEndpoint('http://test-api.com/', 'dummy', 'dummy')
self.api_object = ILSGatewayAPI(TEST_DOMAIN, self.endpoint)
self.datapath = os.path.join(os.path.dirname(__file__), 'data')
domain = initial_bootstrap(TEST_DOMAIN)
CommtrackConfig(domain=domain.name).save()
self.api_object.prepare_commtrack_config()
for location in Location.by_domain(TEST_DOMAIN):
location.delete()
def test_create_facility_location(self):
with open(os.path.join(self.datapath, 'sample_locations.json')) as f:
location = Loc(**json.loads(f.read())[0])
ilsgateway_location = self.api_object.location_sync(location)
self.assertEqual(ilsgateway_location.name, location.name)
self.assertEqual(ilsgateway_location.location_type, location.type)
self.assertEqual(ilsgateway_location.longitude, float(location.longitude))
self.assertEqual(ilsgateway_location.latitude, float(location.latitude))
self.assertEqual(int(ilsgateway_location.parent.sql_location.external_id), location.parent_id)
self.assertIsNotNone(ilsgateway_location.linked_supply_point())
self.assertIsNotNone(ilsgateway_location.sql_location.supply_point_id)
def test_create_non_facility_location(self):
with open(os.path.join(self.datapath, 'sample_locations.json')) as f:
location = Loc(**json.loads(f.read())[1])
ilsgateway_location = self.api_object.location_sync(location)
self.assertEqual(ilsgateway_location.name, location.name)
self.assertEqual(ilsgateway_location.location_type, location.type)
self.assertEqual(ilsgateway_location.longitude, float(location.longitude))
self.assertEqual(ilsgateway_location.latitude, float(location.latitude))
self.assertIsNone(ilsgateway_location.parent)
self.assertIsNone(ilsgateway_location.linked_supply_point())
self.assertIsNone(ilsgateway_location.sql_location.supply_point_id)
def test_locations_migration(self):
checkpoint = MigrationCheckpoint(
domain=TEST_DOMAIN,
start_date=datetime.utcnow(),
date=datetime.utcnow(),
api='product',
limit=100,
offset=0
)
location_api = ApiSyncObject(
'location_facility',
self.endpoint.get_locations,
self.api_object.location_sync,
filters=dict(type='facility')
)
synchronization(location_api, checkpoint, None, 100, 0)
self.assertEqual('location_facility', checkpoint.api)
self.assertEqual(100, checkpoint.limit)
self.assertEqual(0, checkpoint.offset)
self.assertEqual(5, len(list(Location.by_domain(TEST_DOMAIN))))
self.assertEqual(5, SQLLocation.objects.filter(domain=TEST_DOMAIN).count())
sql_location = SQLLocation.objects.get(domain=TEST_DOMAIN, site_code='DM520053')
self.assertEqual('FACILITY', sql_location.location_type.name)
self.assertIsNotNone(sql_location.supply_point_id)
sql_location2 = SQLLocation.objects.get(domain=TEST_DOMAIN, site_code='region-dodoma')
self.assertEqual('REGION', sql_location2.location_type.name)
self.assertIsNone(sql_location2.supply_point_id)
| bsd-3-clause | 6,287,786,364,255,786,000 | 47.829268 | 102 | 0.709041 | false |
davidvg/google_api | google_api/gmail_api.py | 1 | 13120 | '''
Basic Python3 implementation of some functionality of the Gmail API.
Based on the code from the Gmail API documentation.
Requires a 'secret file' to allow authentication (see [1])
Installation
-----------
In Python3, install the API using pip3:
pip3 install --upgrade google-api-python-client
Install packages:
python3 setup.py develop
[1] https://developers.google.com/gmail/api/quickstart/python
'''
import httplib2
import os.path
import base64
import email
import time
import datetime as dt
from googleapiclient import discovery
from googleapiclient.http import BatchHttpRequest as batchRequest
from oauth2client import file, client, tools
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail_api-python.json
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = '../client_secret.json'
APPLICATION_NAME = 'Gmail API downloader'
class Client(object):
def __init__(self, scopes_=SCOPES, secret_=CLIENT_SECRET_FILE):
'''
Initialize the class' variables
'''
# Internals
self.__scopes = scopes_
self.__secret = secret_
self.service = None
# Members
self.msg_ids = []
self.raw_messages = []
self.messages = []
self.__format = None
# Path for storing credentials
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail_api-python.json')
store = file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(self.__secret, self.__scopes)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
# Build the service
http = credentials.authorize(httplib2.Http())
self.service = discovery.build('gmail', 'v1', http=http)
def __parse_id(self, id_):
'''
Parses an id when passed to a function, to make sure it works for
every method.
Seems redundant with Client.get_id() when called on a message.
'''
if isinstance(id_, dict):
return id_['id']
elif isinstance(id_, str):
return id_
else:
# Is it a message?
            try:
                return id_['id']
            except (TypeError, KeyError):
                print(' >>>> __parse_id(): No valid message id.')
                return None
def get_msg_ids_from_labels(self, labels):
        '''
        Store in self.msg_ids the ids of all messages carrying the given labels.
        '''
# Clear previous msg_ids
self.msg_ids = []
response = self.service.users().messages().list(userId='me',
labelIds=labels
).execute()
# First page of results
if 'messages' in response:
self.msg_ids.extend(response['messages'])
# Check if there are more result pages
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = self.service.users().messages().list(
userId='me',
labelIds=labels,
pageToken = page_token
).execute()
self.msg_ids.extend(response['messages'])
    def get_msg_ids_from_query(self, query):
        '''
        Store in self.msg_ids the ids of all messages matching the query.
        '''
        # Clear previous msg_ids
self.msg_ids = []
response = self.service.users().messages().list(userId='me',
q=query,
).execute()
# First page of results
if 'messages' in response:
self.msg_ids.extend(response['messages'])
# Check if there are more result pages
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = self.service.users().messages().list(
userId='me',
q=query,
pageToken = page_token
).execute()
self.msg_ids.extend(response['messages'])
def get_batch_messages(self, msg_ids, format='full'):
'''
Download a group of messages, given its ids.
Arguments:
- msg_ids: a list of message ids as returned by the API.
- format: the format for the downloaded message: 'full', 'raw',
'metadata', 'minimal'
Returns:
- A list with the messages.
'''
# Store current format
        self.__format = format
messages = []
def callback_(req_id, resp, exception):
if exception:
print(' >>> CallbackException')
pass
else:
messages.append(resp)
def batch_request():
batch = self.service.new_batch_http_request(callback_)
ids_ = [elem['id'] for elem in msg_ids]
for id_ in ids_:
batch.add(self.service.users().messages().get(userId='me',
id=id_,
format=format))
batch.execute()
        if len(msg_ids) < 1000:
batch_request()
else:
# To Do: implement the case for 1000+ messages
pass
self.raw_messages = messages
def get_message(self, msg_id, format='full'):
# Store current format
        self.__format = format
# Check type of msg_id argument
msg_id = self.__parse_id(msg_id)
# Get messages
res = self.service.users().messages().get(userId='me',
id=msg_id,
format=format).execute()
return res
def get_messages(self, msg_ids=None, labels=None, query=None, format='full'):
# Store current format
        self.__format = format
# Get the id for messages corresponding to labels/query
if msg_ids:
self.msg_ids = msg_ids
elif labels:
self.get_msg_ids_from_labels(labels=labels)
elif query:
self.get_msg_ids_from_query(query=query)
else:
print(' >>> get_messages(): No labels or query passed. Nothing is done.')
# Download the messages
self.get_batch_messages(self.msg_ids, format=format)
### Parsing and decoding the messages
'''
Message structure for the different formats
* Full
----
- snippet
- internalDate: ms from Epoch
- id
- payload
- filename
- headers: list of 26 dicts with keys {'name', 'value'}
- Received: date (multiple occurences ?)
- MIME-Version
- Content-Type: text/html, charset
- From
- Subject
- ...
- mimeType: text/html, ...
- parts
- body: dict
- data: base64
- size: int
- sizeEstimate
- historyId
- labelIds: list of labels
- threadId
* Raw
---
- threadId
- snippet
- historyId
- internalDate
- id
- raw: base64
- labelIds
- sizeEstimate
* Metadata: dict with 8 dicts
--------
- threadId
- snippet
- historyId
- inernalDate
- id
- labelIds
- payload: dict
- mimeType: text/html, ...
- headers
- sizeEstimate
* Minimal
-------
- historyId
- id
- labelIds
- sizeEstimate
- snippet
- threadId
'''
def get_id(self, message):
'''
Returns the message id for a single raw message.
'''
return str(message['id'])
def get_labels(self, message):
'''
Returns a list of labels for a single raw message.
'''
return message['labelIds']
def modify_labels(self, obj, add=[], remove=[]):
"""
Adds or removes labels from a message.
"""
id_ = self.__parse_id(obj)
self.service.users().messages().modify(
userId='me',
id=id_,
body={'addLabelIds': add,
'removeLabelIds': remove}).execute()
def is_unread(self, message):
        # Note: expects a decoded message (checks the 'labels' key).
return 'UNREAD' in message['labels']
def mark_as_read(self, obj):
id_ = self.__parse_id(obj)
self.modify_labels(id_, remove=['UNREAD'])
def get_date(self, message):
''' Returns the reception date for a single raw message in a string
using strftime.
'''
internal = float(message['internalDate'])/1000. # seconds from Epoch
date = time.gmtime(internal)
res = dt.datetime(year=date.tm_year,
month=date.tm_mon,
day=date.tm_mday,
hour=date.tm_hour,
minute=date.tm_min,
second=date.tm_sec)
return res.strftime('%Y-%m-%dT%H:%M:%S')
def get_subject(self, message):
headers = message['payload']['headers']
for h in headers:
if h['name'] == 'Subject':
return h['value']
return None
def get_body(self, message):
        if self.__format == 'full':
payload = message['payload']
if not 'parts' in payload:
raw = payload['body']['data']
else:
### CHECK THIS!!
raw = payload['parts'][0]['body']['data']
body = base64.urlsafe_b64decode(raw.encode('ASCII'))
        elif self.__format == 'raw':
raw = message['raw']
raw = base64.urlsafe_b64decode(raw.encode('ASCII'))
mime = email.message_from_bytes(raw)
body = mime.get_payload(decode=True)
return body
def decode_messages(self, keys=None):
'''
For 'full' and 'raw' formats; 'minimal' and 'metadata' have no message
body.
Takes messages stored in Client.raw_messages and extracts info from them.
The result is stored in Client.messages
'''
self.messages = []
for msg in self.raw_messages:
decoded = {}
if not keys:
keys = ['id', 'date', 'snippet', 'body', 'labels', 'subject',
'headers']
for key in keys:
decoded[key] = None
decoded['id'] = self.get_id(msg)
decoded['date'] = self.get_date(msg)
decoded['labels'] = self.get_labels(msg)
decoded['snippet'] = msg['snippet']
            if self.__format == 'full':
                decoded['body'] = self.get_body(msg)
                decoded['subject'] = self.get_subject(msg)
                decoded['headers'] = msg['payload']['headers']
            elif self.__format == 'raw':
                decoded['body'] = self.get_body(msg)
            elif self.__format == 'metadata':
                # At the moment it returns the payload dictionary
                decoded['headers'] = msg['payload']['headers']
            elif self.__format == 'minimal':
                pass
self.messages.append(decoded)
def write(self, message, use='date', to='html'):
"""
Write the body of the message to a file.
- use: which key use to generate name (currently only 'date')
- to: file extension
"""
if use is 'date':
name = message[use]
else:
pass
out = '%s.%s' % (name, to)
        if self.__format__ in ('full', 'raw'):
body = message['body'].decode('utf-8')
with open(out, 'w') as f:
f.write(body)
else:
print(' >>> Client.write(): no body to write (format = %s)'
% self.__format__)
def main():
pass
if __name__ == '__main__':
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
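    # Quick manual test: fetch two messages carrying label 'Label_59' in 'full'
    # format, decode them, and write the body of the first one to a .txt file.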
gm = Client()
gm.get_msg_ids_from_labels('Label_59')
ids = gm.msg_ids[:2]
gm.get_messages(msg_ids=ids, format='full')
gm.decode_messages()
m = gm.messages[0]
gm.write(m, to='txt')
| mit | -5,331,380,682,368,593,000 | 32.384224 | 88 | 0.509604 | false |
nens/threedi-verification | threedi_verification/views.py | 1 | 6949 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt.
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
from collections import OrderedDict
import itertools
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView
from threedi_verification.models import LibraryVersion
from threedi_verification.models import TestCase
from threedi_verification.models import TestRun
logger = logging.getLogger(__name__)
class BaseView(TemplateView):
template_name = 'threedi_verification/base.html'
title = _("3Di library test")
subtitle = None
back_link = None
back_link_title = None
# some hacky way to get the {% debug %} var without RequestContext things
def get_context_data(self, **kwargs):
context = super(BaseView, self).get_context_data(**kwargs)
context['DEBUG'] = settings.DEBUG
return context
class HomeView(BaseView):
template_name = 'threedi_verification/home.html'
subtitle = _("overview")
@cached_property
def latest_library_version(self):
return LibraryVersion.objects.first()
@cached_property
def latest_test_runs(self):
active_runs = TestRun.objects.filter(
test_case_version__test_case__is_active=True)
return active_runs[:5]
class LibraryVersionsView(BaseView):
template_name = 'threedi_verification/library_versions.html'
title = _("Library versions")
subtitle = _("newest at the top")
back_link_title = _("Back to home")
@property
def back_link(self):
return reverse('threedi_verification.home')
def library_versions(self):
return LibraryVersion.objects.all().prefetch_related('test_runs')[:50]
class LibraryVersionView(BaseView):
template_name = 'threedi_verification/library_version.html'
title = _("Library version")
back_link_title = _("Back to library versions overview")
@property
def back_link(self):
return reverse('threedi_verification.library_versions')
@cached_property
def library_version(self):
return get_object_or_404(LibraryVersion, pk=self.kwargs['pk'])
@cached_property
def subtitle(self):
return 'from %s using %s' % (
self.library_version.last_modified,
self.library_version.library_name)
@cached_property
def all_test_runs(self):
return self.library_version.test_runs.filter(
test_case_version__test_case__has_csv=True).order_by(
'test_case_version__test_case', '-run_started')
@cached_property
def crashed_test_runs(self):
return [test_run for test_run in self.all_test_runs
if test_run.has_crashed]
# TODO: remove this function, is obsolete/unused!
@cached_property
def completed_test_runs(self):
test_runs = [test_run for test_run in self.all_test_runs
if (not test_run.has_crashed) and test_run.duration]
per_test_case = OrderedDict()
for test_case, group in itertools.groupby(
test_runs,
lambda test_run: test_run.test_case_version.test_case):
per_test_case[test_case] = list(group)
return per_test_case
def get_completed_test_runs(self, test_runs):
per_test_case = OrderedDict()
for test_case, group in itertools.groupby(
test_runs,
lambda test_run: test_run.test_case_version.test_case):
per_test_case[test_case] = list(group)
return per_test_case
@cached_property
def _test_runs_by_category(self):
"""Grouped by category"""
test_runs = [test_run for test_run in self.all_test_runs
if (not test_run.has_crashed) and test_run.duration]
# IMPORTANT: test_runs needs to be SORTED for groupby to work
test_runs.sort(
key=lambda test_run: test_run.test_case_version.test_case.category)
per_category = {}
for category, group in itertools.groupby(
test_runs,
lambda testrun: testrun.test_case_version.test_case.category):
per_category[category] = list(group)
return per_category
@cached_property
def test_runs_by_category(self):
d = {}
for category, group in self._test_runs_by_category.items():
d[category] = self.get_completed_test_runs(group)
return d
class TestCasesView(BaseView):
template_name = 'threedi_verification/test_cases.html'
title = _("Test cases")
back_link_title = _("Back to home")
@property
def back_link(self):
return reverse('threedi_verification.home')
def test_cases(self):
return TestCase.objects.filter(is_active=True)
class TestCaseView(BaseView):
template_name = 'threedi_verification/test_case.html'
back_link_title = _("Back to test cases overview")
@property
def back_link(self):
return reverse('threedi_verification.test_cases')
@cached_property
def title(self):
return _("Test case %s") % self.test_case.pretty_name
@cached_property
def test_case(self):
return get_object_or_404(TestCase, pk=self.kwargs['pk'])
@cached_property
def grouped_test_runs(self):
per_test_case_version = OrderedDict()
test_case_versions = self.test_case.test_case_versions.all().order_by(
'-last_modified')
for test_case_version in test_case_versions:
test_runs = test_case_version.test_runs.all().order_by(
'library_version')
per_test_case_version[test_case_version] = list(test_runs)
return per_test_case_version
class TestRunView(BaseView):
template_name = 'threedi_verification/test_run.html'
title = _("Test run")
back_link_title = _("Back to library version")
@property
def back_link(self):
return reverse('threedi_verification.library_version',
kwargs={'pk': self.test_run.library_version.pk})
@cached_property
def test_run(self):
return get_object_or_404(TestRun, pk=self.kwargs['pk'])
@cached_property
def subtitle(self):
return 'for %s' % self.test_run.test_case_version
@cached_property
def report(self):
return self.test_run.report
def plain_log(request, pk=None):
test_run = TestRun.objects.get(pk=pk)
crash_content = test_run.report.get('log')
regular_content = test_run.report.get('successfully_loaded_log')
content = crash_content or regular_content
return HttpResponse(content, content_type='text/plain')
| gpl-3.0 | 6,112,501,060,165,958,000 | 32.090476 | 79 | 0.65477 | false |
Lax/Packages | ganglia-gmond-modules-python-plugins/usr/lib64/ganglia/python_modules/sockstat.py | 1 | 2803 | # sockstat module for ganglia 3.1.x and above
# Copyright (C) Wang Jian <[email protected]>, 2009
import os, sys
import time
last_poll_time = 0
sockstats = {
'tcp_total': 0,
'tcp_established': 0,
'tcp_orphan': 0,
'tcp_timewait': 0,
'udp_total': 0 }
def metric_update():
global sockstats
f = open('/proc/net/sockstat', 'r')
for l in f:
line = l.split()
if (line[0] == 'TCP:'):
sockstats['tcp_total'] = int(line[2])
sockstats['tcp_orphan'] = int(line[4])
sockstats['tcp_established'] = int(line[2]) - int(line[4])
sockstats['tcp_timewait'] = int(line[6])
continue
if (line[0] == 'UDP:'):
sockstats['udp_total'] = int(line[2])
continue
f.close()
def metric_read(name):
global last_poll_time
global sockstats
now_time = time.time()
'''time skewed'''
if now_time < last_poll_time:
last_poll_time = now_time
return 0
'''we cache statistics for 2 sec, it's enough for polling all 3 counters'''
if (now_time - last_poll_time) > 2:
metric_update()
last_poll_time = now_time
return sockstats[name]
descriptors = [{
'name': 'tcp_total',
'call_back': metric_read,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total TCP sockets',
'groups': 'network',
},
{
'name': 'tcp_established',
'call_back': metric_read,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'TCP established sockets',
'groups': 'network',
},
{
'name': 'tcp_timewait',
'call_back': metric_read,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'TCP timewait sockets',
'groups': 'network',
},
{
'name': 'udp_total',
'call_back': metric_read,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total UDP sockets',
'groups': 'network',
}]
def metric_init(params):
return descriptors
def metric_cleanup():
pass
# for unit testing
if __name__ == '__main__':
metric_init(None)
for d in descriptors:
v = d['call_back'](d['name'])
print '%s = %d' % (d['name'], v)
print "----"
while 1:
time.sleep(1)
for d in descriptors:
v = d['call_back'](d['name'])
print '%s = %d' % (d['name'], v)
print "----"
| gpl-3.0 | -1,002,256,754,032,724,500 | 23.163793 | 79 | 0.49447 | false |
paolomonella/ursus | xmlToolBox/minidomToolBox.py | 1 | 7687 | #! /usr/bin/env python
# This is a toolbox I'm using to look for specific things in the XML DOM
##################
# Import modules #
##################
from __future__ import print_function
from xml.dom.minidom import parse, parseString
#import xml.dom.minidom
#################
# Parse the XML #
#################
xmldoc=parse('/home/ilbuonme/siti/paolo.monella/ursus/casanatensis.xml')
###########
# Methods #
###########
def checkIDs():
"""
This function checks whether there are duplicated or
non-sequential xml:id's for <w> elements.
"""
wordElementList = xmldoc.getElementsByTagName('ref')
prevIdN = 0
for r in wordElementList:
#print('cRef: '+r.attributes.getNamedItem('cRef').nodeValue)
for c in r.childNodes:
if c.nodeType == c.ELEMENT_NODE and c.tagName == 'w':
#print(c.attributes.getNamedItem('xml:id').nodeValue, end=', ')
myId = c.attributes.getNamedItem('xml:id').nodeValue
myIdN = int(myId[1:])
#print(myIdN, end=', ')
                if not myIdN > prevIdN:
                    print('Trouble! Not greater...')
                    #print(myIdN, 'is greater than ', prevIdN)
                if myIdN == prevIdN:
                    print('Trouble! Equal')
                prevIdN = myIdN  # remember the last id for the next comparison
def searchPcChildrenOfUnclear():
"""
Print all <pc> elments that are children of <unclear>.
"""
wordElementList = xmldoc.getElementsByTagName('ref')
x = False
for r in wordElementList:
#print('cRef: '+r.attributes.getNamedItem('cRef').nodeValue)
for c in r.childNodes:
if c.nodeType == c.ELEMENT_NODE:
if c.tagName == 'w' and x:
                    print(c.attributes.getNamedItem('xml:id').nodeValue, end=' comes after ')
x = False
if c.tagName == 'unclear':
for w in c.childNodes:
#print(x, end=', ')
if w.nodeType == w.ELEMENT_NODE and w.tagName == 'pc':
print('Eureka!')
print(w.attributes.getNamedItem('n').nodeValue)
x = True
def searchTextNodesChildrenOfUnclear():
"""
Print all textNodes that are children of <unclear>.
"""
wordElementList = xmldoc.getElementsByTagName('ref')
for r in wordElementList:
#print('cRef: '+r.attributes.getNamedItem('cRef').nodeValue)
for c in r.childNodes:
if c.nodeType == c.ELEMENT_NODE:
if c.tagName == 'unclear':
for w in c.childNodes:
if w.nodeType == w.ELEMENT_NODE and w.tagName != 'w':
#print(w.attributes.getNamedItem('n').nodeValue)
print(w.tagName)
if w.nodeType != w.ELEMENT_NODE and w.nodeValue != '\n' and w.nodeValue != '\n\t':
print('"'+w.nodeValue+'"\n---\n')
def listChildrenOfAnElement(elemName):
"""
Return a list of elements that are direct children of the
element with tag name elemName (e.g. 'w' or 'ref').
"""
wordElementList = xmldoc.getElementsByTagName(elemName)
cs=[]
for e in wordElementList:
for c in e.childNodes:
if c.nodeType == c.ELEMENT_NODE:
cs.append(c.tagName)
return(set(cs))
def searchAttrib(elemName):
"""
Check attributes of an element
"""
L = []
wordElementList = xmldoc.getElementsByTagName(elemName)
for e in wordElementList:
if e.attributes.getNamedItem('type'):
n = e.attributes.getNamedItem('type').nodeValue
if n == 'emendation':
if not e.attributes.getNamedItem('cert'):
L.append(e.attributes.getNamedItem('subtype').nodeValue)
#L.append(e.attributes.getNamedItem('subtype').nodeValue)
for l in set(L):
print(l)
def listDescendantsOfElement(myElement):
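    '''
    Print the set of tag names of all elements that are descendants
    (at any depth) of elements with tag name myElement (e.g. 'w' or 'choice').
    '''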
ds=[]
elementList = xmldoc.getElementsByTagName(myElement)
for w in elementList:
d = w.getElementsByTagName('*')
for x in d:
#if x.nodeType == x.ELEMENT_NODE and x.tagName != 'note':
if x.nodeType == x.ELEMENT_NODE:
ds.append(x.tagName)
for y in set(ds):
print(y)
def graphemeLint():
"""
This function checks that all graphemes encoded directly within
<w> elements (or within those of its descendant element that are
supposed to include graphemes) are actually declared in the
Graphemic Table of Signs. If they are not declared, it prints
an 'Alas!' message.
"""
# Import the graphemes in column 'Grapheme' of GToS.csv into list 'gl'
gl = []
with open('/home/ilbuonme/siti/paolo.monella/ursus/GToS.csv') as gtosFile:
lineCount=0
for l in gtosFile:
if lineCount>0: # I'm skipping the first line (which has the column headers)
gl.append(l[0])
lineCount += 1
# Possible descendants of <w>
allowedElem=['lb', 'pc', 'am', 'choice', 'note', 'expan', 'add', 'hi', 'abbr', 'gap']
noGraphemeContent=['lb', 'pc', 'gap', 'note', 'expan', 'choice'] # <expan> has alphabemes, not graphemes, as content
graphemeContent=['am', 'hi']
# Check the descendants of <w> (elements and textNodes)
elementList = xmldoc.getElementsByTagName('w')
for w in elementList:
g = '' # This is a string including all graphemes in the <w> element
for c in w.childNodes:
if c.nodeType != c.ELEMENT_NODE: # With this we harvest all text nodes directly children of <w>
g = g + c.nodeValue
for x in w.getElementsByTagName('*'):
if x.tagName not in allowedElem:
print('<' + x.tagName + '> is not allowed as a descendant of <w>')
elif x.tagName in graphemeContent: # These elements only have one textNode child, with graphemes
g = g + x.firstChild.nodeValue
elif x.tagName == 'abbr': # Its children can be <am> or <hi> (already taken care of), or textNode
for y in x.childNodes:
if y.nodeType != y.ELEMENT_NODE: # textNode child
g = g + y.nodeValue
else: # element child: the only case as of 2017-03-16 is a <choice> child, so
# no need to worry about this, because its children <abbr>, <expan>
# and <am> are already taken care of
pass
elif x.tagName == 'add': # Its children can be <w> or textNode
for y in x.childNodes:
if y.nodeType != y.ELEMENT_NODE: # textNode child
g = g + y.nodeValue
else: # element child: the only case as of 2017-03-16 is a <choice> child, so
# no need to worry about this, because its children <abbr>, <expan>
# and <am> are already taken care of
pass
for gx in g: # For each character in the graphematic content of <w>
if (gx not in gl) and (gx not in ['\n', '\t']): # If it's not in the GToS (and it's not a tab or newline)
print('Alas! Character "'+gx+'" is not in the Graphemic Table of Signs')
##################
# Call functions #
##################
# List children of <w>
# for x in listChildrenOfAnElement('w'): print(x, end=', ')
# print()
# List descendants of <w>
#graphemeLint()
#listDescendantsOfElement('choice')
searchAttrib('note')
| gpl-2.0 | 6,549,222,712,097,434,000 | 38.420513 | 120 | 0.552751 | false |
apdavison/python-neo | neo/test/iotest/test_neuralynxio.py | 1 | 18949 | """
Tests of neo.io.neuralynxio.py
"""
import time
import warnings
import unittest
import numpy as np
import quantities as pq
from neo.test.iotest.common_io_test import BaseTestIO
from neo.core import *
from neo.io.neuralynxio import NeuralynxIO
from neo import AnalogSignal
class CommonNeuralynxIOTest(BaseTestIO, unittest.TestCase, ):
ioclass = NeuralynxIO
files_to_test = [
'BML/original_data',
'BML_unfilledsplit/original_data',
'Cheetah_v1.1.0/original_data',
'Cheetah_v4.0.2/original_data',
'Cheetah_v5.5.1/original_data',
'Cheetah_v5.6.3/original_data',
'Cheetah_v5.7.4/original_data',
# 'Cheetah_v6.3.2/incomplete_blocks',
'Pegasus_v2.1.1']
files_to_download = [
'BML/original_data/CSC1_trunc.Ncs',
'BML/plain_data/CSC1_trunc.txt',
'BML/README.txt',
'BML_unfilledsplit/original_data/unfilledSplitRecords.Ncs',
'BML_unfilledsplit/plain_data/unfilledSplitRecords.txt',
'BML_unfilledsplit/README.txt',
'Cheetah_v1.1.0/original_data/CSC67_trunc.Ncs',
'Cheetah_v1.1.0/README.txt',
'Cheetah_v1.1.0/plain_data/CSC67_trunc.txt',
'Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs',
'Cheetah_v4.0.2/plain_data/CSC14_trunc.txt',
'Cheetah_v4.0.2/README.txt',
'Cheetah_v5.5.1/original_data/CheetahLogFile.txt',
'Cheetah_v5.5.1/original_data/CheetahLostADRecords.txt',
'Cheetah_v5.5.1/original_data/Events.nev',
'Cheetah_v5.5.1/original_data/STet3a.nse',
'Cheetah_v5.5.1/original_data/STet3b.nse',
'Cheetah_v5.5.1/original_data/Tet3a.ncs',
'Cheetah_v5.5.1/original_data/Tet3b.ncs',
'Cheetah_v5.5.1/plain_data/STet3a.txt',
'Cheetah_v5.5.1/plain_data/STet3b.txt',
'Cheetah_v5.5.1/plain_data/Tet3a.txt',
'Cheetah_v5.5.1/plain_data/Tet3b.txt',
'Cheetah_v5.5.1/plain_data/Events.txt',
'Cheetah_v5.5.1/README.txt',
'Cheetah_v5.6.3/original_data/CheetahLogFile.txt',
'Cheetah_v5.6.3/original_data/CheetahLostADRecords.txt',
'Cheetah_v5.6.3/original_data/Events.nev',
'Cheetah_v5.6.3/original_data/CSC1.ncs',
'Cheetah_v5.6.3/original_data/CSC2.ncs',
'Cheetah_v5.6.3/original_data/TT1.ntt',
'Cheetah_v5.6.3/original_data/TT2.ntt',
'Cheetah_v5.6.3/original_data/VT1.nvt',
'Cheetah_v5.6.3/plain_data/Events.txt',
'Cheetah_v5.6.3/plain_data/CSC1.txt',
'Cheetah_v5.6.3/plain_data/CSC2.txt',
'Cheetah_v5.6.3/plain_data/TT1.txt',
'Cheetah_v5.6.3/plain_data/TT2.txt',
'Cheetah_v5.6.3/original_data/VT1.nvt',
'Cheetah_v5.7.4/original_data/CSC1.ncs',
'Cheetah_v5.7.4/original_data/CSC2.ncs',
'Cheetah_v5.7.4/original_data/CSC3.ncs',
'Cheetah_v5.7.4/original_data/CSC4.ncs',
'Cheetah_v5.7.4/original_data/CSC5.ncs',
'Cheetah_v5.7.4/original_data/Events.nev',
'Cheetah_v5.7.4/plain_data/CSC1.txt',
'Cheetah_v5.7.4/plain_data/CSC2.txt',
'Cheetah_v5.7.4/plain_data/CSC3.txt',
'Cheetah_v5.7.4/plain_data/CSC4.txt',
'Cheetah_v5.7.4/plain_data/CSC5.txt',
'Cheetah_v5.7.4/plain_data/Events.txt',
'Cheetah_v5.7.4/README.txt',
'Pegasus_v2.1.1/Events_0008.nev',
'Cheetah_v6.3.2/incomplete_blocks/CSC1_reduced.ncs',
'Cheetah_v6.3.2/incomplete_blocks/Events.nev',
'Cheetah_v6.3.2/incomplete_blocks/README.txt']
class TestCheetah_v551(CommonNeuralynxIOTest, unittest.TestCase):
cheetah_version = '5.5.1'
files_to_test = []
def test_read_block(self):
"""Read data in a certain time range into one block"""
dirname = self.get_filename_path('Cheetah_v5.5.1/original_data')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
block = nio.read_block()
# Everything put in one segment
self.assertEqual(len(block.segments), 2)
seg = block.segments[0]
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 2)
self.assertEqual(seg.analogsignals[0].sampling_rate, 32. * pq.kHz)
self.assertEqual(len(seg.spiketrains), 2)
# Testing different parameter combinations
block = nio.read_block(load_waveforms=True)
self.assertEqual(len(block.segments[0].analogsignals), 1)
self.assertEqual(len(block.segments[0].spiketrains), 2)
self.assertEqual(block.segments[0].spiketrains[0].waveforms.shape[0],
block.segments[0].spiketrains[0].shape[0])
self.assertGreater(len(block.segments[0].events), 0)
# self.assertEqual(len(block.channel_indexes[-1].units[0].spiketrains), 2) # 2 segment
# block = nio.read_block(load_waveforms=True, units_group_mode='all-in-one')
# self.assertEqual(len(block.channel_indexes[-1].units), 2) # 2 units
# block = nio.read_block(load_waveforms=True, units_group_mode='split-all')
# self.assertEqual(len(block.channel_indexes[-1].units), 1) # 1 units by ChannelIndex
def test_read_segment(self):
dirname = self.get_filename_path('Cheetah_v5.5.1/original_data')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
# read first segment entirely
seg = nio.read_segment(seg_index=0, time_slice=None)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 2)
self.assertEqual(seg.analogsignals[0].sampling_rate, 32 * pq.kHz)
self.assertEqual(len(seg.spiketrains), 2)
# Testing different parameter combinations
seg = nio.read_segment(seg_index=0, load_waveforms=True)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.spiketrains), 2)
self.assertTrue(len(seg.spiketrains[0].waveforms) > 0)
self.assertTrue(len(seg.events) > 0)
class TestCheetah_v563(CommonNeuralynxIOTest, unittest.TestCase):
cheetah_version = '5.6.3'
files_to_test = []
def test_read_block(self):
"""Read data in a certain time range into one block"""
dirname = self.get_filename_path('Cheetah_v5.6.3/original_data')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
block = nio.read_block()
# There are two segments due to gap in recording
self.assertEqual(len(block.segments), 2)
for seg in block.segments:
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 2)
self.assertEqual(seg.analogsignals[0].sampling_rate, 2. * pq.kHz)
self.assertEqual(len(seg.spiketrains), 8)
# Testing different parameter combinations
block = nio.read_block(load_waveforms=True)
self.assertEqual(len(block.segments[0].analogsignals), 1)
self.assertEqual(len(block.segments[0].spiketrains), 8)
self.assertEqual(block.segments[0].spiketrains[0].waveforms.shape[0],
block.segments[0].spiketrains[0].shape[0])
# this is tetrode data, containing 32 samples per waveform
self.assertEqual(block.segments[0].spiketrains[0].waveforms.shape[1], 4)
self.assertEqual(block.segments[0].spiketrains[0].waveforms.shape[-1], 32)
self.assertGreater(len(block.segments[0].events), 0)
# self.assertEqual(len(block.channel_indexes[-1].units[0].spiketrains), 2)
# block = nio.read_block(load_waveforms=True, units_group_mode='all-in-one')
# self.assertEqual(len(block.channel_indexes[-1].units), 8)
# block = nio.read_block(load_waveforms=True, units_group_mode='split-all')
# self.assertEqual(len(block.channel_indexes[-1].units), 1) # 1 units by ChannelIndex
def test_read_segment(self):
dirname = self.get_filename_path('Cheetah_v5.5.1/original_data')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
# read first segment entirely
seg = nio.read_segment(seg_index=0, time_slice=None)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 2)
self.assertEqual(seg.analogsignals[0].sampling_rate, 32 * pq.kHz)
self.assertEqual(len(seg.spiketrains), 2)
# Testing different parameter combinations
seg = nio.read_segment(seg_index=0, load_waveforms=True)
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.spiketrains), 2)
self.assertTrue(len(seg.spiketrains[0].waveforms) > 0)
self.assertTrue(len(seg.events) > 0)
class TestCheetah_v574(CommonNeuralynxIOTest, unittest.TestCase):
cheetah_version = '5.7.4'
files_to_test = []
def test_read_block(self):
dirname = self.get_filename_path('Cheetah_v5.7.4/original_data')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
block = nio.read_block()
# Everything put in one segment
seg = block.segments[0]
self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(seg.analogsignals[0].shape[-1], 5)
self.assertEqual(seg.analogsignals[0].sampling_rate, 32 * pq.kHz)
self.assertEqual(len(seg.spiketrains), 0) # no nse files available
# Testing different parameter combinations
block = nio.read_block(load_waveforms=True)
self.assertEqual(len(block.segments[0].analogsignals), 1)
self.assertEqual(len(block.segments[0].spiketrains), 0)
self.assertGreater(len(block.segments[0].events), 0)
block = nio.read_block(signal_group_mode='split-all')
self.assertEqual(len(block.groups), 5)
block = nio.read_block(signal_group_mode='group-by-same-units')
self.assertEqual(len(block.groups), 1)
class TestPegasus_v211(CommonNeuralynxIOTest, unittest.TestCase):
pegasus_version = '2.1.1'
files_to_test = []
def test_read_block(self):
dirname = self.get_filename_path('Pegasus_v2.1.1')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
block = nio.read_block()
# Everything put in one segment
seg = block.segments[0]
self.assertEqual(len(seg.analogsignals), 0) # no ncs file available
self.assertGreater(len(block.segments[0].events), 1) # single nev file available
self.assertEqual(len(seg.spiketrains), 0) # no nse files available
# Testing different parameter combinations
block = nio.read_block(load_waveforms=True)
self.assertEqual(len(block.segments[0].spiketrains), 0)
self.assertGreater(len(block.segments[0].events), 1)
block = nio.read_block(signal_group_mode='split-all')
self.assertEqual(len(block.channel_indexes), 0)
block = nio.read_block(signal_group_mode='group-by-same-units')
self.assertEqual(len(block.channel_indexes), 0)
class TestData(CommonNeuralynxIOTest, unittest.TestCase):
def _load_plaindata(self, filename, numSamps):
"""
Load numSamps samples only from Ncs dump files which contain one row for each record,
each row containing the timestamp, channel number, whole integer sampling frequency,
number of samples, followed by that number of samples (which may be different for
each record).
"""
res = []
totRes = 0
with open(filename) as f:
for line in f:
vals = list(map(int, line.split()))
numSampsThisLine = len(vals) - 4
if numSampsThisLine < 0 or numSampsThisLine < vals[3]:
                    raise IOError('plain data file "' + filename + '" improperly formatted')
numAvail = min(numSampsThisLine, vals[3]) # only use valid samples
if numAvail < numSamps - len(res):
res.append(vals[4:(4 + numAvail)])
else:
res.append(vals[4:(4 + numSamps - len(res))])
totRes += len(res[-1])
if totRes == numSamps:
break
return [item for sublist in res for item in sublist]
# def test_ncs(self):
# for session in self.files_to_test:
# dirname = self.get_filename_path(session)
# nio = NeuralynxIO(dirname=dirname, use_cache=False)
# block = nio.read_block()
# check that data agrees in first segment only
# for anasig_id, anasig in enumerate(block.segments[0].analogsignals):
# chid = anasig.channel_index.channel_ids[anasig_id]
#
# # need to decode, unless keyerror
# chname = anasig.channel_index.channel_names[anasig_id]
# chuid = (chname, chid)
# filename = nio.ncs_filenames[chuid][:-3] + 'txt'
# filename = filename.replace('original_data', 'plain_data')
# overlap = 512 * 500
# plain_data = self._load_plaindata(filename, overlap)
# gain_factor_0 = plain_data[0] / anasig.magnitude[0, 0]
# numToTest = min(len(plain_data), len(anasig.magnitude[:, 0]))
# np.testing.assert_allclose(plain_data[:numToTest],
# anasig.magnitude[:numToTest, 0] * gain_factor_0,
# rtol=0.01, err_msg=" for file " + filename)
@unittest.skip
def test_keep_original_spike_times(self):
for session in self.files_to_test:
dirname = self.get_filename_path(session)
nio = NeuralynxIO(dirname=dirname, keep_original_times=True)
block = nio.read_block()
for st in block.segments[0].spiketrains:
filename = st.file_origin.replace('original_data', 'plain_data')
if '.nse' in st.file_origin:
filename = filename.replace('.nse', '.txt')
times_column = 0
plain_data = np.loadtxt(filename)[:, times_column]
elif '.ntt' in st.file_origin:
filename = filename.replace('.ntt', '.txt')
times_column = 2
plain_data = np.loadtxt(filename)[:, times_column]
# ntt files contain 4 rows per spike time
plain_data = plain_data[::4]
times = st.rescale(pq.microsecond).magnitude
overlap = min(len(plain_data), len(times))
np.testing.assert_allclose(plain_data[:overlap], times[:overlap], rtol=1e-10)
class TestIncompleteBlocks(CommonNeuralynxIOTest, unittest.TestCase):
def test_incomplete_block_handling_v632(self):
dirname = self.get_filename_path('Cheetah_v6.3.2/incomplete_blocks')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
block = nio.read_block()
# known gap values
n_gaps = 2
# so 3 segments, 3 anasigs by Channelindex
self.assertEqual(len(block.segments), n_gaps + 1)
# self.assertEqual(len(block.channel_indexes[0].analogsignals), n_gaps + 1)
for t, gt in zip(nio._sigs_t_start, [8408.806811, 8427.832053, 8487.768561]):
self.assertEqual(np.round(t, 4), np.round(gt, 4))
for t, gt in zip(nio._sigs_t_stop, [8427.831990, 8487.768498, 8515.816549]):
self.assertEqual(np.round(t, 4), np.round(gt, 4))
class TestGaps(CommonNeuralynxIOTest, unittest.TestCase):
def test_gap_handling_v551(self):
dirname = self.get_filename_path('Cheetah_v5.5.1/original_data')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
block = nio.read_block()
# known gap values
n_gaps = 1
# so 2 segments, 2 anasigs by Channelindex, 2 SpikeTrain by Units
self.assertEqual(len(block.segments), n_gaps + 1)
# self.assertEqual(len(block.channel_indexes[0].analogsignals), n_gaps + 1)
# self.assertEqual(len(block.channel_indexes[-1].units[0].spiketrains), n_gaps + 1)
def test_gap_handling_v563(self):
dirname = self.get_filename_path('Cheetah_v5.6.3/original_data')
nio = NeuralynxIO(dirname=dirname, use_cache=False)
block = nio.read_block()
# known gap values
n_gaps = 1
# so 2 segments, 2 anasigs by Channelindex, 2 SpikeTrain by Units
self.assertEqual(len(block.segments), n_gaps + 1)
# self.assertEqual(len(block.channel_indexes[0].analogsignals), n_gaps + 1)
# self.assertEqual(len(block.channel_indexes[-1].units[0].spiketrains), n_gaps + 1)
def compare_neo_content(bl1, bl2):
print('*' * 5, 'Comparison of blocks', '*' * 5)
object_types_to_test = [Segment, ChannelIndex, Unit, AnalogSignal,
SpikeTrain, Event, Epoch]
for objtype in object_types_to_test:
print('Testing {}'.format(objtype))
children1 = bl1.list_children_by_class(objtype)
children2 = bl2.list_children_by_class(objtype)
if len(children1) != len(children2):
warnings.warn('Number of {} is different in both blocks ({} != {}).'
' Skipping comparison'
''.format(objtype, len(children1), len(children2)))
continue
for child1, child2 in zip(children1, children2):
compare_annotations(child1.annotations, child2.annotations)
compare_attributes(child1, child2)
def compare_annotations(anno1, anno2):
if len(anno1) != len(anno2):
warnings.warn('Different numbers of annotations! {} != {}\nSkipping further comparison of '
'this annotation list.'.format(anno1.keys(), anno2.keys()))
return
assert anno1.keys() == anno2.keys()
for key in anno1.keys():
anno1[key] = anno2[key]
def compare_attributes(child1, child2):
assert child1._all_attrs == child2._all_attrs
for attr_id in range(len(child1._all_attrs)):
attr_name = child1._all_attrs[attr_id][0]
attr_dtype = child1._all_attrs[attr_id][1]
if type(child1) == AnalogSignal and attr_name == 'signal':
continue
if type(child1) == SpikeTrain and attr_name == 'times':
continue
unequal = child1.__getattribute__(attr_name) != child2.__getattribute__(attr_name)
if hasattr(unequal, 'any'):
unequal = unequal.any()
if unequal:
warnings.warn('Attributes differ! {}.{}={} is not equal to {}.{}={}'
''.format(child1.__class__.__name__, attr_name,
child1.__getattribute__(attr_name),
child2.__class__.__name__, attr_name,
child2.__getattribute__(attr_name)))
if __name__ == '__main__':
unittest.main()
# ~ compare_old_and_new_neuralynxio()
| bsd-3-clause | 6,517,273,555,125,768,000 | 42.461009 | 99 | 0.616075 | false |
cupy/cupy | tests/cupy_tests/random_tests/test_generator.py | 1 | 39103 | import functools
import os
import threading
import unittest
import numpy
import pytest
import cupy
from cupy import cuda
from cupy.cuda import runtime
from cupy.random import _generator
from cupy import testing
from cupy.testing import _attr
from cupy.testing import _condition
from cupy.testing import _hypothesis
from cupy_tests.random_tests import common_distributions
def numpy_cupy_equal_continuous_distribution(significance_level, name='xp'):
"""Decorator that tests the distributions of NumPy samples and CuPy ones.
Args:
        significance_level (float): The test fails if the p-value is lower
            than this argument.
        name(str): Argument name whose value is either the
            ``numpy`` or ``cupy`` module.
    The decorated test fixture must return samples drawn from the same
    distribution regardless of whether ``xp`` is ``numpy`` or ``cupy``.
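    A minimal usage sketch (the distribution, seed and sample size below are
    arbitrary, chosen only for illustration):
        @numpy_cupy_equal_continuous_distribution(0.05)
        def test_standard_normal_ks(self, xp):
            return xp.random.RandomState(5).standard_normal(size=2000)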
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
kw[name] = cupy
cupy_result = impl(self, *args, **kw)
kw[name] = numpy
numpy_result = impl(self, *args, **kw)
assert cupy_result is not None
assert numpy_result is not None
d_plus, d_minus, p_value = \
common_distributions.two_sample_Kolmogorov_Smirnov_test(
cupy.asnumpy(cupy_result), numpy_result)
if p_value < significance_level:
message = '''Rejected null hypothesis:
p: %f
D+ (cupy < numpy): %f
D- (cupy > numpy): %f''' % (p_value, d_plus, d_minus)
raise AssertionError(message)
return test_func
return decorator
def _get_size(size):
# CuPy returns an ndarray of shape () even if size=None.
# cf. NumPy returns a Python scalar if size=None.
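    # e.g. _get_size(None) -> (), _get_size(3) -> (3,), _get_size((2, 3)) -> (2, 3)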
if size is None:
return ()
return cupy._core.get_size(size)
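# Base class for the test cases below: it plugs the legacy
# cupy.random.RandomState API into the shared distribution checks
# provided by common_distributions.BaseGeneratorTestCase.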
class RandomGeneratorTestCase(common_distributions.BaseGeneratorTestCase):
target_method = None
def get_rng(self, xp, seed):
return xp.random.RandomState(seed=seed)
def set_rng_seed(self, seed):
self.rng.seed(seed)
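# Returns the requested sampling method bound to a fresh RandomState.
# CuPy methods accept ``dtype`` directly; for NumPy the wrapper pops
# ``dtype`` and casts the returned samples instead.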
def _xp_random(xp, method_name):
method = getattr(xp.random.RandomState(), method_name)
if xp == cupy:
return method
def f(*args, **kwargs):
dtype = kwargs.pop('dtype', None)
ret = method(*args, **kwargs)
if dtype is not None:
ret = ret.astype(dtype, copy=False)
return ret
return f
@testing.fix_random()
@testing.gpu
class TestRandomState(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def check_seed(self, seed):
rs = self.rs
rs.seed(seed)
xs1 = [rs.uniform() for _ in range(100)]
rs.seed(seed)
xs2 = [rs.uniform() for _ in range(100)]
rs.seed(seed)
rs.seed(None)
xs3 = [rs.uniform() for _ in range(100)]
# Random state must be reproducible
assert xs1 == xs2
# Random state must be initialized randomly with seed=None
assert xs1 != xs3
@testing.for_int_dtypes()
def test_seed_not_none(self, dtype):
self.check_seed(dtype(0))
@testing.for_dtypes([numpy.complex_])
def test_seed_invalid_type_complex(self, dtype):
with self.assertRaises(TypeError):
self.rs.seed(dtype(0))
@testing.for_float_dtypes()
def test_seed_invalid_type_float(self, dtype):
with self.assertRaises(TypeError):
self.rs.seed(dtype(0))
def test_array_seed(self):
self.check_seed(numpy.random.randint(0, 2**31, size=40))
def test_methods(self):
methods = [
cuda.curand.CURAND_RNG_PSEUDO_DEFAULT,
cuda.curand.CURAND_RNG_PSEUDO_MRG32K3A,
cupy.cuda.curand.CURAND_RNG_PSEUDO_MT19937,
cupy.cuda.curand.CURAND_RNG_PSEUDO_PHILOX4_32_10,
cupy.cuda.curand.CURAND_RNG_PSEUDO_MTGP32,
cupy.cuda.curand.CURAND_RNG_PSEUDO_XORWOW
]
for method in methods:
if (runtime.is_hip and
method == cupy.cuda.curand.CURAND_RNG_PSEUDO_MT19937):
# hipRAND fails for MT19937 with the status code 1000,
# HIPRAND_STATUS_NOT_IMPLEMENTED. We use `pytest.raises` here
# so that we will be able to find it once hipRAND implement
# MT19937 as the imperative `pytest.xfail` immediately rewinds
# the control flow and does not run the test.
with pytest.raises(KeyError) as e:
rs = cupy.random.RandomState(method=method)
assert e.value.args == (1000,)
continue
rs = cupy.random.RandomState(method=method)
rs.normal()
@testing.parameterize(*common_distributions.beta_params)
@testing.with_requires('numpy>=1.17.0')
@testing.gpu
@testing.fix_random()
class TestBeta(
common_distributions.Beta,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'n': 5, 'p': 0.5},
{'n': 5, 'p': 0.0},
{'n': 5, 'p': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestBinomial(RandomGeneratorTestCase):
# TODO(niboshi):
# Test soundness of distribution.
    # Currently only reproducibility is checked.
target_method = 'binomial'
def test_binomial(self):
self.generate(n=self.n, p=self.p, size=(3, 2))
@testing.parameterize(
{'df': 1.0},
{'df': 3.0},
{'df': 10.0},
)
@testing.gpu
@testing.fix_random()
class TestChisquare(RandomGeneratorTestCase):
target_method = 'chisquare'
def test_chisquare(self):
self.generate(df=self.df, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_chisquare_ks(self, dtype):
self.check_ks(0.05)(
df=self.df, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(
{'alpha': cupy.array([1.0, 1.0, 1.0])},
{'alpha': cupy.array([1.0, 3.0, 5.0])},
)
@testing.fix_random()
class TestDirichlet(RandomGeneratorTestCase):
target_method = 'dirichlet'
def test_dirichlet(self):
self.generate(alpha=self.alpha, size=(3, 2, 3))
def test_dirichlet_int_shape(self):
self.generate(alpha=self.alpha, size=5)
# TODO(kataoka): add distribution test
@testing.parameterize(*common_distributions.exponential_params)
@testing.gpu
@testing.fix_random()
class TestExponential(
common_distributions.Exponential,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'dfnum': 1.0, 'dfden': 3.0},
{'dfnum': 3.0, 'dfden': 3.0},
{'dfnum': 3.0, 'dfden': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestF(RandomGeneratorTestCase):
target_method = 'f'
def test_f(self):
self.generate(dfnum=self.dfnum, dfden=self.dfden, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_f_ks(self, dtype):
self.check_ks(0.05)(
self.dfnum, self.dfden, size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.gamma_params)
@testing.gpu
@testing.fix_random()
class TestGamma(
common_distributions.Gamma,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'p': 0.5},
{'p': 0.1},
{'p': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestGeometric(RandomGeneratorTestCase):
target_method = 'geometric'
def test_geometric(self):
self.generate(p=self.p, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_geometric_ks(self, dtype):
self.check_ks(0.05)(
p=self.p, size=2000, dtype=dtype)
@testing.parameterize(
{'ngood': 1, 'nbad': 1, 'nsample': 1},
{'ngood': 1, 'nbad': 1, 'nsample': 2},
)
@testing.gpu
@testing.fix_random()
class TestHypergeometric(RandomGeneratorTestCase):
target_method = 'hypergeometric'
def test_hypergeometric(self):
self.generate(ngood=self.ngood, nbad=self.nbad, nsample=self.nsample,
size=(3, 2))
# TODO(kataoka): add distribution test
@testing.gpu
@testing.fix_random()
class TestLaplace(RandomGeneratorTestCase):
target_method = 'laplace'
def test_laplace_1(self):
self.generate()
def test_laplace_2(self):
self.generate(0.0, 1.0, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_laplace_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_laplace_ks_2(self, dtype):
self.check_ks(0.05)(
2.3, 4.5, size=2000, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestLogistic(RandomGeneratorTestCase):
target_method = 'logistic'
def test_logistic_1(self):
self.generate()
def test_logistic_2(self):
self.generate(0.0, 1.0, size=(3, 2))
@_attr.slow
@_condition.repeat(10)
def test_standard_logistic_isfinite(self):
x = self.generate(size=10**7)
assert cupy.isfinite(x).all()
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_logistic_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_logistic_ks_2(self, dtype):
self.check_ks(0.05)(
2.3, 4.5, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(*[
{'args': (0.0, 1.0), 'size': None},
{'args': (10.0, 20.0), 'size': None},
{'args': (0.0, 1.0), 'size': 10},
{'args': (0.0, 1.0), 'size': (1, 2, 3)},
{'args': (0.0, 1.0), 'size': 3},
{'args': (0.0, 1.0), 'size': (3, 3)},
{'args': (0.0, 1.0), 'size': ()},
])
@testing.fix_random()
class TestLogNormal(RandomGeneratorTestCase):
target_method = 'lognormal'
def check_lognormal(self, dtype):
vals = self.generate_many(
self.args[0], self.args[1], self.size, dtype, _count=10)
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape
assert (0 <= val).all()
def test_lognormal_float(self):
self.check_lognormal(float)
def test_lognormal_float32(self):
self.check_lognormal(numpy.float32)
def test_lognormal_float64(self):
self.check_lognormal(numpy.float64)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_lognormal_ks(self, dtype):
self.check_ks(0.05)(
*self.args, size=self.size, dtype=dtype)
@testing.parameterize(
{'p': 0.5},
{'p': 0.1},
{'p': 0.9},
)
@testing.gpu
@testing.fix_random()
class TestLogseries(RandomGeneratorTestCase):
target_method = 'logseries'
def test_logseries(self):
self.generate(p=self.p, size=(3, 2))
# TODO(kataoka): add distribution test
@testing.gpu
@testing.parameterize(*[
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': None, 'tol': 1e-6},
{'args': ([10., 10.], [[20., 10.], [10., 20.]]),
'size': None, 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': 10, 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': (1, 2, 3), 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': 3, 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': (3, 3), 'tol': 1e-6},
{'args': ([0., 0.], [[1., 0.], [0., 1.]]), 'size': (), 'tol': 1e-6},
])
@testing.fix_random()
class TestMultivariateNormal(RandomGeneratorTestCase):
target_method = 'multivariate_normal'
def check_multivariate_normal(self, dtype):
vals = self.generate_many(
mean=self.args[0], cov=self.args[1], size=self.size, tol=self.tol,
dtype=dtype, _count=10)
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape + (2,)
def test_multivariate_normal_float32(self):
self.check_multivariate_normal(numpy.float32)
def test_multivariate_normal_float64(self):
self.check_multivariate_normal(numpy.float64)
# TODO(kataoka): add distribution test
@testing.parameterize(
{'n': 5, 'p': 0.5},
)
@testing.gpu
@testing.fix_random()
class TestNegativeBinomial(RandomGeneratorTestCase):
target_method = 'negative_binomial'
def test_negative_binomial(self):
self.generate(n=self.n, p=self.p, size=(3, 2))
# TODO(kataoka): add distribution test
@testing.parameterize(
{'df': 1.5, 'nonc': 2.0},
{'df': 2.0, 'nonc': 0.0},
)
@testing.gpu
@testing.fix_random()
class TestNoncentralChisquare(RandomGeneratorTestCase):
target_method = 'noncentral_chisquare'
def test_noncentral_chisquare(self):
self.generate(df=self.df, nonc=self.nonc, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_noncentral_chisquare_ks(self, dtype):
self.check_ks(0.05)(
self.df, self.nonc, size=2000, dtype=dtype)
@testing.parameterize(
{'dfnum': 2.0, 'dfden': 3.0, 'nonc': 4.0},
{'dfnum': 2.5, 'dfden': 1.5, 'nonc': 0.0},
)
@testing.gpu
@testing.fix_random()
class TestNoncentralF(RandomGeneratorTestCase):
target_method = 'noncentral_f'
def test_noncentral_f(self):
self.generate(
dfnum=self.dfnum, dfden=self.dfden, nonc=self.nonc, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_noncentral_f_ks(self, dtype):
self.check_ks(0.05)(
self.dfnum, self.dfden, self.nonc, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(*[
{'args': (0.0, 1.0), 'size': None},
{'args': (10.0, 20.0), 'size': None},
{'args': (0.0, 1.0), 'size': 10},
{'args': (0.0, 1.0), 'size': (1, 2, 3)},
{'args': (0.0, 1.0), 'size': 3},
{'args': (0.0, 1.0), 'size': (3, 3)},
{'args': (0.0, 1.0), 'size': ()},
])
@testing.fix_random()
class TestNormal(RandomGeneratorTestCase):
target_method = 'normal'
def check_normal(self, dtype):
vals = self.generate_many(
self.args[0], self.args[1], self.size, dtype, _count=10)
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape
def test_normal_float32(self):
self.check_normal(numpy.float32)
def test_normal_float64(self):
self.check_normal(numpy.float64)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_normal_ks(self, dtype):
self.check_ks(0.05)(
*self.args, size=self.size, dtype=dtype)
@testing.parameterize(
{'a': 1.0},
{'a': 3.0},
{'a': 10.0},
)
@testing.gpu
@testing.fix_random()
class TestPareto(RandomGeneratorTestCase):
target_method = 'pareto'
def test_pareto(self):
self.generate(a=self.a, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_pareto_ks(self, dtype):
self.check_ks(0.05)(
a=self.a, size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.poisson_params)
@testing.gpu
@testing.fix_random()
class TestPoisson(
common_distributions.Poisson,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'df': 1.0},
{'df': 3.0},
{'df': 10.0},
)
@testing.gpu
@testing.fix_random()
class TestStandardT(RandomGeneratorTestCase):
target_method = 'standard_t'
def test_standard_t(self):
self.generate(df=self.df, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_standard_t_ks(self, dtype):
self.check_ks(0.05)(
df=self.df, size=2000, dtype=dtype)
@testing.gpu
@testing.parameterize(*[
{'size': None},
{'size': 10},
{'size': (1, 2, 3)},
{'size': 3},
{'size': ()},
])
@testing.fix_random()
class TestRandomSample(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def check_random_sample(self, dtype):
vals = [self.rs.random_sample(self.size, dtype) for _ in range(10)]
shape = _get_size(self.size)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype == dtype
assert val.shape == shape
assert (0 <= val).all()
assert (val < 1).all()
def test_random_sample_float32(self):
self.check_random_sample(numpy.float32)
def test_random_sample_float64(self):
self.check_random_sample(numpy.float64)
@testing.fix_random()
class TestRandomSampleDistrib(unittest.TestCase):
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
@numpy_cupy_equal_continuous_distribution(0.05)
def test_random_sample_ks(self, xp, dtype):
return _xp_random(xp, 'random_sample')(size=2000, dtype=dtype)
@testing.fix_random()
@testing.gpu
class TestRandAndRandN(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def test_rand_invalid_argument(self):
with self.assertRaises(TypeError):
self.rs.rand(1, 2, 3, unnecessary='unnecessary_argument')
def test_randn_invalid_argument(self):
with self.assertRaises(TypeError):
self.rs.randn(1, 2, 3, unnecessary='unnecessary_argument')
@testing.parameterize(
{'a': 0.5},
)
@testing.gpu
@testing.fix_random()
class TestPower(RandomGeneratorTestCase):
target_method = 'power'
def test_power(self):
self.generate(a=self.a, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_power_ks(self, dtype):
self.check_ks(0.05)(
a=self.a, size=2000, dtype=dtype)
@testing.parameterize(
{'scale': 1.0},
{'scale': 3.0},
)
@testing.gpu
@testing.fix_random()
class TestRayleigh(RandomGeneratorTestCase):
target_method = 'rayleigh'
def test_rayleigh(self):
self.generate(scale=self.scale, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_rayleigh_ks(self, dtype):
self.check_ks(0.05)(
scale=self.scale, size=2000, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestStandardCauchy(RandomGeneratorTestCase):
target_method = 'standard_cauchy'
def test_standard_cauchy(self):
self.generate(size=(3, 2))
@_attr.slow
@_condition.repeat(10)
def test_standard_cauchy_isfinite(self):
x = self.generate(size=10**7)
assert cupy.isfinite(x).all()
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_standard_cauchy_ks(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.parameterize(*common_distributions.standard_gamma_params)
@testing.gpu
@testing.fix_random()
class TestStandardGamma(
common_distributions.StandardGamma,
RandomGeneratorTestCase
):
pass
@testing.fix_random()
@testing.gpu
class TestInterval(RandomGeneratorTestCase):
target_method = '_interval'
def test_zero(self):
shape = (2, 3)
vals = self.generate_many(0, shape, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == shape
assert (val == 0).all()
def test_shape_zero(self):
mx = 10
vals = self.generate_many(mx, None, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == ()
assert (0 <= val).all()
assert (val <= mx).all()
# TODO(niboshi): Distribution test
def test_shape_one_dim(self):
mx = 10
size = 20
vals = self.generate_many(mx, size, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == (size,)
assert (0 <= val).all()
assert (val <= mx).all()
# TODO(niboshi): Distribution test
def test_shape_multi_dim(self):
mx = 10
shape = (1, 2)
vals = self.generate_many(mx, shape, _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == shape
assert (0 <= val).all()
assert (val <= mx).all()
# TODO(niboshi): Distribution test
def test_bound_1(self):
vals = self.generate_many(10, (2, 3), _count=10)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == (2, 3)
assert (0 <= val).all()
assert (val <= 10).all()
def test_bound_2(self):
vals = self.generate_many(2, None, _count=20)
for val in vals:
assert isinstance(val, cupy.ndarray)
assert val.dtype.kind in 'iu'
assert val.shape == ()
assert (0 <= val).all()
assert (val <= 2).all()
@_condition.repeat(3, 10)
def test_goodness_of_fit(self):
mx = 5
trial = 300
vals = self.generate_many(mx, None, _count=trial)
vals = [val.get() for val in vals]
counts = numpy.histogram(vals, bins=numpy.arange(mx + 2))[0]
expected = numpy.array([float(trial) / (mx + 1)] * (mx + 1))
assert _hypothesis.chi_square_test(counts, expected)
@_condition.repeat(3)
def test_goodness_of_fit_2(self):
mx = 5
vals = self.generate(mx, (5, 5)).get()
counts = numpy.histogram(vals, bins=numpy.arange(mx + 2))[0]
expected = numpy.array([float(vals.size) / (mx + 1)] * (mx + 1))
assert _hypothesis.chi_square_test(counts, expected)
@testing.fix_random()
@testing.gpu
class TestTomaxint(RandomGeneratorTestCase):
target_method = 'tomaxint'
def test_tomaxint_none(self):
x = self.generate()
assert x.shape == ()
assert (0 <= x).all()
assert (x <= cupy.iinfo(cupy.int_).max).all()
def test_tomaxint_int(self):
x = self.generate(3)
assert x.shape == (3,)
assert (0 <= x).all()
assert (x <= cupy.iinfo(cupy.int_).max).all()
def test_tomaxint_tuple(self):
x = self.generate((2, 3))
assert x.shape == (2, 3)
assert (0 <= x).all()
assert (x <= cupy.iinfo(cupy.int_).max).all()
@testing.parameterize(
{'a': 3, 'size': 2, 'p': None},
{'a': 3, 'size': 2, 'p': [0.3, 0.3, 0.4]},
{'a': 3, 'size': (5, 5), 'p': [0.3, 0.3, 0.4]},
{'a': 3, 'size': (5, 5), 'p': numpy.array([0.3, 0.3, 0.4])},
{'a': 3, 'size': (), 'p': None},
{'a': numpy.array([0.0, 1.0, 2.0]), 'size': 2, 'p': [0.3, 0.3, 0.4]},
{'a': 0, 'size': 0, 'p': None},
{'a': numpy.array([]), 'size': 0, 'p': None},
)
@testing.fix_random()
@testing.gpu
class TestChoice1(RandomGeneratorTestCase):
target_method = 'choice'
def test_dtype_shape(self):
v = self.generate(a=self.a, size=self.size, p=self.p)
if isinstance(self.size, int):
expected_shape = (self.size,)
else:
expected_shape = self.size
if isinstance(self.a, numpy.ndarray):
expected_dtype = 'float'
else:
expected_dtype = 'int64'
assert v.dtype == expected_dtype
assert v.shape == expected_shape
@_condition.repeat(3, 10)
def test_bound(self):
vals = self.generate_many(
a=self.a, size=self.size, p=self.p, _count=20)
vals = [val.get() for val in vals]
size_ = self.size if isinstance(self.size, tuple) else (self.size,)
if size_ == (0, ):
self.skipTest('no bound check for empty `random.choice`')
for val in vals:
assert val.shape == size_
assert min(val.min() for val in vals) == 0
assert max(val.max() for val in vals) == 2
@testing.parameterize(
{'a': [0, 1, 2], 'size': 2, 'p': [0.3, 0.3, 0.4]},
)
@testing.fix_random()
@testing.gpu
class TestChoice2(RandomGeneratorTestCase):
target_method = 'choice'
def test_dtype_shape(self):
v = self.generate(a=self.a, size=self.size, p=self.p)
if isinstance(self.size, int):
expected_shape = (self.size,)
else:
expected_shape = self.size
if isinstance(self.a, numpy.ndarray):
expected_dtype = 'float'
else:
expected_dtype = 'int'
assert v.dtype == expected_dtype
assert v.shape == expected_shape
@_condition.repeat(3, 10)
def test_bound(self):
vals = self.generate_many(
a=self.a, size=self.size, p=self.p, _count=20)
vals = [val.get() for val in vals]
size_ = self.size if isinstance(self.size, tuple) else (self.size,)
for val in vals:
assert val.shape == size_
assert min(val.min() for val in vals) == 0
assert max(val.max() for val in vals) == 2
@testing.fix_random()
@testing.gpu
class TestChoiceChi(RandomGeneratorTestCase):
target_method = 'choice'
@_condition.repeat(3, 10)
def test_goodness_of_fit(self):
trial = 100
vals = self.generate_many(3, 1, True, [0.3, 0.3, 0.4], _count=trial)
vals = [val.get() for val in vals]
counts = numpy.histogram(vals, bins=numpy.arange(4))[0]
expected = numpy.array([30, 30, 40])
assert _hypothesis.chi_square_test(counts, expected)
@_condition.repeat(3, 10)
@pytest.mark.xfail(runtime.is_hip, reason='ROCm/HIP may have a bug')
def test_goodness_of_fit_2(self):
vals = self.generate(3, (5, 20), True, [0.3, 0.3, 0.4]).get()
counts = numpy.histogram(vals, bins=numpy.arange(4))[0]
expected = numpy.array([30, 30, 40])
assert _hypothesis.chi_square_test(counts, expected)
@testing.fix_random()
@testing.gpu
class TestChoiceMultinomial(unittest.TestCase):
@_condition.repeat(3, 10)
@testing.for_float_dtypes()
@testing.numpy_cupy_allclose(atol=0.02)
def test_choice_multinomial(self, xp, dtype):
p = xp.array([0.5, 0.25, 0.125, 0.125], dtype)
trial = 10000
x = xp.random.choice(len(p), trial, p=p)
y = xp.bincount(x).astype('f') / trial
return y
@testing.parameterize(
{'a': 3.1, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': None, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': -3, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': [[0, 1], [2, 3]], 'size': 1, 'p': [[0.1, 0.2], [0.3, 0.4]]},
{'a': [[0, 1], [2, 3]], 'size': 1, 'p': [0.3, 0.7]},
{'a': [], 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': 4, 'size': 1, 'p': [[0.1, 0.2], [0.3, 0.4]]},
{'a': 2, 'size': 1, 'p': [0.1, 0.1, 0.8]},
{'a': 3, 'size': 1, 'p': [-0.1, 0.3, 0.8]},
{'a': 3, 'size': 1, 'p': [0.1, 0.1, 0.7]},
)
@testing.fix_random()
@testing.gpu
class TestChoiceFailure(unittest.TestCase):
def setUp(self):
self.rs = _generator.RandomState(seed=testing.generate_seed())
def test_choice_invalid_value(self):
with self.assertRaises(ValueError):
self.rs.choice(a=self.a, size=self.size, p=self.p)
@testing.parameterize(
{'a': 5, 'size': 2},
{'a': 5, 'size': (2, 2)},
{'a': 5, 'size': ()},
{'a': numpy.array([0.0, 2.0, 4.0]), 'size': 2},
)
@testing.fix_random()
@testing.gpu
class TestChoiceReplaceFalse(RandomGeneratorTestCase):
target_method = 'choice'
def test_dtype_shape(self):
v = self.generate(a=self.a, size=self.size, replace=False)
if isinstance(self.size, int):
expected_shape = (self.size,)
else:
expected_shape = self.size
if isinstance(self.a, numpy.ndarray):
expected_dtype = 'float'
else:
expected_dtype = 'int'
assert v.dtype == expected_dtype
assert v.shape == expected_shape
@_condition.repeat(3, 10)
def test_bound(self):
val = self.generate(a=self.a, size=self.size, replace=False).get()
size = self.size if isinstance(self.size, tuple) else (self.size,)
assert val.shape == size
assert (0 <= val).all()
assert (val < 5).all()
val = numpy.asarray(val)
assert numpy.unique(val).size == val.size
@testing.gpu
@testing.fix_random()
class TestGumbel(RandomGeneratorTestCase):
target_method = 'gumbel'
def test_gumbel_1(self):
self.generate()
def test_gumbel_2(self):
self.generate(0.0, 1.0, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_gumbel_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_gumbel_ks_2(self, dtype):
self.check_ks(0.05)(
2.3, 4.5, size=2000, dtype=dtype)
@testing.gpu
@testing.fix_random()
class TestRandint(RandomGeneratorTestCase):
# TODO(niboshi):
# Test soundness of distribution.
    # Currently only reproducibility is checked.
target_method = 'randint'
def test_randint_1(self):
self.generate(3)
def test_randint_2(self):
self.generate(3, 4, size=(3, 2))
def test_randint_empty1(self):
self.generate(3, 10, size=0)
def test_randint_empty2(self):
self.generate(3, size=(4, 0, 5))
def test_randint_overflow(self):
self.generate(numpy.int8(-100), numpy.int8(100))
def test_randint_float1(self):
self.generate(-1.2, 3.4, 5)
def test_randint_float2(self):
self.generate(6.7, size=(2, 3))
def test_randint_int64_1(self):
self.generate(2**34, 2**40, 3, dtype='q')
@testing.gpu
@testing.fix_random()
class TestUniform(RandomGeneratorTestCase):
target_method = 'uniform'
def test_uniform_1(self):
self.generate()
def test_uniform_2(self):
self.generate(-4.2, 2.4, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_uniform_ks_1(self, dtype):
self.check_ks(0.05)(
size=2000, dtype=dtype)
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_uniform_ks_2(self, dtype):
self.check_ks(0.05)(
-4.2, 2.4, size=2000, dtype=dtype)
@testing.parameterize(
{'mu': 0.0, 'kappa': 1.0},
{'mu': 3.0, 'kappa': 3.0},
{'mu': 3.0, 'kappa': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestVonmises(RandomGeneratorTestCase):
target_method = 'vonmises'
def test_vonmises(self):
self.generate(mu=self.mu, kappa=self.kappa, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_vonmises_ks(self, dtype):
self.check_ks(0.05)(
self.mu, self.kappa, size=2000, dtype=dtype)
@testing.parameterize(
{'mean': 1.0, 'scale': 3.0},
{'mean': 3.0, 'scale': 3.0},
{'mean': 3.0, 'scale': 1.0},
)
@testing.gpu
@testing.fix_random()
class TestWald(RandomGeneratorTestCase):
target_method = 'wald'
def test_wald(self):
self.generate(mean=self.mean, scale=self.scale, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_wald_ks(self, dtype):
self.check_ks(0.05)(
self.mean, self.scale, size=2000, dtype=dtype)
@testing.parameterize(
{'a': 0.5},
{'a': 1.0},
{'a': 3.0},
{'a': numpy.inf},
)
@testing.gpu
@testing.fix_random()
class TestWeibull(RandomGeneratorTestCase):
target_method = 'weibull'
def test_weibull(self):
self.generate(a=self.a, size=(3, 2))
@testing.for_dtypes('fd')
@_condition.repeat_with_success_at_least(10, 3)
def test_weibull_ks(self, dtype):
self.check_ks(0.05)(
a=self.a, size=2000, dtype=dtype)
@testing.parameterize(
{'a': 2.0},
)
@testing.gpu
@testing.fix_random()
class TestZipf(RandomGeneratorTestCase):
target_method = 'zipf'
def test_zipf(self):
self.generate(a=self.a, size=(3, 2))
# TODO(kataoka): add distribution test
@testing.parameterize(
{'a': 3, 'size': 5},
{'a': [1, 2, 3], 'size': 5},
)
@testing.fix_random()
@testing.gpu
class TestChoiceReplaceFalseFailure(unittest.TestCase):
def test_choice_invalid_value(self):
for xp in (numpy, cupy):
rs = xp.random.RandomState(seed=testing.generate_seed())
with pytest.raises(ValueError):
rs.choice(a=self.a, size=self.size, replace=False)
class TestResetStates(unittest.TestCase):
def test_reset_states(self):
_generator._random_states = 'dummy'
_generator.reset_states()
assert {} == _generator._random_states
@testing.gpu
class TestGetRandomState(unittest.TestCase):
def setUp(self):
self.device_id = cuda.Device().id
self.rs_tmp = _generator._random_states
def tearDown(self, *args):
_generator._random_states = self.rs_tmp
def test_get_random_state_initialize(self):
_generator._random_states = {}
rs = _generator.get_random_state()
assert _generator._random_states[self.device_id] == rs
def test_get_random_state_memoized(self):
_generator._random_states = {self.device_id: 'expected',
self.device_id + 1: 'dummy'}
rs = _generator.get_random_state()
assert 'expected' == _generator._random_states[self.device_id]
assert 'dummy' == _generator._random_states[self.device_id + 1]
assert 'expected' == rs
@testing.gpu
class TestSetRandomState(unittest.TestCase):
def setUp(self):
self.rs_tmp = _generator._random_states
def tearDown(self, *args):
_generator._random_states = self.rs_tmp
def test_set_random_state(self):
rs = _generator.RandomState()
_generator.set_random_state(rs)
assert _generator.get_random_state() is rs
def test_set_random_state_call_multiple_times(self):
_generator.set_random_state(_generator.RandomState())
rs = _generator.RandomState()
_generator.set_random_state(rs)
assert _generator.get_random_state() is rs
@testing.gpu
@testing.fix_random()
class TestStandardExponential(
common_distributions.StandardExponential,
RandomGeneratorTestCase
):
pass
@testing.parameterize(
{'left': -1.0, 'mode': 0.0, 'right': 2.0},
)
@testing.gpu
@testing.fix_random()
class TestTriangular(RandomGeneratorTestCase):
target_method = 'triangular'
def test_triangular(self):
self.generate(
left=self.left, mode=self.mode, right=self.right, size=(3, 2))
@testing.gpu
class TestRandomStateThreadSafe(unittest.TestCase):
def setUp(self):
cupy.random.reset_states()
def test_get_random_state_thread_safe(self):
def _f(func, args=()):
cupy.cuda.Device().use()
func(*args)
seed = 10
threads = [
threading.Thread(
target=_f, args=(cupy.random.seed, (seed,))),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
threading.Thread(
target=_f, args=(cupy.random.get_random_state,)),
]
for t in threads:
t.start()
for t in threads:
t.join()
actual = cupy.random.uniform()
cupy.random.seed(seed)
expected = cupy.random.uniform()
assert actual == expected
def test_set_random_state_thread_safe(self):
def _f(func, args=()):
cupy.cuda.Device().use()
func(*args)
rs = cupy.random.RandomState()
threads = [
threading.Thread(
target=_f, args=(cupy.random.set_random_state, (rs,))),
threading.Thread(
target=_f, args=(cupy.random.set_random_state, (rs,))),
]
for t in threads:
t.start()
for t in threads:
t.join()
assert cupy.random.get_random_state() is rs
@testing.gpu
class TestGetRandomState2(unittest.TestCase):
def setUp(self):
self.rs_dict = _generator._random_states
_generator._random_states = {}
self.cupy_seed = os.getenv('CUPY_SEED')
def tearDown(self, *args):
_generator._random_states = self.rs_dict
if self.cupy_seed is None:
os.environ.pop('CUPY_SEED', None)
else:
os.environ['CUPY_SEED'] = self.cupy_seed
def test_get_random_state_no_cupy(self):
os.environ.pop('CUPY_SEED', None)
rvs0 = self._get_rvs_reset()
rvs1 = self._get_rvs_reset()
self._check_different(rvs0, rvs1)
def test_get_random_state_with_cupy(self):
rvs0 = self._get_rvs(_generator.RandomState(6))
os.environ['CUPY_SEED'] = '6'
rvs1 = self._get_rvs_reset()
self._check_same(rvs0, rvs1)
def _get_rvs(self, rs):
rvu = rs.rand(4)
rvn = rs.randn(4)
return rvu, rvn
def _get_rvs_reset(self):
_generator.reset_states()
return self._get_rvs(_generator.get_random_state())
def _check_same(self, rvs0, rvs1):
for rv0, rv1 in zip(rvs0, rvs1):
testing.assert_array_equal(rv0, rv1)
def _check_different(self, rvs0, rvs1):
for rv0, rv1 in zip(rvs0, rvs1):
for r0, r1 in zip(rv0, rv1):
assert r0 != r1
class TestCheckAndGetDtype(unittest.TestCase):
@testing.for_float_dtypes(no_float16=True)
def test_float32_64_type(self, dtype):
assert _generator._check_and_get_dtype(dtype) == numpy.dtype(dtype)
def test_float16(self):
with self.assertRaises(TypeError):
_generator._check_and_get_dtype(numpy.float16)
@testing.for_int_dtypes()
def test_int_type(self, dtype):
with self.assertRaises(TypeError):
_generator._check_and_get_dtype(dtype)
| mit | -1,526,008,727,223,784,700 | 26.990694 | 79 | 0.591106 | false |
btat/Booktype | lib/booktype/apps/reader/tests/functest_views.py | 1 | 8237 | from django.test import TestCase
from django.core.urlresolvers import reverse
from .factory_models import UserFactory, BookFactory, BookVersionFactory
from .factory_models import ChapterFactory, BookTocFactory
from .factory_models import PLAIN_USER_PASSWORD
class ReaderBaseTestCase(TestCase):
"""
Base test case for Booktype with common utility functions
"""
def setUp(self):
"""
Sets common attributes for test classes
"""
self.book = BookFactory()
self.book.version = BookVersionFactory(book=self.book)
self.book.save()
self.user = self.book.owner
def assertTrueMultiple(self, response, var_list=[]):
"""
        Checks if the response contains multiple variables in its context
"""
for var_name in var_list:
self.assertTrue(var_name in response.context)
def reload_from_db(self, obj):
"""
Returns reloaded attributes of a given object from the database
"""
return obj.__class__.objects.get(pk=obj.id)
class InfoPageTest(ReaderBaseTestCase):
def setUp(self):
# call parent setUp method
super(InfoPageTest, self).setUp()
self.dispatcher = reverse('reader:infopage', args=[self.book.url_title])
# list of variables to check in context
self.vars_to_check = [
'book_collaborators',
'book_admins',
'book_history',
'book_group',
'is_book_admin'
]
def test_details_as_anonymous(self):
response = self.client.get(self.dispatcher)
# response status code should be 200
self.assertEquals(response.status_code, 200)
# check if context has been filled correctly
self.assertTrueMultiple(response, self.vars_to_check)
# check if anonymous user is book admin
self.assertEquals(response.context['is_book_admin'], False)
# check absence of some elements in template
self.assertNotContains(response, 'Edit book info')
self.assertNotContains(response, 'Delete Book')
def test_details_as_owner_logged_in(self):
# first login with book owner user
self.client.login(
username=self.user.username,
password=PLAIN_USER_PASSWORD
)
response = self.client.get(self.dispatcher)
# check status code
self.assertEquals(response.status_code, 200)
# check if context has been filled correctly
self.assertTrueMultiple(response, self.vars_to_check)
# owner is also book admin, so this should be True
self.assertEquals(response.context['is_book_admin'], True)
# check admin actions available in response
self.assertContains(response, 'Edit book info')
self.assertContains(response, 'Delete Book')
class EditBookInfoTest(ReaderBaseTestCase):
def setUp(self):
# call parent setUp method
super(EditBookInfoTest, self).setUp()
self.dispatcher = reverse('reader:edit_info_book', args=[self.book.url_title])
def test_anon_user(self):
response = self.client.get(self.dispatcher)
# response status code should be 302, you're not logged in
self.assertEquals(response.status_code, 302)
def test_as_book_owner(self):
# first login as book owner user
self.client.login(
username=self.user.username,
password=PLAIN_USER_PASSWORD
)
# ---- GET method ----
response = self.client.get(self.dispatcher)
# response status code should be 200
self.assertEquals(response.status_code, 200)
# check if returning the right template
self.assertTemplateUsed(response, "reader/book_info_edit.html")
# check if book object is in context
self.assertTrue('book' in response.context)
# check some content in response
self.assertContains(response, 'Book description')
self.assertContains(response, 'Book image')
# ---- POST method -----
new_description = 'lorem ipsum testing'
response = self.client.post(self.dispatcher, dict(description=new_description))
# response status code should be 200
self.assertEquals(response.status_code, 200)
# test if description has been updated correctly
updated_book = self.reload_from_db(self.book)
self.assertTrue(updated_book.description == new_description)
# in post method, the template must have changed
self.assertTemplateUsed(response, "reader/book_info_edit_redirect.html")
class DeleteBookTest(EditBookInfoTest):
    # NOTE: Inheriting from EditBookInfoTest so that we first check that login
    # is required (test_anon_user), and then override the 'test_as_book_owner'
    # method below.
def setUp(self):
# call parent's setUp method
super(DeleteBookTest, self).setUp()
self.dispatcher = reverse('reader:delete_book', args=[self.book.url_title])
def test_as_not_owner(self):
# first login as other user not owner or admin
other_user = UserFactory()
self.client.login(
username=other_user.username,
password=PLAIN_USER_PASSWORD
)
# ---- POST method -----
response = self.client.post(self.dispatcher, dict(title=self.book.title))
# response status code should be 200
self.assertEquals(response.status_code, 200)
# template must be the delete_error_template, because user doesn't
# have enough rights to delete the book
self.assertTemplateUsed(response, "reader/book_delete_error.html")
def test_as_book_owner(self):
# first login as book owner user
self.client.login(
username=self.user.username,
password=PLAIN_USER_PASSWORD
)
# ---- GET method ----
response = self.client.get(self.dispatcher)
# response status code should be 200
self.assertEquals(response.status_code, 200)
# check if returning the right template
self.assertTemplateUsed(response, "reader/book_delete.html")
# check if content in response
self.assertContains(response, 'Delete Book')
# ---- POST method -----
response = self.client.post(self.dispatcher, dict(title=self.book.title))
# response status code should be 200
self.assertEquals(response.status_code, 200)
# in post method, the template must have changed
self.assertTemplateUsed(response, "reader/book_delete_redirect.html")
# check if book registry is not in database anymore
Book = self.book.__class__
self.assertRaises(Book.DoesNotExist, Book.objects.get, pk=self.book.id)
class DraftChapterTest(ReaderBaseTestCase):
def setUp(self):
# call parent setUp method
super(DraftChapterTest, self).setUp()
self.chapter = ChapterFactory()
self.dispatcher = reverse('reader:draft_chapter_page', args=[self.book.url_title])
self.book_toc = BookTocFactory.create_toc(self.book, self.book.version, self.chapter)
# list of variables to check in context
self.vars_to_check = [
'content',
'toc_items',
'book_version',
'can_edit'
]
def test_context_as_anon(self):
response = self.client.get(self.dispatcher)
# response status code should be 200
self.assertEquals(response.status_code, 200)
# as anonymous user you can't edit the book
self.assertEquals(response.context['can_edit'], False)
# test if context is well formed
self.assertTrueMultiple(response, self.vars_to_check)
def test_can_edit_logged_in(self):
# first login as book owner user
self.client.login(
username=self.user.username,
password=PLAIN_USER_PASSWORD
)
response = self.client.get(self.dispatcher)
        # as the logged-in book owner you can edit the book
self.assertEquals(response.context['can_edit'], True) | agpl-3.0 | -3,166,875,531,639,531,500 | 32.901235 | 93 | 0.637611 | false |
baroquebobcat/pants | contrib/go/tests/python/pants_test/contrib/go/tasks/test_go_binary_create.py | 1 | 1949 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.build_graph.target import Target
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import touch
from pants_test.task_test_base import TaskTestBase
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.tasks.go_binary_create import GoBinaryCreate
class GoBinaryCreateTest(TaskTestBase):
@classmethod
def task_type(cls):
return GoBinaryCreate
def test_noop_empty(self):
task = self.create_task(self.context())
task.execute()
self.assertFalse(os.path.exists(task.dist_root))
def test_noop_na(self):
task = self.create_task(self.context(target_roots=[self.make_target(':a', Target)]))
task.execute()
self.assertFalse(os.path.exists(task.dist_root))
def test_execute(self):
with temporary_dir() as bin_source_dir:
def create_binary(name):
target = self.make_target(name, GoBinary)
executable = os.path.join(bin_source_dir, '{}.exe'.format(name))
touch(executable)
return target, executable
a, a_exe = create_binary('thing/a')
b, b_exe = create_binary('thing/b')
context = self.context(target_roots=[a, b])
context.products.safe_create_data('exec_binary', init_func=lambda: {a: a_exe, b: b_exe})
task = self.create_task(context)
task.execute()
binaries = self.buildroot_files(task.dist_root)
rel_dist_root = os.path.relpath(task.dist_root, self.build_root)
self.assertEqual({os.path.join(rel_dist_root, os.path.basename(a_exe)),
os.path.join(rel_dist_root, os.path.basename(b_exe))},
binaries)
| apache-2.0 | 3,385,666,084,271,358,000 | 33.803571 | 94 | 0.682401 | false |
niallrmurphy/pyvern | test_tree.py | 1 | 20890 | #!/usr/bin/env python
# encoding: utf-8
# Niall Richard Murphy <[email protected]>
"""Test the tree (gap-production) object."""
import sys
import constants
import random
import tree
# Perhaps unittest2 is available. Try to import it, for
# those cases where we are running python 2.7.
try:
import unittest2 as unittest
except ImportError:
import unittest
class NodeTest(unittest.TestCase):
def setUp(self):
self.n = tree.Node(supplied_data = "Test")
self.n2 = tree.Node(supplied_data = "Test2")
self.n3 = tree.Node(supplied_data = "Test3")
self.n4 = tree.Node(supplied_data = "Test4")
self.n5 = tree.Node(supplied_data = "Test5")
def test_node_get_used(self):
self.failUnless(self.n.used == False)
def test_node_set_used(self):
self.n.used = True
self.failUnless(self.n.used == True)
def test_node_get_data(self):
self.failUnless(self.n.GetData() == "Test")
def test_node_set_data(self):
self.n.SetData("Wobble")
self.failUnless(self.n.GetData() == "Wobble")
def test_node_getset_left(self):
self.n.SetLeft(self.n2)
self.failUnless(self.n.GetLeft() == self.n2)
def test_node_getset_right(self):
self.n.SetRight(self.n2)
self.failUnless(self.n.GetRight() == self.n2)
def test_node_getset_parent(self):
self.n.SetLeft(self.n2)
self.n2.SetParent(self.n)
self.n.SetRight(self.n3)
self.n3.SetParent(self.n)
self.failUnless(self.n2.GetParent() == self.n)
self.failUnless(self.n3.GetParent() == self.n)
def test_node_getset_level(self):
self.assertEqual(self.n.GetLevel(), 0)
self.n2.SetParent(self.n)
self.n.SetLeft(self.n2)
self.assertEqual(self.n2.GetLevel(), 1)
self.n2.SetLeft(self.n3)
self.n3.SetParent(self.n2)
self.assertEqual(self.n3.GetLevel(), 2)
def test_node_getset_leftright(self):
self.n.SetLeft(self.n2)
self.n2.SetParent(self.n)
self.n.SetRight(self.n3)
self.n3.SetParent(self.n)
self.assertEqual(self.n2.AmLeft(), True)
self.assertEqual(self.n3.AmRight(), True)
def test_node_amroot(self):
self.assertEqual(self.n.AmRoot(), True)
def test_node_getbinary(self):
self.n.SetLeft(self.n2)
self.n2.SetParent(self.n)
self.n.SetRight(self.n3)
self.n3.SetParent(self.n)
self.assertEqual(self.n2.GetBinary(), 0)
self.assertEqual(self.n3.GetBinary(), 1)
def test_node_get_path(self):
self.n.SetLeft(self.n2)
self.n2.SetParent(self.n)
self.n.SetRight(self.n3)
self.n3.SetParent(self.n)
self.n4.SetParent(self.n2)
self.n5.SetParent(self.n2)
self.n2.SetLeft(self.n4)
self.n2.SetRight(self.n5)
self.assertEqual(self.n2.GetPath(), "0")
self.assertEqual(self.n3.GetPath(), "1")
self.assertEqual(self.n4.GetPath(), "00")
self.assertEqual(self.n5.GetPath(), "01")
class TreeTest(unittest.TestCase):
def setUp(self):
self.t = tree.Tree()
def structuralSetUp(self):
# Setup for structual comparisons & marking-as-used
self.n = tree.Node(supplied_data = "Root")
self.n2 = tree.Node(supplied_data = "Test2")
self.n3 = tree.Node(supplied_data = "Test3")
self.n4 = tree.Node(supplied_data = "Test4")
self.n5 = tree.Node(supplied_data = "Test5")
self.n6 = tree.Node(supplied_data = "Test6")
self.n7 = tree.Node(supplied_data = "Test7")
self.t.SetRoot(self.n)
self.t.GetRoot().SetLeft(self.n2)
self.n2.SetParent(self.n)
self.t.GetRoot().GetLeft().SetLeft(self.n3)
self.n3.SetParent(self.n2)
self.t.GetRoot().GetLeft().SetRight(self.n4)
self.n4.SetParent(self.n2)
self.t.GetRoot().SetRight(self.n5)
self.n5.SetParent(self.n)
self.t.GetRoot().GetRight().SetLeft(self.n6)
self.n6.SetParent(self.n5)
self.t.GetRoot().GetRight().SetRight(self.n7)
self.n7.SetParent(self.n5)
self.n3.used = True
self.n4.used = True
self.n6.used = True
self.n7.used = True
def test_tree_new(self):
self.failUnless('Insert' in dir(self.t))
def test_tree_path_to_dot_quad(self):
binstr = "1111"
x = self.t.PathToDotQuad(binstr, 4)
self.assertEqual(x, "240.0.0.0/4")
binstr = "10100111111"
y = self.t.PathToDotQuad(binstr, 16)
self.assertEqual(y, "167.224.0.0/16")
def test_tree_get_root_properties(self):
self.failUnless(self.t.GetRoot().GetData() == 'Root')
self.failUnless(self.t.GetRoot().GetLeft() == None)
self.failUnless(self.t.GetRoot().GetRight() == None)
self.failUnless(self.t.GetRoot().GetParent() == None)
def test_tree_generate_for_prefix(self):
for x in self.t.GenerateForPrefix(2):
self.failUnless(x in ['0.0.0.0/2', '64.0.0.0/2',
'128.0.0.0/2', '192.0.0.0/2'])
def test_tree_insert_default_route(self):
obj = self.t.Insert('0.0.0.0/0', "test03point5", test_dup = False)
self.assertEqual(obj, self.t.GetRoot())
def test_tree_structural_comparison(self):
# N
# N2 N5
# N3 N4 N6 N7
self.structuralSetUp()
for x in self.t.IterateNodes():
self.failUnless(x in ['0.0.0.0/2', '64.0.0.0/2',
'128.0.0.0/2', '192.0.0.0/2'])
self.t2 = tree.Tree()
self.t2.Insert('0.0.0.0/2', 'structural')
self.t2.Insert('64.0.0.0/2', 'structural')
self.t2.Insert('128.0.0.0/2', 'structural')
self.t2.Insert('192.0.0.0/2', 'structural')
for x in self.t2.IterateNodes():
self.failUnless(x in ['0.0.0.0/2', '64.0.0.0/2',
'128.0.0.0/2', '192.0.0.0/2'])
def test_tree_follow_chain(self):
self.t.Insert('192.168.0.0/16', 'test_tree_follow_chain')
obj = self.t.Lookup('192.168.0.0/16')
current = obj
self.assertEqual(current.GetLevel(), 16)
while current != self.t.root:
old_level = current.GetLevel()
current = current.GetParent()
new_level = current.GetLevel()
self.assertEqual(old_level, new_level + 1)
# TODO(niallm): check for membership of array [n - level] -> 192.168.0.0 here
self.t.Insert('192.169.0.0/16', 'test_tree_follow_chain_2')
new_obj = self.t.Lookup('192.169.0.0/16', 'test_tree_follow_chain_3')
self.assertEqual(obj.GetParent(), new_obj.GetParent())
def test_tree_recursive_marking(self):
self.structuralSetUp()
self.assertEqual(self.n2.used, False)
self.t.CheckRecursivelyUsed(self.n3)
self.assertEqual(self.n2.used, True)
self.assertEqual(self.n.used, False)
self.n5.used = True
self.t.CheckRecursivelyUsed(self.n3)
self.assertEqual(self.n.used, True)
def test_tree_insert_one_prefix_left(self):
obj = self.t.Insert('0.0.0.0/1', "testInsertSmall")
data = obj.GetData()
used = obj.used
left = obj.GetLeft()
right = obj.GetRight()
parent = obj.GetParent()
level = obj.GetLevel()
root = self.t.GetRoot()
self.assertEqual(data, "testInsertSmall")
self.assertEqual(used, True)
self.assertEqual(parent, root)
self.assertEqual(left, None)
self.assertEqual(right, None)
self.failUnless(obj.GetParent().GetLeft() == obj)
self.assertEqual(level, 1)
def test_tree_insert_flags(self):
result = self.t.Insert('0.0.0.0/8', '4.5treeobj', mark_used = False,
test_used = True, test_none = False)
self.assertEqual(result.used, False)
def test_tree_insert_two_prefixes_getbinary(self):
obj = self.t.Insert('0.0.0.0/1', "testInsertSmall")
bin = obj.GetBinary()
self.failUnless(str(bin) == "0")
obj = self.t.Insert('128.0.0.0/1', "testInsertSmall")
bin = obj.GetBinary()
self.failUnless(str(bin) == "1")
def test_tree_insert_one_prefix_right(self):
obj = self.t.Insert('128.0.0.0/1', "testInsertSmall")
data = obj.GetData()
used = obj.used
left = obj.GetLeft()
right = obj.GetRight()
parent = obj.GetParent()
level = obj.GetLevel()
path = obj.GetPath()
self.assertEqual(data, "testInsertSmall")
self.assertEqual(used, True)
self.assertEqual(parent, self.t.GetRoot())
self.assertEqual(left, None)
self.assertEqual(right, None)
self.assertEqual(obj.GetParent().GetRight(), obj)
self.assertEqual(level, 1)
self.assertEqual(path, "1")
def test_tree_insert_one_longer_prefix(self):
obj = self.t.Insert('10.0.0.0/8', "testInsertLarge")
data = obj.GetData()
used = obj.used
left = obj.GetLeft()
right = obj.GetRight()
parent = obj.GetParent()
level = obj.GetLevel()
path = obj.GetPath()
self.failUnless(obj.GetData() == 'testInsertLarge')
self.assertEqual(right, None)
self.assertEqual(left, None)
self.assertEqual(level, 8)
self.assertEqual(path, "00001010")
def test_tree_get_path_to_real_prefix(self):
obj = self.t.Insert('10.0.0.0/8', "testGetPath")
path = obj.GetPath()
self.failUnless(path == "00001010", "unexpected path to node: [%s] " % path)
obj = self.t.Insert('137.43.0.0/16', "testInsertLarge")
path = obj.GetPath()
self.failUnless(path == "1000100100101011", "unexpected path to node: [%s] " % path)
def test_tree_lookup_succeed(self):
obj = self.t.Insert('10.0.0.0/8', "testLookup")
obj2 = self.t.Lookup('10.0.0.0/8')
self.assertEqual(obj, obj2)
def test_tree_lookup_fail(self):
obj = self.t.Insert('10.0.0.0/8', "testNegLookup")
obj2 = self.t.Lookup('127.0.0.1')
self.assertEqual(obj2, None)
self.assertNotEqual(obj, None)
def test_tree_lookup_funky(self):
for count in range(4,12):
objdict = {}
total_route_set = []
for route in self.t.GenerateForPrefix(count):
total_route_set.append(route)
      picks = random.sample(total_route_set, count // 2)
for item in picks:
objdict[item] = self.t.Insert(item,
"complex_find_gap", mark_used = False)
for item in total_route_set:
if item in picks:
self.assertEqual(self.t.Lookup(item), objdict[item],
"Picks lookup [%s] got [%s]" % (self.t.Lookup(item),
objdict[item]))
else:
self.assertEqual(self.t.Lookup(item), None,
"Non-picks lookup get [%s] not none" %
self.t.Lookup(item))
def test_insert_duplicate_fails(self):
#self.t.debug=30
obj1 = self.t.Insert('137.43.0.0/16', 'testInsertDup')
self.assertEqual(False, self.t.Insert('137.43.0.0/16',
'testInsertDup'))
#self.t.debug=0
def test_tree_quick_insert_multiple_prefixes(self):
obj1 = self.t.Insert('0.0.0.0/8', "testInsertMultiple")
obj2 = self.t.Insert('1.0.0.0/8', "testInsertMultiple")
data1 = obj1.GetData()
used1 = obj1.used
left1 = obj1.GetLeft()
right1 = obj1.GetRight()
parent1 = obj1.GetParent()
level1 = obj1.GetLevel()
left2 = obj2.GetLeft()
right2 = obj2.GetRight()
parent2 = obj2.GetParent()
level2 = obj2.GetLevel()
self.assertEqual(data1, 'testInsertMultiple')
self.assertEqual(left1, None)
self.assertEqual(left2, None)
self.assertEqual(level1, 8)
self.assertEqual(level2, 8)
class TreeTestGaps(unittest.TestCase):
def setUp(self):
self.t = tree.Tree()
def test_tree_quick_find_gap_vanilla(self):
# Simple insert
self.t.Insert('0.0.0.0/8', "testFindGap")
ret = self.t.FindGap(8)
self.assertEqual(ret, "1.0.0.0/8",
"Find gap returned [%s], not 1.0.0.0/8" % ret)
# Route of same length immediately beside
self.t.Insert('1.0.0.0/8', "testFindGap2")
ret2 = self.t.FindGap(8)
self.assertEqual(ret2, "2.0.0.0/8",
"Find gap returned [%s], not 2.0.0.0/8" % ret2)
# And up two levels and down again
self.t.Insert("2.0.0.0/8", "testFindGap")
ret3 = self.t.FindGap(8)
self.assertEqual(ret3, "3.0.0.0/8",
"Find gap returned [%s], not " % ret3)
# Insert covering route (0-3/8)
self.t.Insert("0.0.0.0/6", "testFindGap")
ret4 = self.t.FindGap(6)
self.assertEqual(ret4, "4.0.0.0/6")
# Find a large gap after some small routes inserted
self.t.Insert("0.0.0.0/4", "testFindGap")
ret5 = self.t.FindGap(6)
self.assertEqual(ret5, "16.0.0.0/6")
# Bang over to the other side of the tree altogether
ret6 = self.t.FindGap(1)
self.assertEqual(ret6, "128.0.0.0/1")
def test_tree_quick_find_gap_random(self):
for count in range(1,10):
self.t = None
self.t = tree.Tree()
# Looking for route with a relevant prefix size.
# Generate a list of all possible prefixes leaving out one at random.
total_route_set = []
for route in self.t.GenerateForPrefix(count):
total_route_set.append(route)
remove_me = random.choice(total_route_set)
total_route_set.remove(remove_me)
for item in total_route_set:
obj1 = self.t.Insert(item, "testFindGap2")
found = self.t.FindGap(count)
self.assertEqual(found, remove_me, "Find gap gave [%s] not expected \
[%s]" % (found, remove_me))
def test_tree_different_size_find_gap(self):
self.t.Insert('0.0.0.0/8', 'reason1')
self.t.Insert('1.0.0.0/8', 'reason2')
r1 = self.t.FindGap(8)
self.assertEqual(r1, '2.0.0.0/8')
self.t.Insert(r1, 'reason1')
r2 = self.t.FindGap(8)
self.assertEqual(r2, '3.0.0.0/8')
self.t.Insert(r2, 'reason2')
r3 = self.t.FindGap(20)
self.assertEqual(r3, '4.0.0.0/20')
self.t.Insert(r3, 'reason3')
r4 = self.t.FindGap(8)
self.assertEqual(r4, '5.0.0.0/8')
r5 = self.t.FindGap(10)
self.assertEqual(r5, '4.64.0.0/10')
self.t.Insert(r5, 'reason5')
r6 = self.t.FindGap(6)
self.assertEqual(r6, '8.0.0.0/6')
r7 = self.t.FindGap(30)
self.assertEqual(r7, '4.0.16.0/30')
def test_tree_different_size_find_gap_from(self):
#self.t.debug = 10
self.t.Insert('0.0.0.0/8', 'reason1')
self.t.Insert('1.0.0.0/8', 'reason2')
r1 = self.t.FindGapFrom('0.0.0.0/1', 8)
self.assertEqual(r1, '2.0.0.0/8')
self.t.Insert(r1, 'reason1')
r2 = self.t.FindGapFrom('0.0.0.0/1', 8)
self.assertEqual(r2, '3.0.0.0/8')
self.t.Insert(r2, 'reason2')
r3 = self.t.FindGapFrom('0.0.0.0/1', 20)
self.assertEqual(r3, '4.0.0.0/20')
self.t.Insert(r3, 'reason3')
r4 = self.t.FindGapFrom('0.0.0.0/1', 8)
self.assertEqual(r4, '5.0.0.0/8')
r5 = self.t.FindGapFrom('0.0.0.0/1', 10)
self.assertEqual(r5, '4.64.0.0/10')
self.t.Insert(r5, 'reason5')
r6 = self.t.FindGapFrom('0.0.0.0/1', 6)
self.assertEqual(r6, '8.0.0.0/6')
r7 = self.t.FindGapFrom('0.0.0.0/1', 30)
self.assertEqual(r7, '4.0.16.0/30')
def test_tree_find_gap(self):
for count in range(4,12):
total_route_set = []
for route in self.t.GenerateForPrefix(count):
total_route_set.append(route)
      picks = random.sample(total_route_set, count // 2)
for item in picks:
obj1 = self.t.Insert(item, "testFindGap3")
for item in picks:
gap = self.t.FindGap(count)
self.failUnless(gap in total_route_set, "Gap found [%s] not in total \
route set!" % gap)
if gap not in picks:
# Add it and try again
self.t.Insert(gap, "testFindGap3Update")
else:
        print("??????")
def test_tree_find_gap_from_simple(self):
self.t.Insert("0.0.0.0/8", 'testFindGapFrom', mark_used = False,
test_none = False)
gap = self.t.FindGapFrom("0.0.0.0/8", 24)
self.assertEqual(gap, "0.0.0.0/24",
"Should find 0.0.0.0/24, instead found [%s]" % gap)
gap = self.t.FindGapFrom("1.0.0.0/8", 24)
self.assertEqual(gap, None,
"Should find no gap, instead got [%s]" % gap)
def test_tree_find_gap_from_simple_higher(self):
self.t.Insert("0.0.0.0/8", 'testFindGapFrom', mark_used = False,
test_none = False)
gap = self.t.FindGapFrom("0.0.0.0/8", 7)
self.assertEqual(gap, None,
"Should find no gap, instead got [%s]" % gap)
def test_tree_find_gap_from_simple_samesize(self):
self.t.Insert("0.0.0.0/8", 'testFindGapFrom', mark_used = False,
test_none = False)
gap = self.t.FindGapFrom("0.0.0.0/8", 8)
self.assertEqual(gap, "0.0.0.0/8")
def test_tree_find_gap_from_middling(self):
self.t.Insert("172.16.0.0/12", "findgapmiddling", mark_used = False,
test_none = False)
gap = self.t.FindGapFrom("172.16.0.0/12", 16)
self.assertEqual(gap, "172.16.0.0/16")
self.t.Insert("172.16.0.0/16", "findgapmiddling")
gap = self.t.FindGapFrom("172.16.0.0/12", 16)
self.assertEqual(gap, "172.17.0.0/16")
self.t.Insert("172.17.0.0/16", "findgapmiddling")
gap = self.t.FindGapFrom("172.16.0.0/12", 24)
self.assertEqual(gap, "172.18.0.0/24")
self.t.Insert("172.16.0.0/13", "findgapmiddling")
self.t.Insert("172.24.0.0/13", "findgapmiddling")
gap = self.t.FindGapFrom("172.16.0.0/12", 8)
self.assertEqual(gap, None)
def test_tree_find_gap_middling_occupied(self):
node = self.t.Insert("172.16.0.0/12", "findgapmiddling", mark_used = False,
test_none = False)
gap = self.t.FindGapFrom("172.16.0.0/12", 16)
self.assertEqual(gap, "172.16.0.0/16")
node.used = True
gap = self.t.FindGapFrom("172.16.0.0/12", 16)
self.assertEqual(gap, None)
def test_tree_find_gap_from_complex(self):
for count in range(4,12):
total_route_set = []
for route in self.t.GenerateForPrefix(count):
total_route_set.append(route)
      picks = random.sample(total_route_set, count // 2)
for item in picks:
obj1 = self.t.Insert(item, "complex_find_gap", mark_used = False)
for item in total_route_set:
if item in picks:
gap = self.t.FindGapFrom(item, count)
self.assertEqual(gap, item, "Find gap from gave [%s] not expected \
[%s]" % (gap, item))
else:
gap = self.t.FindGapFrom(item, 24)
self.assertEqual(gap, None)
class TreeIteration(unittest.TestCase):
def setUp(self):
self.t = tree.Tree()
def test_tree_iterate_nodes(self):
compare_list = []
for item in self.t.GenerateForPrefix(3):
obj1 = self.t.Insert(item, "testIterateNodes")
compare_list.append(item)
for node in self.t.IterateNodes():
compare_list.remove(node)
self.assertEqual(compare_list, [])
def test_tree_only_supernets(self):
#self.t.debug = 1
#self.t.Insert('199.0.0.0/8', "walk prob root", mark_used = False)
#self.assertEqual(self.t.Lookup('199.0.0.0/8').GetData(), "walk prob root")
original_routes = ['199.4.32.0/19', '199.4.64.0/18', '199.4.128.0/24', '199.4.130.0/23',
'199.4.132.0/24', '199.4.134.0/23', '199.4.136.0/24', '199.4.139.0/24',
'199.4.140.0/24', '199.4.141.0/24']
for route in original_routes:
self.t.Insert(route, "walk problem", mark_used = True, propagate_used = True)
result = []
for f in self.t.IterateNodes(prefix='199.0.0.0/8', top_used=True):
result.append(f)
result2 = ['199.4.32.0/19', '199.4.64.0/18', '199.4.128.0/24', '199.4.130.0/23',
'199.4.132.0/24', '199.4.134.0/23', '199.4.136.0/24', '199.4.139.0/24',
'199.4.140.0/23']
self.assertEqual(result, result2)
class TreeSlowTests(unittest.TestCase):
def setUp(self):
self.t = tree.Tree()
def test_tree_slow_13_treeobj_find_gap_exhaust(self):
self.t.Insert('0.0.0.0/8', "find_gap_exhaust")
route = self.t.FindGap(8)
while route != None:
self.t.Insert(route, "find_gap_exhaust_extend")
route = self.t.FindGap(8)
def test_tree_slow_14_treeobj_find_gap_too_large(self):
self.t.Insert('0.0.0.0/8', "find_gap_exhaust")
route = self.t.FindGap(8)
while route != None:
self.t.Insert(route, "find_gap_exhaust_large")
route = self.t.FindGap(7)
class TreeComparisonTests(unittest.TestCase):
def setUp(self):
self.t = tree.Tree()
def test_compare_tree_1(self):
self.t2 = tree.Tree()
self.t.Insert('1.0.0.0/8', 'reason1')
self.t2.Insert('1.0.0.0/8', 'reason2')
self.assertEqual(self.t, self.t2)
def test_compare_tree_2(self):
self.t2 = tree.Tree()
self.t.Insert('192.168.0.0/23', 'reason1', mark_used=True)
self.t2.Insert('192.168.0.0/24', 'reason2', mark_used=True, propagate_used=True)
self.t2.Insert('192.168.1.0/24', 'reason3', mark_used=True, propagate_used=True)
#print "T2"
#for x in self.t2.IterateNodes():
#print "X T2", x
#print "T1"
#for y in self.t.IterateNodes():
#print "Y T", y
#print "ASSERT"
self.assertEqual(self.t, self.t2)
def test_compare_tree_3(self):
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -4,469,209,220,088,208,400 | 34.709402 | 94 | 0.613787 | false |
kmahyyg/learn_py3 | antiscanhttp.py | 1 | 2192 | #!/usr/bin/env python3
# -*- coding : utf-8 -*-
# http://speedtest.tele2.net/10GB.zip
# https://docs.python.org/3/library/http.server.html
# http://blog.csdn.net/cteng/article/details/51584766
"""
Anti-HTTP-Scanner : Redirect all requests to 10GB speedtest file
Patrick Young 2017/10/8
usage: "antiscanhttp.py" [-h] [--port] [--ip] redirect_url
positional arguments:
redirect_url (such as http://speedtest.tele2.net/10GB.zip)
optional arguments:
-h,--help Show this help message and exit
  --port,-p Port to listen on, default 80
--ip,-i Host interface to listen on
redirect_url Recommend to use 'http://speedtest.tele2.net/10GB.zip'
"""
import socketserver
import http.server
import argparse
def redirect_handler(url):
class RedirectHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def do_POST(self):
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def do_HEAD(self):
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def do_PUT(self):
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
return RedirectHandler
def main():
parser = argparse.ArgumentParser(description='Anti HTTP Scanner redirector')
parser.add_argument('--port', '-p', action='store', type=int, default=80, help='Server listens on this port')
parser.add_argument('--ip', '-i', action='store', default='', help='Host Interface to listen on')
parser.add_argument('redirect_url', action='store',help='(such as http://speedtest.tele2.net/10GB.zip)')
userinput = parser.parse_args()
redirect_url = userinput.redirect_url
port = userinput.port
host = userinput.ip
redirect_handle = redirect_handler(redirect_url)
handler = socketserver.TCPServer((host, port), redirect_handle)
print('Server now is running on the port %s' % port)
handler.serve_forever()
if __name__ == "__main__":
main()
| agpl-3.0 | 8,516,625,122,009,231,000 | 31.235294 | 113 | 0.649635 | false |
brendanlong/dash-ts-tools | dash_initialization_segmenter.py | 1 | 4159 | #!/usr/bin/env python3
import argparse
import os
from ts import *
def write_ts(file_name, packets, force):
logging.info("Writing %s", file_name)
if not force and os.path.exists(file_name):
choice = input(
"Output file {} already exists. Overwrite it? "
"[y/N] ".format(file_name)).lower()
if choice != "y":
return
with open(file_name, "wb") as f:
for packet in packets:
f.write(packet.bytes)
def generate_initialization_segment(
segment_file_names, segment_template, out_file_name, force):
pat = None
pat_ts = None
pmt = None
pmt_ts = None
segment_ts = {}
pmt_pid = None
for segment_file_name in segment_file_names:
logging.info("Reading %s", segment_file_name)
current_segment_ts = []
segment_ts[segment_file_name] = current_segment_ts
for ts in read_ts(segment_file_name):
if ts.pid == ProgramAssociationTable.PID:
new_pat = ProgramAssociationTable(ts.payload)
if pat is None:
pat = new_pat
pat_ts = ts
programs = list(pat.programs.values())
if len(programs) != 1:
raise Exception(
"PAT has {} programs, but DASH only allows 1 "
"program.".format(len(pat.programs)))
if pmt_pid is not None and programs[0] != pmt_pid:
raise Exception("PAT has new PMT PID. This program has "
"not been tested to handled this case.")
pmt_pid = programs[0]
elif new_pat != pat:
raise Exception("Cannot generate initialization segment "
"for segment with multiple PAT's. {} != {"
"}".format(new_pat, pat))
elif ts.pid == pmt_pid:
new_pmt = ProgramMapTable(ts.payload)
if pmt is None:
pmt = new_pmt
pmt_ts = ts
elif new_pmt != pmt:
raise Exception("Cannot generate initialization segment "
"for segment with multiple PMT's. {} != {"
"}".format(new_pmt, pmt))
else:
current_segment_ts.append(ts)
logging.debug("Common PSI is:\nPAT: %s\nPMT: %s", pat, pmt)
write_ts(out_file_name, [pat_ts, pmt_ts], force)
for segment_file_name in segment_file_names:
path, file_name = os.path.split(segment_file_name)
name_part, _ = os.path.splitext(file_name)
segment_out_file_name = segment_template.format_map(
{"path": path, "name_part": name_part})
write_ts(segment_out_file_name, segment_ts[segment_file_name], force)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"media_segment", nargs="+",
help="The media segments to create an initialization segment for.")
parser.add_argument(
"--segment-template", "-s",
help="Template for segment index files. {name_part} will be replaced "
"with the file name of the media segment minus the suffix (.ts). "
"{path} will be replaced with the full path to the media segment.",
default="{path}/{name_part}.ts")
parser.add_argument(
"--out", "-o", required=True,
help="The file to write the initialization segment to.")
parser.add_argument(
"--force", "-f", action="store_true", default=False,
help="Overwrite output files without prompting.")
parser.add_argument(
"--verbose", "-v", action="store_true", default=False,
help="Enable verbose output.")
args = parser.parse_args()
logging.basicConfig(
format='%(levelname)s: %(message)s',
level=logging.DEBUG if args.verbose else logging.INFO)
generate_initialization_segment(
args.media_segment, args.segment_template, args.out, args.force)
| bsd-2-clause | -5,032,914,013,594,060,000 | 40.178218 | 80 | 0.5434 | false |
MarcoVogt/basil | tests/test_RegisterHardwareLayer.py | 1 | 9389 | #
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
import unittest
from basil.dut import Dut
from basil.HL.RegisterHardwareLayer import RegisterHardwareLayer
import os
_test_init = {
'REG_TEST_INIT': 15,
'REG1': 120,
'REG_BYTE_ARRAY': [4, 3, 2, 1]
}
class test_RegisterHardwareLayer(RegisterHardwareLayer):
'''Register Hardware Layer.
Implementation of advanced register operations.
'''
_registers = {
'REG1': {'default': 12, 'descr': {'addr': 0, 'size': 15, 'offset': 0}},
'REG2': {'default': 1, 'descr': {'addr': 1, 'size': 1, 'offset': 7}},
'REG3': {'default': 2 ** 16 - 1, 'descr': {'addr': 2, 'size': 16, 'offset': 0}},
'REG4_RO': {'default': 0, 'descr': {'addr': 4, 'size': 8, 'properties': ['readonly']}},
'REG5_WO': {'default': 0, 'descr': {'addr': 5, 'size': 8, 'properties': ['writeonly']}},
'REG_TEST_INIT': {'descr': {'addr': 6, 'size': 8}},
'REG_BYTE_ARRAY': {'default': [1, 2, 3, 4], 'descr': {'addr': 16, 'size': 4, 'properties': ['bytearray']}}
}
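    # Each descriptor maps a register name to a byte address, a size in bits and
    # a bit offset within the underlying transfer-layer memory; e.g. REG2 is a
    # single bit at bit offset 7 of address 1, so its default value of 1 appears
    # as 128 (0x80) in the expected memory dumps in the tests below.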
class TestRegisterHardwareLayer(unittest.TestCase):
def setUp(self):
self.dut = Dut(os.path.join(os.path.dirname(__file__), 'test_RegisterHardwareLayer.yaml'))
self.dut.init()
def test_init_non_existing(self):
with self.assertRaises(KeyError):
self.dut.init({"test_register": {"NON_EXISTING": 1}})
def test_lazy_programming(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO = 255
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 255, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO # get value from write-only register, but this will write zero instead
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_get_configuration(self):
self.dut.set_configuration(os.path.join(os.path.dirname(__file__), 'test_RegisterHardwareLayer_configuration.yaml'))
conf = self.dut['test_register'].get_configuration()
self.assertDictEqual({'REG1': 257, 'REG2': 1, 'REG3': 2, 'REG_TEST_INIT': 0, 'REG_BYTE_ARRAY': [1, 2, 3, 4]}, conf)
def test_set_configuration(self):
self.dut.set_configuration(os.path.join(os.path.dirname(__file__), 'test_RegisterHardwareLayer_configuration.yaml'))
self.assertDictEqual({0: 1, 1: 129, 2: 2, 3: 0, 5: 5, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_set_configuration_non_existing(self):
with self.assertRaises(KeyError):
self.dut.set_configuration({"test_register": {"NON_EXISTING": 1}})
def test_read_only(self):
self.assertRaises(IOError, self.dut['test_register']._set, 'REG4_RO', value=0)
# def test_write_only(self):
# self.assertRaises(IOError, self.dut['test_register']._get, 'REG5_WO')
def test_write_only_lazy_programming(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO = 20
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 20, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.assertIs(None, self.dut['test_register']._get('REG5_WO'))
def test_set_default(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_set_attribute_add(self):
val = self.dut['test_register']._registers['REG1']['default']
self.dut['test_register'].REG1 = val # 12
mem = self.dut['dummy_tl'].mem.copy()
self.dut['test_register'].REG1 += 1 # 13
mem[0] = 13
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_write_read_reg(self):
for reg in ['REG1', 'REG2', 'REG3']:
val = self.dut['test_register']._registers[reg]['default']
self.dut['test_register']._set(reg, val)
ret_val = self.dut['test_register']._get(reg)
self.assertEqual(ret_val, val)
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_set_attribute_by_value(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG2 = 0
mem = self.dut['dummy_tl'].mem.copy()
mem[1] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_set_attribute_by_string(self):
mem = self.dut['dummy_tl'].mem.copy()
        self.dut['test_register'].REG3 = '1010101010101010' # 43690 (0xAAAA)
mem[2] = 170
mem[3] = 170
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_get_attribute_by_string(self):
self.dut['test_register'].REG3 = '1010101010101010' # 43690
self.assertEqual(43690, self.dut['test_register'].REG3)
def test_set_attribute_too_long_string(self):
val = '11010101010101010' # 17 bit
self.assertRaises(ValueError, self.dut['test_register']._set, 'REG3', value=val)
def test_set_attribute_dict_access(self):
self.dut['test_register']['REG1'] = 27306 # 27306
self.assertEqual(27306, self.dut['test_register']['REG1'])
def test_set_attribute_too_big_val(self):
val = 2 ** 16 # max 2 ** 16 - 1
self.assertRaises(ValueError, self.dut['test_register']._set, 'REG3', value=val)
def test_set_by_function(self):
self.dut['test_register'].set_REG1(27308)
self.assertEqual(27308, self.dut['test_register']['REG1'])
def test_get_by_function(self):
        self.dut['test_register']['REG1'] = 27305 # 27305
ret = self.dut['test_register'].get_REG1()
self.assertEqual(ret, self.dut['test_register']['REG1'])
def test_init_with_dict(self):
self.dut['test_register'].set_default()
self.dut.init({'test_register': _test_init})
conf = self.dut.get_configuration()
self.assertDictEqual({'test_register': {'REG1': 120, 'REG2': 1, 'REG3': 65535, 'REG_TEST_INIT': 15, 'REG_BYTE_ARRAY': [4, 3, 2, 1]}, 'dummy_tl': {}}, conf)
def test_get_dut_configuration(self):
self.dut['test_register'].set_default()
conf = self.dut.get_configuration()
self.assertDictEqual({'test_register': {'REG1': 12, 'REG2': 1, 'REG3': 65535, 'REG_TEST_INIT': 0, 'REG_BYTE_ARRAY': [1, 2, 3, 4]}, 'dummy_tl': {}}, conf)
def test_get_set_value(self):
for val in range(256):
self.dut['test_register'].set_value(val, 0, size=8, offset=0)
ret_val = self.dut['test_register'].get_value(0, size=8, offset=0)
self.assertEqual(ret_val, val)
def test_write_read_reg_with_bit_str(self):
val = '00110110' # 54
self.dut['test_register'].set_value(val, 0, size=8, offset=0)
ret_val = self.dut['test_register'].get_value(0, size=8, offset=0)
self.assertEqual(ret_val, int(val, base=2))
def test_write_read_reg_with_offset(self):
for offset in range(32):
val = 131
self.dut['test_register'].set_value(val, 0, size=8, offset=offset)
ret_val = self.dut['test_register'].get_value(0, size=8, offset=offset)
self.assertEqual(ret_val, val)
def test_write_read_reg_with_size(self):
for size in range(8, 33):
val = 131
self.dut['test_register'].set_value(val, 0, size=size, offset=7)
ret_val = self.dut['test_register'].get_value(0, size=size, offset=7)
self.assertEqual(ret_val, val)
def test_read_non_existing(self):
with self.assertRaises(KeyError):
self.dut['test_register'].NON_EXISTING
with self.assertRaises(KeyError):
self.dut['test_register']['NON_EXISTING']
with self.assertRaises(KeyError):
self.dut['test_register'].get_NON_EXISTING()
def test_write_non_existing(self):
with self.assertRaises(KeyError):
self.dut['test_register'].NON_EXISTING = 42
with self.assertRaises(KeyError):
self.dut['test_register']['NON_EXISTING'] = 42
with self.assertRaises(KeyError):
self.dut['test_register'].set_NON_EXISTING(42)
def test_wrong_size(self):
self.assertRaises(ValueError, self.dut['test_register'].set_value, 131, addr=0, size=7, offset=7)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -6,639,450,954,130,615,000 | 45.180905 | 163 | 0.575248 | false |
libvirt/libvirt-test-API | selftests/test_envparser.py | 1 | 5061 | import pytest
from unittest.mock import Mock, MagicMock, patch
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import os
from libvirttestapi.src import exception as exc
from libvirttestapi.src import env_parser
from libvirttestapi.utils import utils
class TestEnvParser():
def setup_method(self):
base_path = utils.get_base_path()
config_path = os.path.join(base_path, 'config', 'global.cfg')
self.configfile = config_path
@patch.object(ConfigParser.ConfigParser, "has_section")
def test_has_section_true(self, mock_has_section):
mock_has_section.return_value = True
test_parser = env_parser.Envparser(self.configfile)
result = test_parser.has_section("section")
assert result == True
@patch.object(ConfigParser.ConfigParser, "has_section")
def test_has_section_False(self, mock_has_section):
mock_has_section.return_value = False
test_parser = env_parser.Envparser(self.configfile)
result = test_parser.has_section("section")
assert result == False
@patch.object(ConfigParser.ConfigParser, "has_section")
@patch.object(ConfigParser.ConfigParser, "has_option")
def test_has_option_true(self, mock_has_option, mock_has_section):
mock_has_section.return_value = True
mock_has_option.return_value = True
test_parser = env_parser.Envparser(self.configfile)
result = test_parser.has_option("section", "option")
assert result == True
@patch.object(ConfigParser.ConfigParser, "has_section")
@patch.object(ConfigParser.ConfigParser, "has_option")
def test_has_option_False(self, mock_has_option, mock_has_section):
mock_has_section.return_value = True
mock_has_option.return_value = False
test_parser = env_parser.Envparser(self.configfile)
result = test_parser.has_option("section", "option")
assert result == False
@patch.object(ConfigParser.ConfigParser, "has_section")
@patch.object(ConfigParser.ConfigParser, "has_option")
def test_has_option_notcalled(self, mock_has_option, mock_has_section):
mock_has_section.return_value = False
test_parser = env_parser.Envparser(self.configfile)
with pytest.raises(exc.SectionDoesNotExist):
test_parser.has_option("section", "option")
@patch.object(env_parser.Envparser, "has_section")
def test_addsection_nonexecute(self, mock_has_section):
mock_has_section.return_value = True
test_parser = env_parser.Envparser(self.configfile)
with pytest.raises(exc.SectionExist):
test_parser.add_section("section")
@patch.object(ConfigParser.ConfigParser, "add_section")
@patch.object(env_parser.Envparser, "has_section")
def test_addsection_execute(self, mock_has_section, mock_add_section):
test_parser = env_parser.Envparser(self.configfile)
mock_has_section.return_value = False
test_parser.add_section("section")
assert mock_add_section.called == True
@patch.object(env_parser.Envparser, "has_option")
@patch.object(env_parser.Envparser, "has_section")
def test_removeoption_nonexecute(self, mock_has_section, mock_has_option):
mock_has_section.return_value = True
mock_has_option.return_value = False
test_parser = env_parser.Envparser(self.configfile)
with pytest.raises(exc.OptionDoesNotExist):
test_parser.remove_option("section", "option")
@patch.object(ConfigParser.ConfigParser, "remove_option")
@patch.object(env_parser.Envparser, "has_option")
@patch.object(env_parser.Envparser, "has_section")
def test_removeoption_execute(self, mock_has_section, mock_has_option, mock_remove_option):
mock_has_section.return_value = True
mock_has_option.return_value = True
test_parser = env_parser.Envparser(self.configfile)
test_parser.remove_option("section", "option")
assert mock_remove_option.called == True
@patch.object(ConfigParser.ConfigParser, "set")
@patch.object(env_parser.Envparser, "has_option")
@patch.object(env_parser.Envparser, "has_section")
def test_setvalue_nonexecute(self, mock_has_section, mock_has_option, mock_set):
mock_has_section.return_value = True
mock_has_option.return_value = False
test_parser = env_parser.Envparser(self.configfile)
with pytest.raises(exc.OptionDoesNotExist):
test_parser.set_value("section", "option", "value")
@patch.object(ConfigParser.ConfigParser, "set")
@patch.object(env_parser.Envparser, "has_option")
@patch.object(env_parser.Envparser, "has_section")
def test_setvalue_execute(self, mock_has_section, mock_has_option, mock_set):
mock_has_section.return_value = True
mock_has_option.return_value = True
test_parser = env_parser.Envparser(self.configfile)
test_parser.set_value("section", "option", "value")
assert mock_set.called == True
| gpl-2.0 | -3,375,050,663,111,439,000 | 43.787611 | 95 | 0.69018 | false |
mistercrunch/panoramix | superset/reports/api.py | 2 | 14710 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, Optional
from flask import g, request, Response
from flask_appbuilder.api import expose, permission_name, protect, rison, safe
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from marshmallow import ValidationError
from superset import is_feature_enabled
from superset.charts.filters import ChartFilter
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.dashboards.filters import DashboardAccessFilter
from superset.databases.filters import DatabaseFilter
from superset.models.reports import ReportSchedule
from superset.reports.commands.bulk_delete import BulkDeleteReportScheduleCommand
from superset.reports.commands.create import CreateReportScheduleCommand
from superset.reports.commands.delete import DeleteReportScheduleCommand
from superset.reports.commands.exceptions import (
ReportScheduleBulkDeleteFailedError,
ReportScheduleCreateFailedError,
ReportScheduleDeleteFailedError,
ReportScheduleForbiddenError,
ReportScheduleInvalidError,
ReportScheduleNotFoundError,
ReportScheduleUpdateFailedError,
)
from superset.reports.commands.update import UpdateReportScheduleCommand
from superset.reports.filters import ReportScheduleAllTextFilter
from superset.reports.schemas import (
get_delete_ids_schema,
openapi_spec_methods_override,
ReportSchedulePostSchema,
ReportSchedulePutSchema,
)
from superset.views.base_api import (
BaseSupersetModelRestApi,
RelatedFieldFilter,
statsd_metrics,
)
from superset.views.filters import FilterRelatedOwners
logger = logging.getLogger(__name__)
class ReportScheduleRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(ReportSchedule)
@before_request
def ensure_alert_reports_enabled(self) -> Optional[Response]:
if not is_feature_enabled("ALERT_REPORTS"):
return self.response_404()
return None
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.RELATED,
"bulk_delete", # not using RouteMethod since locally defined
}
class_permission_name = "ReportSchedule"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
resource_name = "report"
allow_browser_login = True
show_columns = [
"id",
"active",
"chart.id",
"chart.slice_name",
"context_markdown",
"crontab",
"dashboard.dashboard_title",
"dashboard.id",
"database.database_name",
"database.id",
"description",
"grace_period",
"last_eval_dttm",
"last_state",
"last_value",
"last_value_row_json",
"log_retention",
"name",
"owners.first_name",
"owners.id",
"owners.last_name",
"recipients.id",
"recipients.recipient_config_json",
"recipients.type",
"report_format",
"sql",
"type",
"validator_config_json",
"validator_type",
"working_timeout",
]
show_select_columns = show_columns + [
"chart.datasource_id",
"chart.datasource_type",
]
list_columns = [
"active",
"changed_by.first_name",
"changed_by.last_name",
"changed_on",
"changed_on_delta_humanized",
"created_by.first_name",
"created_by.last_name",
"created_on",
"crontab",
"crontab_humanized",
"id",
"last_eval_dttm",
"last_state",
"name",
"owners.first_name",
"owners.id",
"owners.last_name",
"recipients.id",
"recipients.type",
"type",
]
add_columns = [
"active",
"chart",
"context_markdown",
"crontab",
"dashboard",
"database",
"description",
"grace_period",
"log_retention",
"name",
"owners",
"recipients",
"report_format",
"sql",
"type",
"validator_config_json",
"validator_type",
"working_timeout",
]
edit_columns = add_columns
add_model_schema = ReportSchedulePostSchema()
edit_model_schema = ReportSchedulePutSchema()
order_columns = [
"active",
"created_by.first_name",
"changed_by.first_name",
"changed_on",
"changed_on_delta_humanized",
"created_on",
"crontab",
"last_eval_dttm",
"name",
"type",
"crontab_humanized",
]
search_columns = ["name", "active", "created_by", "type", "last_state"]
search_filters = {"name": [ReportScheduleAllTextFilter]}
allowed_rel_fields = {"owners", "chart", "dashboard", "database", "created_by"}
filter_rel_fields = {
"chart": [["id", ChartFilter, lambda: []]],
"dashboard": [["id", DashboardAccessFilter, lambda: []]],
"database": [["id", DatabaseFilter, lambda: []]],
}
text_field_rel_fields = {
"dashboard": "dashboard_title",
"chart": "slice_name",
"database": "database_name",
}
related_field_filters = {
"dashboard": "dashboard_title",
"chart": "slice_name",
"database": "database_name",
"owners": RelatedFieldFilter("first_name", FilterRelatedOwners),
}
apispec_parameter_schemas = {
"get_delete_ids_schema": get_delete_ids_schema,
}
openapi_spec_tag = "Report Schedules"
openapi_spec_methods = openapi_spec_methods_override
@expose("/<int:pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@permission_name("delete")
def delete(self, pk: int) -> Response:
"""Delete a Report Schedule
---
delete:
description: >-
Delete a Report Schedule
parameters:
- in: path
schema:
type: integer
name: pk
description: The report schedule pk
responses:
200:
description: Item deleted
content:
application/json:
schema:
type: object
properties:
message:
type: string
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteReportScheduleCommand(g.user, pk).run()
return self.response(200, message="OK")
except ReportScheduleNotFoundError:
return self.response_404()
except ReportScheduleForbiddenError:
return self.response_403()
except ReportScheduleDeleteFailedError as ex:
logger.error(
"Error deleting report schedule %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@permission_name("post")
def post(self) -> Response:
"""Creates a new Report Schedule
---
post:
description: >-
Create a new Report Schedule
requestBody:
description: Report Schedule schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Report schedule added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateReportScheduleCommand(g.user, item).run()
return self.response(201, id=new_model.id, result=item)
except ReportScheduleNotFoundError as ex:
return self.response_400(message=str(ex))
except ReportScheduleInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except ReportScheduleCreateFailedError as ex:
logger.error(
"Error creating report schedule %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<int:pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@permission_name("put")
    def put(self, pk: int) -> Response: # pylint: disable=too-many-return-statements
        """Updates a Report Schedule
---
put:
description: >-
Updates a Report Schedule
parameters:
- in: path
schema:
type: integer
name: pk
description: The Report Schedule pk
requestBody:
description: Report Schedule schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Report Schedule changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = UpdateReportScheduleCommand(g.user, pk, item).run()
return self.response(200, id=new_model.id, result=item)
except ReportScheduleNotFoundError:
return self.response_404()
except ReportScheduleInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except ReportScheduleForbiddenError:
return self.response_403()
except ReportScheduleUpdateFailedError as ex:
logger.error(
"Error updating report %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
    def bulk_delete(self, **kwargs: Any) -> Response:
        """Delete bulk Report Schedules
---
delete:
description: >-
Deletes multiple report schedules in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Report Schedule bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteReportScheduleCommand(g.user, item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d report schedule",
"Deleted %(num)d report schedules",
num=len(item_ids),
),
)
except ReportScheduleNotFoundError:
return self.response_404()
except ReportScheduleForbiddenError:
return self.response_403()
except ReportScheduleBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
| apache-2.0 | -7,320,515,751,932,594,000 | 32.205418 | 85 | 0.560639 | false |
adieu/django-invitation | invitation/models.py | 1 | 6880 | import os
import random
import datetime
from django.db import models
from django.conf import settings
from django.utils.http import int_to_base36
from django.utils.hashcompat import sha_constructor
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from registration.models import SHA1_RE
class InvitationKeyManager(models.Manager):
def get_key(self, invitation_key):
"""
Return InvitationKey, or None if it doesn't (or shouldn't) exist.
"""
try:
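            # An InvitationCode can be redeemed more than once: if the key matches
            # a code that still has redemptions left, return an unsaved InvitationKey
            # tied to the code's sender instead of looking one up in the database.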
code = InvitationCode.objects.get(code=invitation_key)
if self.filter(key=invitation_key).count() < code.redeem_limit:
key = self.model(key=invitation_key, from_user=code.from_user)
return key
except InvitationCode.DoesNotExist:
pass
# Don't bother hitting database if invitation_key doesn't match pattern.
if not SHA1_RE.search(invitation_key):
return None
try:
key = self.get(key=invitation_key)
except self.model.DoesNotExist:
return None
return key
def is_key_valid(self, invitation_key):
"""
Check if an ``InvitationKey`` is valid or not, returning a boolean,
``True`` if the key is valid.
"""
invitation_key = self.get_key(invitation_key)
return invitation_key and invitation_key.is_usable()
def create_invitation(self, user):
"""
        Create an ``InvitationKey`` and return it.
The key for the ``InvitationKey`` will be a SHA1 hash, generated
from a combination of the ``User``'s username and a random salt.
"""
salt = sha_constructor(str(random.random())).hexdigest()[:5]
key = sha_constructor("%s%s%s" % (datetime.datetime.now(), salt, user.username)).hexdigest()
return self.create(from_user=user, key=key)
def remaining_invitations_for_user(self, user):
"""
Return the number of remaining invitations for a given ``User``.
"""
invitation_user, created = InvitationUser.objects.get_or_create(
inviter=user,
defaults={'invitations_remaining': settings.INVITATIONS_PER_USER})
return invitation_user.invitations_remaining
def delete_expired_keys(self):
for key in self.all():
if key.key_expired():
key.delete()
class InvitationKey(models.Model):
key = models.CharField(_('invitation key'), max_length=40)
date_invited = models.DateTimeField(_('date invited'), default=datetime.datetime.now)
from_user = models.ForeignKey(User, related_name='invitations_sent')
registrant = models.ForeignKey(User, null=True, blank=True, related_name='invitations_used')
objects = InvitationKeyManager()
def __unicode__(self):
return u"Invitation from %s on %s" % (self.from_user.username, self.date_invited)
def is_usable(self):
"""
Return whether this key is still valid for registering a new user.
"""
return self.registrant is None and not self.key_expired()
def key_expired(self):
"""
Determine whether this ``InvitationKey`` has expired, returning
a boolean -- ``True`` if the key has expired.
The date the key has been created is incremented by the number of days
specified in the setting ``ACCOUNT_INVITATION_DAYS`` (which should be
the number of days after invite during which a user is allowed to
create their account); if the result is less than or equal to the
current date, the key has expired and this method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_INVITATION_DAYS)
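        # e.g. with ACCOUNT_INVITATION_DAYS = 7, a key sent on Jan 1st is
        # considered expired once the same time of day on Jan 8th is reached.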
return self.date_invited + expiration_date <= datetime.datetime.now()
key_expired.boolean = True
def mark_used(self, registrant):
"""
Note that this key has been used to register a new user.
"""
self.registrant = registrant
self.save()
def send_to(self, email):
"""
Send an invitation email to ``email``.
"""
current_site = Site.objects.get_current()
subject = render_to_string('invitation/invitation_email_subject.txt',
{ 'site': current_site,
'invitation_key': self })
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('invitation/invitation_email.txt',
{ 'invitation_key': self,
'expiration_days': settings.ACCOUNT_INVITATION_DAYS,
'site': current_site })
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [email])
class InvitationCode(models.Model):
code = models.CharField(_('invitation code'), max_length=40)
date_created = models.DateTimeField(_('date created'), default=datetime.datetime.now)
from_user = models.ForeignKey(User, related_name='invitation_code_set')
redeem_limit = models.IntegerField()
def __unicode__(self):
return u"Invitation code %s from %s" % (self.code, self.from_user.username)
class InvitationRequest(models.Model):
email = models.EmailField()
invited = models.BooleanField(default=False)
def __unicode__(self):
return u"InvitationRequest from %s" % self.email
class InvitationUser(models.Model):
inviter = models.ForeignKey(User, unique=True)
invitations_remaining = models.IntegerField()
def __unicode__(self):
return u"InvitationUser for %s" % self.inviter.username
def user_post_save(sender, instance, created, **kwargs):
"""Create InvitationUser for user when User is created."""
if created:
invitation_user = InvitationUser()
invitation_user.inviter = instance
invitation_user.invitations_remaining = settings.INVITATIONS_PER_USER
invitation_user.save()
models.signals.post_save.connect(user_post_save, sender=User)
def invitation_key_post_save(sender, instance, created, **kwargs):
"""Decrement invitations_remaining when InvitationKey is created."""
if created:
invitation_user = InvitationUser.objects.get(inviter=instance.from_user)
remaining = invitation_user.invitations_remaining
invitation_user.invitations_remaining = remaining-1
invitation_user.save()
models.signals.post_save.connect(invitation_key_post_save, sender=InvitationKey)
| bsd-3-clause | -6,604,925,912,574,837,000 | 37.435754 | 100 | 0.636628 | false |
drssoccer55/RLBot | src/main/python/rlbot/utils/structures/rigid_body_struct.py | 1 | 1072 | import ctypes
from rlbot.utils.structures.bot_input_struct import PlayerInput
from rlbot.utils.structures.game_data_struct import Vector3
from rlbot.utils.structures.start_match_structures import MAX_PLAYERS
class Quaternion(ctypes.Structure):
_fields_ = [("x", ctypes.c_float),
("y", ctypes.c_float),
("z", ctypes.c_float),
("w", ctypes.c_float)]
class RigidBodyState(ctypes.Structure):
_fields_ = [("frame", ctypes.c_int),
("location", Vector3),
("rotation", Quaternion),
("velocity", Vector3),
("angular_velocity", Vector3)]
class PlayerRigidBodyState(ctypes.Structure):
_fields_ = [("state", RigidBodyState),
("input", PlayerInput)]
class BallRigidBodyState(ctypes.Structure):
_fields_ = [("state", RigidBodyState)]
class RigidBodyTick(ctypes.Structure):
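    # one physics tick: the ball's rigid body state plus each player's state and controller input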
_fields_ = [("ball", BallRigidBodyState),
("players", PlayerRigidBodyState * MAX_PLAYERS),
("num_players", ctypes.c_int)]
| mit | -2,333,322,161,590,240,000 | 29.628571 | 69 | 0.609142 | false |
jepler/linuxcnc-mirror | src/emc/usr_intf/pncconf/pncconf.py | 1 | 292409 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# This is pncconf, a graphical configuration editor for LinuxCNC
# Chris Morley copyright 2009
# This is based on stepconf, a graphical configuration editor for linuxcnc
# Copyright 2007 Jeff Epler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
# this is for importing modules from lib/python/pncconf
BIN = os.path.dirname(__file__)
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
libdir = os.path.join(BASE, "lib", "python","pncconf")
sys.path.insert(0, libdir)
import errno
import time
import pickle
import shutil
import math
from optparse import Option, OptionParser
import textwrap
import locale
import copy
import fnmatch
import subprocess
import gobject
import gtk
import gtk.glade
import xml.dom.minidom
import xml.etree.ElementTree
import xml.etree.ElementPath
import traceback
from multifilebuilder import MultiFileBuilder
from touchy import preferences
from pncconf import pages
from pncconf import build_INI
from pncconf import build_HAL
from pncconf import tests
from pncconf import data
from pncconf import private_data
import cairo
import hal
#import mesatest
try:
LINUXCNCVERSION = os.environ['LINUXCNCVERSION']
except:
LINUXCNCVERSION = 'UNAVAILABLE'
def get_value(w):
try:
return w.get_value()
except AttributeError:
pass
oldlocale = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, "")
return locale.atof(w.get_text())
finally:
locale.setlocale(locale.LC_NUMERIC, oldlocale)
def makedirs(d):
try:
os.makedirs(d)
except os.error, detail:
if detail.errno != errno.EEXIST: raise
makedirs(os.path.expanduser("~/linuxcnc/configs"))
# otherwise, on hardy the user is shown spurious "[application] closed
# unexpectedly" messages but denied the ability to actually "report [the]
# problem"
def excepthook(exc_type, exc_obj, exc_tb):
try:
w = app.widgets.window1
except NameError:
w = None
lines = traceback.format_exception(exc_type, exc_obj, exc_tb)
m = gtk.MessageDialog(w,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
_("PNCconf encountered an error. The following "
"information may be useful in troubleshooting:\n\n")
+ "LinuxCNC Version: %s\n\n"% LINUXCNCVERSION + ''.join(lines))
m.show()
m.run()
m.destroy()
sys.excepthook = excepthook
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
LOCALEDIR = os.path.join(BASE, "share", "locale")
import gettext;
domain = "linuxcnc"
gettext.install(domain, localedir=LOCALEDIR, unicode=True)
locale.setlocale(locale.LC_ALL, '')
locale.bindtextdomain(domain, LOCALEDIR)
gettext.bindtextdomain(domain, LOCALEDIR)
def iceil(x):
if isinstance(x, (int, long)): return x
if isinstance(x, basestring): x = float(x)
return int(math.ceil(x))
prefs = preferences.preferences()
_DEBUGSTRING = ["NONE"]
debugstate = False
# a class for holding the glade widgets rather than searching for them each time
class Widgets:
def __init__(self, xml):
self._xml = xml
def __getattr__(self, attr):
r = self._xml.get_object(attr)
if r is None: raise AttributeError, "No widget %r" % attr
return r
def __getitem__(self, attr):
r = self._xml.get_object(attr)
if r is None: raise IndexError, "No widget %r" % attr
return r
class App:
def __init__(self, dbgstate=0):
print dbgstate
global debug
global dbg
global _PD
self.debugstate = dbgstate
dbg = self.dbg
if self.debugstate:
print 'PNCconf debug',dbgstate
global _DEBUGSTRING
_DEBUGSTRING = [dbgstate]
self.recursive_block = False
self.firmware_block = False
# Private data holds the array of pages to load, signals, and messages
_PD = self._p = private_data.Private_Data(self,BIN,BASE)
self.d = data.Data(self, _PD, BASE, LINUXCNCVERSION)
self.splash_screen()
#self.pbar.set_fraction(.2)
#while gtk.events_pending():
# gtk.main_iteration()
bar_size = 0
# build the glade files
self.builder = MultiFileBuilder()
self.builder.set_translation_domain(domain)
self.builder.add_from_file(os.path.join(self._p.DATADIR,'main_page.glade'))
self.builder.add_from_file(os.path.join(self._p.DATADIR,'dialogs.glade'))
self.builder.add_from_file(os.path.join(self._p.DATADIR,'help.glade'))
window = self.builder.get_object("window1")
notebook1 = self.builder.get_object("notebook1")
for name,y,z,a in (self._p.available_page):
if name == 'intro': continue
dbg("loading glade page REFERENCE:%s TITLE:%s INIT STATE: %s STATE:%s"% (name,y,z,a),mtype="glade")
if not z:
self.add_placeholder_page(name)
page = self.builder.get_object('label_%s'%name)
notebook1.append_page(page)
continue
self.builder.add_from_file(os.path.join(self._p.DATADIR, '%s.glade'%name))
page = self.builder.get_object(name)
notebook1.append_page(page)
self.pbar.set_fraction(bar_size)
while gtk.events_pending():
gtk.main_iteration()
bar_size += .0555
if not 'dev' in dbgstate:
notebook1.set_show_tabs(False)
self.widgets = Widgets(self.builder)
self.TESTS = tests.TESTS(self)
self.p = pages.Pages(self)
self.INI = build_INI.INI(self)
self.HAL = build_HAL.HAL(self)
self.builder.set_translation_domain(domain) # for locale translations
self.builder.connect_signals( self.p ) # register callbacks from Pages class
wiz_pic = gtk.gdk.pixbuf_new_from_file(self._p.WIZARD)
self.widgets.wizard_image.set_from_pixbuf(wiz_pic)
self.window.hide()
axisdiagram = os.path.join(self._p.HELPDIR,"axisdiagram1.png")
self.widgets.helppic0.set_from_file(axisdiagram)
axisdiagram = os.path.join(self._p.HELPDIR,"lathe_diagram.png")
self.widgets.helppic1.set_from_file(axisdiagram)
axisdiagram = os.path.join(self._p.HELPDIR,"HomeAxisTravel_V2.png")
self.widgets.helppic2.set_from_file(axisdiagram)
axisdiagram = os.path.join(self._p.HELPDIR,"HomeAxisTravel_V3.png")
self.widgets.helppic3.set_from_file(axisdiagram)
self.map_7i76 = gtk.gdk.pixbuf_new_from_file(os.path.join(self._p.HELPDIR,"7i76_map.png"))
self.widgets.map_7i76_image.set_from_pixbuf(self.map_7i76)
self.map_7i77 = gtk.gdk.pixbuf_new_from_file(os.path.join(self._p.HELPDIR,"7i77_map.png"))
self.widgets.map_7i77_image.set_from_pixbuf(self.map_7i77)
#self.widgets.openloopdialog.hide()
self.p.initialize()
window.show()
self.axis_under_test = False
self.jogminus = self.jogplus = 0
# set preferences if they exist
link = short = advanced = show_pages = False
filename = os.path.expanduser("~/.pncconf-preferences")
if os.path.exists(filename):
match = open(filename).read()
textbuffer = self.widgets.textoutput.get_buffer()
try :
textbuffer.set_text("%s\n\n"% filename)
textbuffer.insert_at_cursor(match)
except:
pass
version = 0.0
d = xml.dom.minidom.parse(open(filename, "r"))
for n in d.getElementsByTagName("property"):
name = n.getAttribute("name")
text = n.getAttribute('value')
if name == "version":
version = eval(text)
elif name == "always_shortcut":
short = eval(text)
elif name == "always_link":
link = eval(text)
elif name == "use_ini_substitution":
self.widgets.useinisubstitution.set_active(eval(text))
elif name == "show_advanced_pages":
show_pages = eval(text)
elif name == "machinename":
self.d._lastconfigname = text
elif name == "chooselastconfig":
self.d._chooselastconfig = eval(text)
elif name == "MESABLACKLIST":
if version == self.d._preference_version:
self._p.MESABLACKLIST = eval(text)
elif name == "EXTRA_MESA_FIRMWAREDATA":
self.d._customfirmwarefilename = text
rcfile = os.path.expanduser(self.d._customfirmwarefilename)
print rcfile
if os.path.exists(rcfile):
try:
execfile(rcfile)
except:
print _("**** PNCCONF ERROR: custom firmware loading error")
self._p.EXTRA_MESA_FIRMWAREDATA = []
if not self._p.EXTRA_MESA_FIRMWAREDATA == []:
print _("**** PNCCONF INFO: Found extra firmware in file")
# these are set from the hidden preference file
self.widgets.createsymlink.set_active(link)
self.widgets.createshortcut.set_active(short)
self.widgets.advancedconfig.set_active(show_pages)
tempfile = os.path.join(self._p.DISTDIR, "configurable_options/ladder/TEMP.clp")
if os.path.exists(tempfile):
os.remove(tempfile)
def add_placeholder_page(self,name):
string = '''
<?xml version="1.0"?>
<interface>
<requires lib="gtk+" version="2.16"/>
<!-- interface-naming-policy project-wide -->
<object class="GtkLabel" id="label_%s">
<property name="visible">True</property>
<property name="label" translatable="yes">%s</property>
</object>
</interface>
'''%(name,name)
self.builder.add_from_string(string)
# build functions
def makedirs(self, path):
makedirs(path)
def build_base(self):
base = os.path.expanduser("~/linuxcnc/configs/%s" % self.d.machinename)
ncfiles = os.path.expanduser("~/linuxcnc/nc_files")
if not os.path.exists(ncfiles):
self.makedirs(ncfiles)
examples = os.path.join(BASE, "share", "linuxcnc", "ncfiles")
if not os.path.exists(examples):
examples = os.path.join(BASE, "nc_files")
if os.path.exists(examples):
os.symlink(examples, os.path.join(ncfiles, "examples"))
self.makedirs(base)
return base
def copy(self, base, filename):
dest = os.path.join(base, filename)
if not os.path.exists(dest):
shutil.copy(os.path.join(self._p.DISTDIR, filename), dest)
def buid_config(self):
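        # write the chosen configuration to disk: the saved .pncconf data file,
        # the INI and HAL files and a default tool table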
base = self.build_base()
self.d.save(base)
#self.write_readme(base)
self.INI.write_inifile(base)
self.HAL.write_halfile(base)
self.copy(base, "tool.tbl")
if self.warning_dialog(self._p.MESS_QUIT,False):
gtk.main_quit()
# helper functions
def get_discovery_meta(self):
self.widgets.boarddiscoverydialog.set_title(_("Discovery metadata update"))
#self.widgets.cardname_label.set_text('Boardname: %s'%name)
self.widgets.boarddiscoverydialog.show_all()
self.widgets.window1.set_sensitive(0)
result = self.widgets.boarddiscoverydialog.run()
self.widgets.boarddiscoverydialog.hide()
self.widgets.window1.set_sensitive(1)
if result == gtk.RESPONSE_OK:
n = self.widgets.discovery_name_entry.get_text()
itr = self.widgets.discovery_interface_combobox.get_active_iter()
d = self.widgets.discovery_interface_combobox.get_model().get_value(itr, 1)
a = self.widgets.discovery_address_entry.get_text()
print 'discovery:',n,d,a
            return n,d,a
        return None, None, None
def discovery_interface_combobox_changed(self,w):
itr = w.get_active_iter()
d = w.get_model().get_value(itr, 1)
if d == '--addr':
self.widgets.discovery_address_entry.set_sensitive(True)
else:
self.widgets.discovery_address_entry.set_sensitive(False)
def get_board_meta(self, name):
name = name.lower()
meta = _PD.MESA_BOARD_META.get(name)
if meta:
return meta
else:
for key in _PD.MESA_BOARD_META:
if key in name:
return _PD.MESA_BOARD_META.get(key)
print 'boardname %s not found in hardware metadata array'% name
self.widgets.boardmetadialog.set_title(_("%s metadata update") % name)
self.widgets.cardname_label.set_text('Boardname: %s'%name)
self.widgets.boardmetadialog.show_all()
self.widgets.window1.set_sensitive(0)
result = self.widgets.boardmetadialog.run()
self.widgets.boardmetadialog.hide()
self.widgets.window1.set_sensitive(1)
if result == gtk.RESPONSE_OK:
itr = self.widgets.interface_combobox.get_active_iter()
d = self.widgets.interface_combobox.get_model().get_value(itr, 1)
ppc = int(self.widgets.ppc_combobox.get_active_text())
tp = int(self.widgets.noc_spinbutton.get_value())
_PD.MESA_BOARD_META[name] = {'DRIVER':d,'PINS_PER_CONNECTOR':ppc,'TOTAL_CONNECTORS':tp}
meta = _PD.MESA_BOARD_META.get(name)
if meta:
return meta
def splash_screen(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_SPLASHSCREEN)
self.window.set_title(_("Pncconf setup"))
self.window.set_border_width(10)
vbox = gtk.VBox(False, 5)
vbox.set_border_width(10)
self.window.add(vbox)
vbox.show()
align = gtk.Alignment(0.5, 0.5, 0, 0)
vbox.pack_start(align, False, False, 5)
align.show()
self.pbar = gtk.ProgressBar()
self.pbar.set_text(_("Pncconf is setting up"))
self.pbar.set_fraction(.1)
align.add(self.pbar)
self.pbar.show()
self.window.show()
while gtk.events_pending():
gtk.main_iteration()
def dbg(self,message,mtype='all'):
for hint in _DEBUGSTRING:
if "all" in hint or mtype in hint:
print(message)
if "step" in _DEBUGSTRING:
c = raw_input(_("\n**** Debug Pause! ****"))
return
def query_dialog(self,title, message):
def responseToDialog(entry, dialog, response):
dialog.response(response)
label = gtk.Label(message)
#label.modify_font(pango.FontDescription("sans 20"))
entry = gtk.Entry()
dialog = gtk.MessageDialog(self.widgets.window1,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL, title)
dialog.vbox.pack_start(label)
dialog.vbox.add(entry)
#allow the user to press enter to do ok
entry.connect("activate", responseToDialog, dialog, gtk.RESPONSE_OK)
dialog.show_all()
result = dialog.run()
text = entry.get_text()
dialog.destroy()
if result == gtk.RESPONSE_OK:
return text
else:
return None
def warning_dialog(self,message,is_ok_type):
if is_ok_type:
dialog = gtk.MessageDialog(self.widgets.window1,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING, gtk.BUTTONS_OK,message)
dialog.show_all()
result = dialog.run()
dialog.destroy()
return True
else:
dialog = gtk.MessageDialog(self.widgets.window1,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO,message)
dialog.show_all()
result = dialog.run()
dialog.destroy()
if result == gtk.RESPONSE_YES:
return True
else:
return False
def show_help(self):
helpfilename = os.path.join(self._p.HELPDIR, "%s"% self.d.help)
textbuffer = self.widgets.helpview.get_buffer()
try :
infile = open(helpfilename, "r")
if infile:
string = infile.read()
infile.close()
textbuffer.set_text(string)
except:
text = _("Specific Help page is unavailable\n")
self.warning_dialog(text,True)
self.widgets.help_window.set_title(_("Help Pages") )
self.widgets.helpnotebook.set_current_page(0)
self.widgets.help_window.show_all()
if self.debugstate:
self.widgets.input_tab.set_visible(True)
else:
self.widgets.input_tab.set_visible(False)
self.widgets.help_window.present()
def print_page(self,print_dialog, context, n, imagename):
ctx = context.get_cairo_context()
gdkcr = gtk.gdk.CairoContext(ctx)
gdkcr.set_source_pixbuf(self[imagename], 0,0)
gdkcr.paint ()
def print_image(self,image_name):
print 'print image'
print_dialog = gtk.PrintOperation()
print_dialog.set_n_pages(1)
settings = gtk.PrintSettings()
settings.set_orientation(gtk.PAGE_ORIENTATION_LANDSCAPE)
print_dialog.set_print_settings(settings)
print_dialog.connect("draw-page", self.print_page, image_name)
res = print_dialog.run(gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG, self.widgets.help_window)
if res == gtk.PRINT_OPERATION_RESULT_APPLY:
settings = print_dialog.get_print_settings()
# check for realtime kernel
def check_for_rt(self):
actual_kernel = os.uname()[2]
if hal.is_sim :
self.warning_dialog(self._p.MESS_NO_REALTIME,True)
if self.debugstate:
return True
else:
return False
elif hal.is_kernelspace and hal.kernel_version != actual_kernel:
self.warning_dialog(self._p.MESS_KERNEL_WRONG + '%s'%hal.kernel_version,True)
if self.debugstate:
return True
else:
return False
else:
return True
def add_external_folder_boardnames(self):
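        # build the boardname list from the external firmware folder when present,
        # otherwise fall back to the internal firmware data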
if os.path.exists(self._p.FIRMDIR):
self._p.MESA_BOARDNAMES = []
for root, dirs, files in os.walk(self._p.FIRMDIR):
folder = root.lstrip(self._p.FIRMDIR)
if folder in self._p.MESABLACKLIST:continue
if folder == "":continue
dbg("****folder added :%s"%folder,mtype='firmware')
self._p.MESA_BOARDNAMES.append(folder)
else:
            #TODO: what if there is no external firmware - is this enough?
self.warning_dialog(_("You have no hostmot2 firmware downloaded in folder:\n%s\n\
PNCconf will use internal firmware data"%self._p.FIRMDIR),True)
for firmware in self._p.MESA_INTERNAL_FIRMWAREDATA:
if 'internal' in firmware[0].lower():
if firmware[0] in self._p.MESA_BOARDNAMES:
continue
self._p.MESA_BOARDNAMES.append(firmware[0])
if self.d.advanced_option:
self._p.MESA_BOARDNAMES.append('Discovery Option')
# add any extra firmware boardnames from .pncconf-preference file
if not self._p.EXTRA_MESA_FIRMWAREDATA == []:
for search, item in enumerate(self._p.EXTRA_MESA_FIRMWAREDATA):
d = self._p.EXTRA_MESA_FIRMWAREDATA[search]
if not d[_PD._BOARDTITLE] in self._p.MESA_BOARDNAMES:
self._p.MESA_BOARDNAMES.append(d[_PD._BOARDTITLE])
model = self.widgets.mesa_boardname_store
model.clear()
for search,item in enumerate(self._p.MESA_BOARDNAMES):
#print search,item
model.append((item,))
def fill_pintype_model(self):
# notused
self.d._notusedliststore = gtk.ListStore(str,int)
self.d._notusedliststore.append([_PD.pintype_notused[0],0])
self.d._ssrliststore = gtk.ListStore(str,int)
self.d._ssrliststore.append([_PD.pintype_ssr[0],0])
# gpio
self.d._gpioliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_gpio):
self.d._gpioliststore.append([text,0])
# stepper
self.d._stepperliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_stepper):
self.d._stepperliststore.append([text,number])
# encoder
self.d._encoderliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_encoder):
self.d._encoderliststore.append([text,number])
# mux encoder
self.d._muxencoderliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_muxencoder):
self.d._muxencoderliststore.append([text,number])
# resolver
self.d._resolverliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_resolver):
self.d._resolverliststore.append([text,number])
# 8i20 AMP
self.d._8i20liststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_8i20):
self.d._8i20liststore.append([text,number])
# potentiometer output
self.d._potliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_potentiometer):
self.d._potliststore.append([text,number])
# analog input
self.d._analoginliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_analog_in):
self.d._analoginliststore.append([text,number])
# pwm
self.d._pwmrelatedliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_pwm):
self.d._pwmrelatedliststore.append([text,number])
self.d._pwmcontrolliststore = gtk.ListStore(str,int)
self.d._pwmcontrolliststore.append([_PD.pintype_pwm[0],0])
self.d._pwmcontrolliststore.append([_PD.pintype_pdm[0],0])
self.d._pwmcontrolliststore.append([_PD.pintype_udm[0],0])
# pdm
self.d._pdmrelatedliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_pdm):
self.d._pdmrelatedliststore.append([text,number])
self.d._pdmcontrolliststore = gtk.ListStore(str,int)
self.d._pdmcontrolliststore.append([_PD.pintype_pwm[0],0])
self.d._pdmcontrolliststore.append([_PD.pintype_pdm[0],0])
self.d._pdmcontrolliststore.append([_PD.pintype_udm[0],0])
# udm
self.d._udmrelatedliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_udm):
self.d._udmrelatedliststore.append([text,number])
self.d._udmcontrolliststore = gtk.ListStore(str,int)
self.d._udmcontrolliststore.append([_PD.pintype_pwm[0],0])
self.d._udmcontrolliststore.append([_PD.pintype_pdm[0],0])
self.d._udmcontrolliststore.append([_PD.pintype_udm[0],0])
#tppwm
self.d._tppwmliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_tp_pwm):
self.d._tppwmliststore.append([text,number])
#sserial
self.d._sserialliststore = gtk.ListStore(str,int)
for number,text in enumerate(_PD.pintype_sserial):
self.d._sserialliststore.append([text,number])
# comboboxes with 3 levels
def fill_combobox_models2(self):
templist = [ ["_gpioisignaltree",_PD.human_input_names,1,'hal_input_names'],
["_steppersignaltree",_PD.human_stepper_names,1,'hal_stepper_names'],
["_encodersignaltree",_PD.human_encoder_input_names,1,'hal_encoder_input_names'],
["_muxencodersignaltree",_PD.human_encoder_input_names,1,'hal_encoder_input_names'],
["_pwmsignaltree",_PD.human_pwm_output_names,1,'hal_pwm_output_names'],]
for item in templist:
#print "\ntype",item[0]
count = 0
end = len(item[1])-1
# treestore(parentname,parentnum,signalname,signaltreename,signal index number)
self.d[item[0]]= gtk.TreeStore(str,int,str,str,int)
for i,parent in enumerate(item[1]):
############################
# if there are no children:
############################
if not isinstance(parent[1], list):
signame = parent[1]
index = _PD[item[3]].index(parent[1])
#print 'no children:', signame, index
# add parent and get reference for child
# This entry is selectable it has a signal attached to it
piter = self.d[item[0]].append(None, [parent[0], index,signame,item[3],0])
#print parent,parentnum,count,signame,item[3],i,signame,count
else:
                    # If the list is empty it's a custom signal - with no signals yet
if len(parent[1]) == 0:
piter = self.d[item[0]].append(None, [parent[0], 0,'none',item[3],0])
else:
#print "parsing child",parent[1]
# add parent title
##########################
# if there are children:
# add an entry to first list that cannot be selected
# (well it always gives the unused signal - 0)
# because we need users to select from the next column
##########################
piter = self.d[item[0]].append(None, [parent[0],0,signame,item[3],0])
for j,child in enumerate(parent[1]):
#############################
# If grandchildren
#############################
if isinstance(child[1], list):
##########################
# if there are children:
# add an entry to second list that cannot be selected
# (well it always gives the unused signal - 0)
# because we need users to select from the next column
##########################
citer = self.d[item[0]].append(piter, [child[0], 0,signame,item[3],0])
#print 'add to CHILD list',child[0]
#print 'String:',child[1]
for k,grandchild in enumerate(child[1]):
#print 'raw grand: ', grandchild
#############################
# If GREAT children
#############################
#print grandchild[0],grandchild[1]
if isinstance(grandchild[1], list):
#print 'ERROR combo boxes can not have GREAT children yet add'
#print 'skipping'
continue
else:
#############################
# If No GREAT children
############################
humanName = grandchild[0]
sigName = grandchild[1]
index = _PD[item[3]].index(grandchild[1])
halNameArray = item[3]
#print 'adding to grandchild to childlist: ', humanName,index,sigName,halNameArray,index
self.d[item[0]].append(citer, [humanName, index,sigName,halNameArray,index])
####################
# No grandchildren
####################
else:
#print' add to child - no grandchild',child
humanName = child[0]
sigName = child[1]
index = _PD[item[3]].index(child[1])
halNameArray = item[3]
#print child[0],index,sigName,item[3],index
self.d[item[0]].append(piter, [humanName, index,sigName,halNameArray,index])
count +=item[2]
# combobox with 2 levels
def fill_combobox_models(self):
templist = [ ["_gpioosignaltree",_PD.human_output_names,1,'hal_output_names'],
["_resolversignaltree",_PD.human_resolver_input_names,1,'hal_resolver_input_names'],
["_tppwmsignaltree",_PD.human_tppwm_output_names,8,'hal_tppwm_output_names'],
["_8i20signaltree",_PD.human_8i20_input_names,1,'hal_8i20_input_names'],
["_potsignaltree",_PD.human_pot_output_names,2,'hal_pot_output_names'],
["_analoginsignaltree",_PD.human_analog_input_names,1,'hal_analog_input_names'],
["_sserialsignaltree",_PD.human_sserial_names,3,'hal_sserial_names']
]
for item in templist:
#print "\ntype",item[0]
count = 0
end = len(item[1])-1
# treestore(parentname,parentnum,signalname,signaltreename,signal index number)
self.d[item[0]]= gtk.TreeStore(str,int,str,str,int)
for i,parent in enumerate(item[1]):
############################
# if there are no children:
############################
if len(parent[1]) == 0:
# if combobox has a 'custom' signal choice then the index must be 0
if i == end and not item[0] =="_sserialsignaltree":parentnum = 0
else:parentnum = count
#print "length of human names:",len(parent[1])
# this adds the index number (parentnum) of the signal
try:
signame=_PD[item[3]][count]
except:
signame = 'none'
# add parent and get reference for child
piter = self.d[item[0]].append(None, [parent[0], parentnum,signame,item[3],count])
#print parent,parentnum,count,signame,item[3],i,signame,count
if count == 0: count = 1
else: count +=item[2]
##########################
# if there are children:
##########################
else:
#print "parsing child",signame
# add parent title
piter = self.d[item[0]].append(None, [parent[0],0,signame,item[3],count])
for j,child in enumerate(parent[1]):
#print len(child[1]), child[0]
#if item[0] =='_gpioisignaltree':
#print item[0], child[0],len(child[1])
#############################
# If grandchildren
#############################
if len(child[1]) > 1:
# add child and get reference
citer = self.d[item[0]].append(piter, [child[0], 0,signame,item[3],count])
#if item[0] =='_gpioisignaltree':
#print 'add to CHILD list',child[0]
#print 'Strig:',child[1]
for k,grandchild in enumerate(child[1]):
#print 'raw grand: ', grandchild
#############################
# If greatchildren
#############################
#print grandchild[0],grandchild[1]
if len(grandchild) > 1:
#print 'add to grandchild child list',grandchild[0]
index = _PD[item[3]].index(grandchild[1])
self.d[item[0]].append(citer, [grandchild[0],index,grandchild[1],item[3],index])
continue
else:
#############################
# If No greatchildren
#############################
try:
signame=_PD[item[3]][count]
except:
signame = 'none'
#print 'adding to grandchild to childlist: ', grandchild,signame,item[3],count
# add grandchild
self.d[item[0]].append(piter, [child,0,signame,item[3],count])
#count +=item[2]
####################
# No grandchildren
####################
else:
#print' add to child - no grandchild',child
signame=_PD[item[3]][count]
#print i,count,parent[0],child,signame,item[3], _PD[item[3]].index(signame),count
self.d[item[0]].append(piter, [child, count,signame,item[3],count])
count +=item[2]
self.fill_combobox_models2()
self.d._notusedsignaltree = gtk.TreeStore(str,int,str,str,int)
self.d._notusedsignaltree.append(None, [_PD.human_notused_names[0][0],0,'unused-unused','_notusedsignaltree',0])
# make a filter for sserial encoder as they can't be used for AXES
self.d._encodersignalfilter = self.d._encodersignaltree.filter_new()
self.d._enc_filter_list = ['Axis Encoder']
self.d._encodersignalfilter.set_visible_func(self.visible_cb, self.d._enc_filter_list)
# build filters for the 'controlling' sserial combbox
# We need to limit selections often
for channel in range(0,_PD._NUM_CHANNELS):
self.d['_sserial%d_filter_list'%channel] =[]
self.d['_sserial%d_signalfilter'%channel] = self.d._sserialsignaltree.filter_new()
self.d['_sserial%d_signalfilter'%channel].set_visible_func(self.filter_cb,self.d['_sserial%d_filter_list'%channel])
self.set_filter('_sserial%d'%channel,'ALL')
# Filter out any matching names in a list
def visible_cb(self, model, iter, data ):
#print model.get_value(iter, 0) ,data
return not model.get_value(iter, 0) in data
# filter out anything not in one of the lists, the list depending on a keyword
def set_filter(self,sserial,data):
keyword = data.upper()
if keyword == '7I77':
f_list = ['Unused','7i77']
elif keyword == '7I76':
f_list = ['Unused','7i76']
else:
f_list = ['Unused','7i73','7i69','8i20','7i64','7i71','7i70','7i84']
del self.d['%s_filter_list'%sserial][:]
for i in(f_list):
self.d['%s_filter_list'%sserial].append(i)
#print '\n',filterlist,self.d[filterlist]
self.d['%s_signalfilter'%sserial].refilter()
# Filter callback
def filter_cb(self, model, iter, data ):
#print model.get_value(iter, 0) ,data
for i in data:
if i in model.get_value(iter, 0):
return True
return False
def load_config(self):
filter = gtk.FileFilter()
filter.add_pattern("*.pncconf")
filter.set_name(_("LinuxCNC 'PNCconf' configuration files"))
dialog = gtk.FileChooserDialog(_("Modify Existing Configuration"),
self.widgets.window1, gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.add_filter(filter)
if not self.d._lastconfigname == "" and self.d._chooselastconfig:
dialog.set_filename(os.path.expanduser("~/linuxcnc/configs/%s.pncconf"% self.d._lastconfigname))
dialog.add_shortcut_folder(os.path.expanduser("~/linuxcnc/configs"))
dialog.set_current_folder(os.path.expanduser("~/linuxcnc/configs"))
dialog.show_all()
result = dialog.run()
if result == gtk.RESPONSE_OK:
filename = dialog.get_filename()
dialog.destroy()
self.d.load(filename, self)
self.d._mesa0_configured = False
self.d._mesa1_configured = False
try:
# check that the firmware is current enough by checking the length of a sub element and that the other is an integer.
for boardnum in(0,1):
i=j=None
i = len(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS])
j = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._HIFREQ]+100 # throws an error if not an integer.
if not i > 1:
print i,j,boardnum
raise UserWarning
except :
print i,j,boardnum
                self.warning_dialog(_("It seems the data in this file is from too old a version of PNCconf to continue.\n"),True)
return True
else:
dialog.destroy()
return True
def mesa_firmware_search(self,boardtitle,*args):
#TODO if no firm packages set up for internal data?
#TODO don't do this if the firmware is already loaded
self.pbar.set_text("Loading external firmware")
self.pbar.set_fraction(0)
self.window.show()
while gtk.events_pending():
gtk.main_iteration()
firmlist = []
for root, dirs, files in os.walk(self._p.FIRMDIR):
folder = root.lstrip(self._p.FIRMDIR)
#dbg('Firmware folder:%s'% folder)
if folder in self._p.MESABLACKLIST:continue
if not folder == boardtitle:continue
for n,name in enumerate(files):
if name in self._p.MESABLACKLIST:continue
if ".xml" in name:
dbg('%s'% name)
temp = name.rstrip(".xml")
firmlist.append(temp)
dbg("\nXML list:%s"%firmlist,mtype="firmname")
for n,currentfirm in enumerate(firmlist):
self.pbar.set_fraction(n*1.0/len(firmlist))
while gtk.events_pending():
gtk.main_iteration()
# XMLs don't tell us the driver type so set to None (parse will guess)
firmdata = self.parse_xml(None,boardtitle, currentfirm,os.path.join(
self._p.FIRMDIR,boardtitle,currentfirm+".xml"))
self._p.MESA_FIRMWAREDATA.append(firmdata)
self.window.hide()
def parse_xml(self, driver, boardtitle, firmname, xml_path):
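        # parse a hostmot2 firmware XML description and return the firmware data
        # as a flat list in the order pncconf's private_data arrays expect
        # (board info, module counts, connector list, then one entry per pin)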
def search(elementlist):
for i in elementlist:
temp = root.find(i)
if temp is not None:
return temp.text
return temp
root = xml.etree.ElementTree.parse(xml_path)
watchdog = encoder = resolver = pwmgen = led = muxedqcount = 0
stepgen = tppwmgen = sserialports = sserialchannels = 0
numencoderpins = numpwmpins = 3; numstepperpins = 2; numttpwmpins = 0; numresolverpins = 10
text = search(('boardname','BOARDNAME'))
if text == None:
print 'Missing info: boardname'
return
boardname = text.lower()
#dbg("\nBoard and firmwarename: %s %s\n"%( boardname, firmname), "firmraw")
text = search(("IOPORTS","ioports")) ; #print numcnctrs
if text == None:
print 'Missing info: ioports'
return
numcnctrs = int(text)
text = search(("PORTWIDTH","portwidth"))
if text == None:
print 'Missing info: portwidth'
return
portwidth = int(text)
maxgpio = numcnctrs * portwidth ; #print maxgpio
placeholders = 24-portwidth
text = search(("CLOCKLOW","clocklow")) ; #print lowfreq
if text == None:
print 'Missing info: clocklow'
return
lowfreq = int(text)/1000000
text = search(("CLOCKHIGH","clockhigh")); #print hifreq
if text == None:
print 'Missing info: clockhigh'
return
hifreq = int(text)/1000000
modules = root.findall(".//modules")[0]
if driver == None:
meta = self.get_board_meta(boardname)
driver = meta.get('DRIVER')
for i,j in enumerate(modules):
k = modules[i].find("tagname").text
print k
if k in ("Watchdog","WatchDog","WATCHDOG"):
l = modules[i].find("numinstances").text;#print l,k
watchdog = int(l)
elif k in ("Encoder","QCOUNT"):
l = modules[i].find("numinstances").text;#print l,k
encoder = int(l)
elif k in ("ResolverMod","RESOLVERMOD"):
l = modules[i].find("numinstances").text;#print l,k
resolver = int(l)
elif k in ("PWMGen","PWMGEN","PWM"):
l = modules[i].find("numinstances").text;#print l,k
pwmgen = int(l)
elif k == "LED":
l = modules[i].find("numinstances").text;#print l,k
led = int(l)
elif k in ("MuxedQCount","MUXEDQCOUNT"):
l = modules[i].find("numinstances").text;#print l,k
muxedqcount = int(l)
elif k in ("StepGen","STEPGEN"):
l = modules[i].find("numinstances").text;#print l,k
stepgen = int(l)
elif k in ("TPPWM","TPPWM"):
l = modules[i].find("numinstances").text;#print l,k
tppwmgen = int(l)
elif k in ("SSerial","SSERIAL"):
l = modules[i].find("numinstances").text;#print l,k
sserialports = int(l)
elif k in ("None","NONE"):
l = modules[i].find("numinstances").text;#print l,k
elif k in ("ssr","SSR"):
l = modules[i].find("numinstances").text;#print l,k
elif k in ("IOPort","AddrX","MuxedQCountSel"):
continue
else:
                print "**** WARNING: Pncconf parsing firmware: tagname (%s) not recognized"% k
discov_sserial = []
ssname = root.findall("SSERIALDEVICES/SSERIALFUNCTION")
for i in (ssname):
port = i.find("PORT").text
dev = i.find("DEVICE").text
chan = i.find("CHANNEL").text
discov_sserial.append((int(port),int(chan),dev))
print 'discovered sserial:', discov_sserial
pins = root.findall(".//pins")[0]
temppinlist = []
tempconlist = []
pinconvertenc = {"PHASE A":_PD.ENCA,"PHASE B":_PD.ENCB,"INDEX":_PD.ENCI,"INDEXMASK":_PD.ENCM,
"QUAD-A":_PD.ENCA,"QUAD-B":_PD.ENCB,"QUAD-IDX":_PD.ENCI,
"MUXED PHASE A":_PD.MXE0,"MUXED PHASE B":_PD.MXE1,"MUXED INDEX":_PD.MXEI,
"MUXED INDEX MASK":_PD.MXEM,"MUXED ENCODER SELECT 0":_PD.MXES,"MUXED ENCODER SELEC":_PD.MXES,
"MUXQ-A":_PD.MXE0,"MUXQ-B":_PD.MXE1,"MUXQ-IDX":_PD.MXEI,"MUXSEL0":_PD.MXES}
pinconvertresolver = {"RESOLVER POWER ENABLE":_PD.RESU,"RESOLVER SPIDI 0":_PD.RES0,
"RESOLVER SPIDI 1":_PD.RES1,"RESOLVER ADC CHANNEL 2":_PD.RES2,"RESOLVER ADC CHANNEL 1":_PD.RES3,
"RESOLVER ADC CHANNEL 0":_PD.RES4,"RESOLVER SPI CLK":_PD.RES5,"RESOLVER SPI CHIP SELECT":_PD.RESU,
"RESOLVER PDMM":_PD.RESU,"RESOLVER PDMP":_PD.RESU}
pinconvertstep = {"STEP":_PD.STEPA,"DIR":_PD.STEPB,"STEP/TABLE1":_PD.STEPA,"DIR/TABLE2":_PD.STEPB}
#"StepTable 2":STEPC,"StepTable 3":STEPD,"StepTable 4":STEPE,"StepTable 5":STEPF
pinconvertppwm = {"PWM/UP":_PD.PWMP,"DIR/DOWN":_PD.PWMD,"ENABLE":_PD.PWME,
"PWM":_PD.PWMP,"DIR":_PD.PWMD,"/ENABLE":_PD.PWME}
pinconverttppwm = {"PWM A":_PD.TPPWMA,"PWM B":_PD.TPPWMB,"PWM C":_PD.TPPWMC,
"PWM /A":_PD.TPPWMAN,"PWM /B":_PD.TPPWMBN,"PWM /C":_PD.TPPWMCN,
"FAULT":_PD.TPPWMF,"ENABLE":_PD.TPPWME}
        pinconvertsserial = {"RXDATA0":_PD.RXDATA0,"TXDATA0":_PD.TXDATA0,"TXE0":_PD.TXEN0,"TXEN0":_PD.TXEN0,
                             "RXDATA1":_PD.RXDATA0,"TXDATA1":_PD.TXDATA0,"TXE1":_PD.TXEN0,"TXEN1":_PD.TXEN0,
                             "RXDATA2":_PD.RXDATA1,"TXDATA2":_PD.TXDATA1,"TXE2":_PD.TXEN1,"TXEN2":_PD.TXEN1,
                             "RXDATA3":_PD.RXDATA2,"TXDATA3":_PD.TXDATA2,"TXE3":_PD.TXEN2,"TXEN3":_PD.TXEN2,
                             "RXDATA4":_PD.RXDATA3,"TXDATA4":_PD.TXDATA3,"TXE4":_PD.TXEN3,"TXEN4":_PD.TXEN3,
                             "RXDATA5":_PD.RXDATA4,"TXDATA5":_PD.TXDATA4,"TXE5":_PD.TXEN4,"TXEN5":_PD.TXEN4,
                             "RXDATA6":_PD.RXDATA5,"TXDATA6":_PD.TXDATA5,"TXE6":_PD.TXEN5,"TXEN6":_PD.TXEN5,
                             "RXDATA7":_PD.RXDATA6,"TXDATA7":_PD.TXDATA6,"TXE7":_PD.TXEN6,"TXEN7":_PD.TXEN6,
                             "RXDATA8":_PD.RXDATA7,"TXDATA8":_PD.TXDATA7,"TXE8":_PD.TXEN7,"TXEN8":_PD.TXEN7}
pinconvertnone = {"NOT USED":_PD.GPIOI}
count = 0
fakecon = 0
for i,j in enumerate(pins):
instance_num = 9999
iocode = None
temppinunit = []
temp = pins[i].find("connector").text
if 'P' in temp:
tempcon = int(temp.strip("P"))
else:
tempcon = temp
tempfunc = pins[i].find("secondaryfunctionname").text
            tempfunc = tempfunc.upper().strip() # normalise capitalization: Peter's XMLs are different from LinuxCNC's
if "(IN)" in tempfunc:
tempfunc = tempfunc.rstrip(" (IN)")
elif "(OUT" in tempfunc:
tempfunc = tempfunc.rstrip(" (OUT)")
convertedname = "Not Converted"
            # this converts the XML file component names to pncconf's names
try:
secmodname = pins[i].find("secondarymodulename")
modulename = secmodname.text.upper().strip()
dbg("secondary modulename: %s, %s."%( tempfunc,modulename), "firmraw")
if modulename in ("ENCODER","QCOUNT","MUXEDQCOUNT","MUXEDQCOUNTSEL"):
convertedname = pinconvertenc[tempfunc]
elif modulename in ("ResolverMod","RESOLVERMOD"):
convertedname = pinconvertresolver[tempfunc]
elif modulename in ("PWMGen","PWMGEN","PWM"):
convertedname = pinconvertppwm[tempfunc]
elif modulename in ("StepGen","STEPGEN"):
convertedname = pinconvertstep[tempfunc]
elif modulename in ("TPPWM","TPPWM"):
convertedname = pinconverttppwm[tempfunc]
elif modulename in ("SSerial","SSERIAL"):
temp = pins[i].find("foundsserialdevice")
if temp is not None:
founddevice = temp.text.upper()
else:
founddevice = None
#print tempfunc,founddevice
# this auto selects the sserial 7i76 mode 0 card for sserial 0 and 2
                    # as the 5i25/7i76 uses some of the sserial channels for its pins.
if boardname in ("5i25","7i92"):
if "7i77_7i76" in firmname:
if tempfunc == "TXDATA1": convertedname = _PD.SS7I77M0
elif tempfunc == "TXDATA2": convertedname = _PD.SS7I77M1
elif tempfunc == "TXDATA4": convertedname = _PD.SS7I76M3
else: convertedname = pinconvertsserial[tempfunc]
#print "XML ",firmname, tempfunc,convertedname
elif "7i76x2" in firmname or "7i76x1" in firmname:
if tempfunc == "TXDATA1": convertedname = _PD.SS7I76M0
elif tempfunc == "TXDATA3": convertedname = _PD.SS7I76M2
else: convertedname = pinconvertsserial[tempfunc]
#print "XML ",firmname, tempfunc,convertedname
elif "7i77x2" in firmname or "7i77x1" in firmname:
if tempfunc == "TXDATA1": convertedname = _PD.SS7I77M0
elif tempfunc == "TXDATA2": convertedname = _PD.SS7I77M1
elif tempfunc == "TXDATA4": convertedname = _PD.SS7I77M3
elif tempfunc == "TXDATA5": convertedname = _PD.SS7I77M4
else: convertedname = pinconvertsserial[tempfunc]
#print "XML ",firmname, tempfunc,convertedname
elif founddevice == "7I77-0": convertedname = _PD.SS7I77M0
elif founddevice == "7I77-1": convertedname = _PD.SS7I77M1
elif founddevice == "7I77-3": convertedname = _PD.SS7I77M3
elif founddevice == "7I77-4": convertedname = _PD.SS7I77M4
elif founddevice == "7I76-0": convertedname = _PD.SS7I76M0
elif founddevice == "7I76-2": convertedname = _PD.SS7I76M2
elif founddevice == "7I76-3": convertedname = _PD.SS7I76M3
else: convertedname = pinconvertsserial[tempfunc]
else:
convertedname = pinconvertsserial[tempfunc]
elif modulename in ('SSR','SSR'):
if tempfunc == 'AC':
convertedname = _PD.NUSED
elif 'OUT-' in tempfunc:
convertedname = _PD.SSR0
# ssr outputs encode the HAL number in the XML name
                        # add 100 to it so it is not treated as a changeable GPIO output
iocode = 100 + int(tempfunc[4:])
elif modulename in ("None","NONE"):
iocode = 0
#convertedname = pinconvertnone[tempfunc]
else:
                    print 'unknown module - setting to unusable',modulename, tempfunc
convertedname = _PD.NUSED
except:
iocode = 0
exc_type, exc_value, exc_traceback = sys.exc_info()
formatted_lines = traceback.format_exc().splitlines()
print
print "****pncconf verbose XML parse debugging:",formatted_lines[0]
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print formatted_lines[-1]
if iocode == 0:
                # must be GPIO pins if there is no secondary module name
                # or if pinconvert fails, e.g. StepTable instances default to GPIO
temppinunit.append(_PD.GPIOI)
temppinunit.append(0) # 0 signals to pncconf that GPIO can changed to be input or output
elif iocode >= 100:
temppinunit.append(_PD.SSR0)
temppinunit.append(iocode)
else:
instance_num = int(pins[i].find("secondaryinstance").text)
                # this is a workaround for the 7i77_7i76 firmware. it uses a mux encoder for the 7i76 but only uses half of it
# this is because of a limitation of hostmot2 - it can't have mux encoders and regular encoders
# so in pncconf we look for this and change it to a regular encoder.
if boardname == "5i25" and firmname == "7i77_7i76":
if modulename in ("MuxedQCount","MUXEDQCOUNT") and instance_num == 3:
instance_num = 6
encoder =-1
if convertedname == _PD.MXE0: convertedname = _PD.ENCA
elif convertedname == _PD.MXE1: convertedname = _PD.ENCB
elif convertedname == _PD.MXEI: convertedname = _PD.ENCI
temppinunit.append(convertedname)
if tempfunc in("MUXED ENCODER SELECT 0","MUXEDQCOUNTSEL") and instance_num == 6:
instance_num = 3
temppinunit.append(instance_num)
tempmod = pins[i].find("secondarymodulename").text
tempfunc = tempfunc.upper()# normalize capitalization
#dbg("secondary modulename, function: %s, %s."%( tempmod,tempfunc), "firmraw")
if tempmod in("Encoder","MuxedQCount") and tempfunc in ("MUXED INDEX MASK (IN)","INDEXMASK (IN)"):
numencoderpins = 4
if tempmod in("SSerial","SSERIAL") and tempfunc in ("TXDATA1","TXDATA2","TXDATA3",
"TXDATA4","TXDATA5","TXDATA6","TXDATA7","TXDATA8"):
sserialchannels +=1
#dbg("temp: %s, converted name: %s. num %d"%( tempfunc,convertedname,instance_num), "firmraw")
if not tempcon in tempconlist:
tempconlist.append(tempcon)
temppinlist.append(temppinunit)
            # add NONE placeholders for boards with fewer than 24 pins per connector.
if not placeholders == 0:
#print i,portwidth*numcnctrs
if i == (portwidth + count-1) or i == portwidth*numcnctrs-1:
#print "loop %d %d"% (i,portwidth + count-1)
count =+ portwidth
#print "count %d" % count
for k in range(0,placeholders):
#print "%d fill here with %d parts"% (k,placeholders)
temppinlist.append((_PD.NUSED,0))
if not sserialchannels == 0:
sserialchannels +=1
# 7i96 doesn't number the connectors with P numbers so we fake it
# TODO
# probably should move the connector numbers to board data rather then firmware
for j in tempconlist:
if not isinstance(j, (int, long)):
tempconlist = [i for i in range(1,len(tempconlist)+1)]
break
temp = [boardtitle,boardname,firmname,boardtitle,driver,encoder + muxedqcount,
numencoderpins,resolver,numresolverpins,pwmgen,numpwmpins,
tppwmgen,numttpwmpins,stepgen,numstepperpins,
sserialports,sserialchannels,discov_sserial,0,0,0,0,0,0,0,watchdog,maxgpio,
lowfreq,hifreq,tempconlist]
for i in temppinlist:
temp.append(i)
if "5i25" in boardname :
dbg("5i25 firmware:\n%s\n"%( temp), mtype="5i25")
print 'firm added:\n',temp
return temp
def discover_mesacards(self):
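        # run mesaflash board discovery (or, in debug mode, parse text pasted into
        # the help input tab) and return its raw output for parse_discovery()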
name, interface, address = self.get_discovery_meta()
if name is None: return
if not name:
name = '5i25'
if self.debugstate:
print 'try to discover board by reading help text input:',name
buf = self.widgets.textinput.get_buffer()
info = buf.get_text(buf.get_start_iter(),
buf.get_end_iter(),
True)
else:
info = self.call_mesaflash(name,interface,address)
print 'INFO:',info,'<-'
if info is None: return None
lines = info.splitlines()
try:
if 'ERROR' in lines[0]:
raise ValueError('Mesaflash Error')
except ValueError as err:
text = err.args
self.warning_dialog(text[0],True)
return
except:
self.warning_dialog('Unspecified Error with Mesaflash',True)
return
if 'No' in lines[0] and 'board found' in lines[0] :
text = _("No board was found\n")
self.warning_dialog(text,True)
print 'OOPS no board found!'
return None
return info
def call_mesaflash(self, devicename, interface, address):
if address == ' ':
address = None
textbuffer = self.widgets.textoutput.get_buffer()
print 'DEVICE NAME SPECIFIED',devicename, interface, address
        # 7i43 needs its firmware loaded before it can be 'discovered'
if '7i43' in devicename.lower():
halrun = os.popen("halrun -Is > /dev/null", "w")
halrun.write("echo\n")
load,read,write = self.hostmot2_command_string()
# do I/O load commands
for i in load:
halrun.write('%s\n'%i)
halrun.flush()
time.sleep(.001)
halrun.close()
if interface == '--addr' and address:
board_command = '--device %s %s %s' %(devicename, interface, address)
elif interface == '--epp':
board_command = '--device %s %s' %(devicename, interface)
else:
board_command = '--device %s' %(devicename)
#cmd ="""pkexec "sh -c 'mesaflash %s';'mesaflash %s --sserial';'mesaflash %s --readhmid' " """%(board_command, board_command, board_command)
cmd =""" mesaflash -%s;mesaflash %s --sserial;mesaflash %s --readhmid """%(board_command, board_command, board_command)
discover = subprocess.Popen([cmd], shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE )
output, error = discover.communicate()
if output == '':
text = _("Discovery is got an error\n\n Is mesaflash installed?\n\n %s"%error)
self.warning_dialog(text,True)
try :
textbuffer.set_text('Command:\n%s\n gave:\n%s'%(cmd,error))
self.widgets.helpnotebook.set_current_page(2)
except Exception as e :
print e
return None
try :
textbuffer.set_text(output)
self.widgets.helpnotebook.set_current_page(2)
self.widgets.help_window.show_all()
except:
text = _("Discovery is unavailable\n")
self.warning_dialog(text,True)
print 'cmd=',cmd
return output
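    # Convert raw mesaflash output into a hostmot2-style XML description, saved
    # as ~/mesa<boardnum>_discovered.xml, and return driver, boardname, firmware
    # name and file path. The parser keys on lines such as 'BOARDNAME',
    # 'MODULE: QCOUNT' and 'IO CONNECTIONS FOR P3' (P3 is only an illustrative
    # connector name; the actual text comes from mesaflash).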
def parse_discovery(self,info,boardnum=0):
DRIVER = BOARDNAME = ''
WATCHDOG = NUMCONS = NUMCONPINS = ENCODERS = MUXENCODERS = 0
RESOLVERS = NUMSSCHANNELS = SSERIALPORTS = 0
PWMGENS = LEDS = STEPGENS = TPPWMGEN = 0
NUMENCODERPINS = NUMPWMPINS = 3; NUMSTEPPERPINS = 2
NUMTPPWMPINS = 0;NUMRESOLVERPINS = 10
DOC = xml.dom.minidom.getDOMImplementation().createDocument(
None, 'hostmot2', None)
ELEMENT = DOC.documentElement
def add_element(ELEMENT,name):
n1 = DOC.createElement(name)
ELEMENT.appendChild(n1)
return n1
def add_text(root,title,value):
n = DOC.createElement(title)
root.appendChild(n)
nodeText = DOC.createTextNode( value )
n.appendChild(nodeText)
return n
info = info.upper()
lines = info.splitlines()
sserial=[]
ssflag = pinsflag = True
dev7i77flag = dev7i76flag = False
for l_num,i in enumerate(lines):
i = i.lstrip()
temp2 = i.split(" ")
#print i,temp2
if 'ETH' in i:
DRIVER = 'hm2_eth'
if 'PCI' in i:
DRIVER = 'hm2_pci'
if 'BOARDNAME' in i:
BOARDNAME = temp2[2].strip('MESA').lower()
add_text(ELEMENT,'BOARDNAME',BOARDNAME)
if 'DEVICE AT' in i:
if ssflag:
n1 = add_element(ELEMENT,'SSERIALDEVICES')
ssflag = False
for num,i in enumerate(temp2):
if i =="CHANNEL":
sserial.append((temp2[num+1].strip(':'),temp2[num+2]))
n2 = add_element(n1,'SSERIALFUNCTION')
add_text(n2,'PORT','0')
add_text(n2,'CHANNEL',temp2[num+1].strip(':'))
add_text(n2,'DEVICE',temp2[num+2])
if '7I77' in(temp2[num+2]):
dev7i77flag = True
elif '7I76' in(temp2[num+2]):
dev7i76flag = True
if 'SSLBP CHANNELS:' in i:
NUMSSCHANNELS = temp2[2]
if 'CLOCK LOW FREQUENCY: ' in i:
add_text(ELEMENT,'CLOCKLOW',str(int(float(temp2[3])*1000000)))
if 'CLOCK HIGH FREQUENCY:' in i:
add_text(ELEMENT,'CLOCKHIGH',str(int(float(temp2[3])*1000000)))
if 'NUMBER OF IO PORTS:' in i:
NUMCONS = temp2[4]
add_text(ELEMENT,'IOPORTS',NUMCONS)
if 'WIDTH OF ONE I/O PORT:' in i:
NUMCONPINS = temp2[5]
add_text(ELEMENT,'PORTWIDTH',NUMCONPINS)
if 'MODULES IN CONFIGURATION:' in i:
mod_ele = add_element(ELEMENT,'modules')
modflag = True
if 'MODULE: WATCHDOG' in i:
tline = lines[l_num+1].split(" ")
new = add_element(mod_ele,'module')
add_text(new,'tagname','WATCHDOG')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: QCOUNT' in i:
tline = lines[l_num+1].split(" ")
ENCODERS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','QCOUNT')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: MUXEDQCOUNTSEL' in i:
continue
if 'MODULE: MUXEDQCOUNT' in i:
tline = lines[l_num+1].split(" ")
MUXENCODERS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','MUXEDQCOUNT')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: SSERIAL' in i:
tline = lines[l_num+1].split(" ")
SSERIALPORTS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','SSERIAL')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: RESOLVERMOD' in i:
tline = lines[l_num+1].split(" ")
RESOLVER = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','RESOLVERMOD')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: PWM' in i:
tline = lines[l_num+1].split(" ")
PWMGENS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','PWMGEN')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: TPPWM' in i:
tline = lines[l_num+1].split(" ")
TPPWMGENS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','TPPWMGEN')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: STEPGEN' in i:
tline = lines[l_num+1].split(" ")
STEPGENS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','STEPGEN')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: LED' in i:
tline = lines[l_num+1].split(" ")
LEDS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','LED')
add_text(new,'numinstances',tline[4].lstrip())
if 'MODULE: SSR' in i:
tline = lines[l_num+1].split(" ")
LEDS = tline[4].lstrip()
new = add_element(mod_ele,'module')
add_text(new,'tagname','SSR')
add_text(new,'numinstances',tline[4].lstrip())
if 'IO CONNECTIONS FOR' in i:
if pinsflag:
n1 = add_element(ELEMENT,'pins')
pinsflag = False
CON = temp2[3]
print CON
for num in range(l_num+3,l_num+3+int(NUMCONPINS)):
CHAN = PINFNCTN = ''
pin_line = ' '.join(lines[num].split()).split()
PINNO = pin_line[0]
IO = pin_line[1]
SECFNCTN = pin_line[3]
n2 = add_element(n1,'pin')
add_text(n2,'index',IO)
add_text(n2,'connector',CON)
add_text(n2,'pinno',PINNO)
add_text(n2,'secondarymodulename',SECFNCTN)
if not SECFNCTN == 'NONE':
CHAN = pin_line[4]
PINFNCTN = pin_line[5]
if PINFNCTN in("TXDATA1","TXDATA2","TXDATA3",
"TXDATA4","TXDATA5","TXDATA6","TXDATA7","TXDATA8"):
num = int(PINFNCTN[6])-1
print num
for idnum,dev in sserial:
print idnum,dev,num
if int(idnum) == num:
NEW_FNCTN = '%s-%d'% (dev,num)
add_text(n2,'foundsserialdevice',NEW_FNCTN)
add_text(n2,'secondaryfunctionname',PINFNCTN)
add_text(n2,'secondaryinstance',CHAN)
else:
add_text(n2,'secondaryfunctionname','NOT USED')
print ' I/O ',IO, ' function ',SECFNCTN,' CHANNEL:',CHAN,'PINFUNCTION:',PINFNCTN
print 'Sserial CARDS FOUND:',sserial
print NUMCONS,NUMCONPINS,ENCODERS,MUXENCODERS,SSERIALPORTS,NUMSSCHANNELS
print RESOLVERS,PWMGENS,LEDS
firmname = "~/mesa%d_discovered.xml"%boardnum
filename = os.path.expanduser(firmname)
DOC.writexml(open(filename, "wb"), addindent=" ", newl="\n")
return DRIVER, BOARDNAME, firmname, filename
# update all the firmware/boardname arrays and comboboxes
def discovery_selection_update(self, info, bdnum):
driver, boardname, firmname, path = self.parse_discovery(info,boardnum=bdnum)
boardname = 'Discovered:%s'% boardname
firmdata = self.parse_xml( driver,boardname,firmname,path)
self._p.MESA_FIRMWAREDATA.append(firmdata)
self._p.MESA_INTERNAL_FIRMWAREDATA.append(firmdata)
self._p.MESA_BOARDNAMES.append(boardname)
# add firmname to combo box if it's not there
model = self.widgets["mesa%s_firmware"%bdnum].get_model()
flag = True
for search,item in enumerate(model):
if model[search][0] == firmname:
flag = False
break
if flag:
model.append((firmname,))
search = 0
model = self.widgets["mesa%s_firmware"%bdnum].get_model()
for search,item in enumerate(model):
if model[search][0] == firmname:
self.widgets["mesa%s_firmware"%bdnum].set_active(search)
break
# add boardtitle
model = self.widgets["mesa%s_boardtitle"%bdnum].get_model()
flag2 = True
for search,item in enumerate(model):
if model[search][0] == boardname:
flag2 = False
break
if flag2:
model.append((boardname,))
search = 0
model = self.widgets["mesa%s_boardtitle"%bdnum].get_model()
for search,item in enumerate(model):
#print model[search][0], boardname
if model[search][0] == boardname:
self.widgets["mesa%s_boardtitle"%bdnum].set_active(search)
break
# update if there was a change
if flag or flag2:
self.on_mesa_component_value_changed(None,0)
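    # Create udev rules so hal_input can access a USB input device: write a
    # general plugdev rule if one is missing, then diff /proc/bus/input/devices
    # before and after the device is plugged in to find its name and
    # vendor/product IDs and write a device-specific rule.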
def add_device_rule(self):
text = []
sourcefile = "/tmp/"
if os.path.exists("/etc/udev/rules.d/50-LINUXCNC-general.rules"):
text.append( "General rule already exists\n")
else:
text.append("adding a general rule first\nso your device will be found\n")
filename = os.path.join(sourcefile, "LINUXCNCtempGeneral.rules")
file = open(filename, "w")
print >>file, ("# This is a rule for LinuxCNC's hal_input\n")
print >>file, ("""SUBSYSTEM="input", MODE="0660", GROUP="plugdev" """)
file.close()
p=os.popen("gksudo cp %sLINUXCNCtempGeneral.rules /etc/udev/rules.d/50-LINUXCNC-general.rules"% sourcefile )
time.sleep(.1)
p.flush()
p.close()
os.remove('%sLINUXCNCtempGeneral.rules'% sourcefile)
text.append(("disconect USB device please\n"))
if not self.warning_dialog("\n".join(text),False):return
os.popen('less /proc/bus/input/devices >> %sLINUXCNCnojoytemp.txt'% sourcefile)
text = ["Plug in USB device please"]
if not self.warning_dialog("\n".join(text),False):return
time.sleep(1)
os.popen('less /proc/bus/input/devices >> %sLINUXCNCjoytemp.txt'% sourcefile).read()
diff = os.popen (" less /proc/bus/input/devices | diff %sLINUXCNCnojoytemp.txt %sLINUXCNCjoytemp.txt "%(sourcefile, sourcefile) ).read()
self.widgets.help_window.set_title(_("USB device Info Search"))
os.remove('%sLINUXCNCnojoytemp.txt'% sourcefile)
os.remove('%sLINUXCNCjoytemp.txt'% sourcefile)
if diff =="":
text = ["No new USB device found"]
if not self.warning_dialog("\n".join(text),True):return
else:
textbuffer = self.widgets.textoutput.get_buffer()
try :
textbuffer.set_text(diff)
self.widgets.helpnotebook.set_current_page(2)
self.widgets.help_window.show_all()
except:
text = _("USB device page is unavailable\n")
self.warning_dialog(text,True)
linelist = diff.split("\n")
for i in linelist:
if "Name" in i:
temp = i.split("\"")
name = temp[1]
temp = name.split(" ")
self.widgets.usbdevicename.set_text(temp[0])
infolist = diff.split()
for i in infolist:
if "Vendor" in i:
temp = i.split("=")
vendor = temp[1]
if "Product" in i:
temp = i.split("=")
product = temp[1]
text =[ "Vendor = %s\n product = %s\n name = %s\nadding specific rule"%(vendor,product,name)]
if not self.warning_dialog("\n".join(text),False):return
tempname = sourcefile+"LINUXCNCtempspecific.rules"
file = open(tempname, "w")
print >>file, ("# This is a rule for LINUXCNC's hal_input\n")
print >>file, ("# For devicename=%s\n"% name)
print >>file, ("""SYSFS{idProduct}=="%s", SYSFS{idVendor}=="%s", MODE="0660", GROUP="plugdev" """%(product,vendor))
file.close()
# remove illegal filename characters
for i in ("(",")"):
temp = name.replace(i,"")
name = temp
newname = "50-LINUXCNC-%s.rules"% name.replace(" ","_")
os.popen("gksudo cp %s /etc/udev/rules.d/%s"% (tempname,newname) )
time.sleep(1)
os.remove('%sLINUXCNCtempspecific.rules'% sourcefile)
text = ["Please unplug and plug in your device again"]
if not self.warning_dialog("\n".join(text),True):return
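    # Quick input-device test: load hal_input for the requested device in a
    # temporary halrun session, pop up a halmeter, then list the digital and
    # analog output pins that were created.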
def test_joystick(self):
halrun = subprocess.Popen("halrun -I ", shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE )
#print "requested devicename = ",self.widgets.usbdevicename.get_text()
halrun.stdin.write("loadusr hal_input -W -KRAL +%s\n"% self.widgets.usbdevicename.get_text())
halrun.stdin.write("loadusr halmeter -g 0 500\n")
time.sleep(1.5)
halrun.stdin.write("show pin\n")
self.warning_dialog("Close me When done.\n",True)
halrun.stdin.write("exit\n")
output = halrun.communicate()[0]
temp2 = output.split(" ")
temp=[]
for i in temp2:
if i =="": continue
temp.append(i)
buttonlist=""
for index,i in enumerate(temp):
if "bit" in i and "OUT" in temp[index+1]:
buttonlist = buttonlist + " Digital: %s"% ( temp[index+3] )
if "float" in i and "OUT" in temp[index+1]:
buttonlist = buttonlist + " Analog: %s"% ( temp[index+3] )
if buttonlist =="": return
textbuffer = self.widgets.textoutput.get_buffer()
try :
textbuffer.set_text(buttonlist)
self.widgets.helpnotebook.set_current_page(2)
self.widgets.help_window.show_all()
except:
text = _("Pin names are unavailable\n")
self.warning_dialog(text,True)
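    # List any pncconf-made udev rules (50-LINUXCNC-*) found in /etc/udev/rules.d
    # and show the file and device names in the help text output.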
def search_for_device_rule(self):
flag = False
textbuffer = self.widgets.textoutput.get_buffer()
textbuffer.set_text("Searching for device rules in folder: /etc/udev/rules.d\n\n")
for entry in os.listdir("/etc/udev/rules.d"):
if fnmatch.fnmatch( entry,"50-LINUXCNC-*"):
temp = open("/etc/udev/rules.d/" + entry, "r").read()
templist = temp.split("\n")
for i in templist:
if "devicename=" in i:
flag = True
temp = i.split("=")
name = temp[1]
try:
textbuffer.insert_at_cursor( "File name: %s\n"% entry)
textbuffer.insert_at_cursor( "Device name: %s\n\n"% name)
self.widgets.helpnotebook.set_current_page(2)
self.widgets.help_window.show_all()
except:
self.show_try_errors()
text = _("Device names are unavailable\n")
self.warning_dialog(text,True)
if flag == False:
text = _("No Pncconf made device rules were found\n")
textbuffer.insert_at_cursor(text)
self.warning_dialog(text,True)
def read_touchy_preferences(self):
# This reads the Touchy preference file directly
tempdict = {"touchyabscolor":"abs_textcolor","touchyrelcolor":"rel_textcolor",
"touchydtgcolor":"dtg_textcolor","touchyerrcolor":"err_textcolor"}
for key,value in tempdict.iteritems():
data = prefs.getpref(value, 'default', str)
if data == "default":
self.widgets[key].set_active(False)
else:
self.widgets[key].set_active(True)
self.widgets[key+"button"].set_color(gtk.gdk.color_parse(data))
self.widgets.touchyforcemax.set_active(bool(prefs.getpref('window_force_max')))
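    # Fill the theme comboboxes from the installed GTK themes and select the
    # themes saved for gladevcp, Touchy and gmcpy.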
def get_installed_themes(self):
data1 = self.d.gladevcptheme
data2 = prefs.getpref('gtk_theme', 'Follow System Theme', str)
data3 = self.d.gmcpytheme
model = self.widgets.themestore
model.clear()
model.append((_("Follow System Theme"),))
model2 = self.widgets.glade_themestore
model2.clear()
model2.append((_("Follow System Theme"),))
temp1 = temp2 = temp3 = 0
names = os.listdir(_PD.THEMEDIR)
names.sort()
for search,dirs in enumerate(names):
model.append((dirs,))
model2.append((dirs,))
if dirs == data1:
temp1 = search+1
if dirs == data2:
temp2 = search+1
if dirs == data3:
temp3 = search+1
self.widgets.gladevcptheme.set_active(temp1)
self.widgets.touchytheme.set_active(temp2)
self.widgets.gmcpy_theme.set_active(temp3)
def gladevcp_sanity_check(self):
if os.path.exists(os.path.expanduser("~/linuxcnc/configs/%s/gvcp-panel.ui" % self.d.machinename)):
if not self.warning_dialog(_("OK to replace existing glade panel ?\
\nIt will be renamed and added to 'backups' folder.\n Clicking 'existing custom program' will avoid this warning, but \
if you change related options later -such as spindle feedback- the HAL connection will not update"),False):
return True
def pyvcp_sanity_check(self):
if os.path.exists(os.path.expanduser("~/linuxcnc/configs/%s/pyvcp-panel.xml" % self.d.machinename)):
if not self.warning_dialog(_("OK to replace existing custom pyvcp panel?\
\nExisting pyvcp-panel.xml will be renamed and added to 'backups' folder\n\
Clicking 'existing custom program' will avoid this warning. "),False):
return True
# disallow some signal combinations
def do_exclusive_inputs(self, widget,portnum,pinname):
# If initializing the Pport pages we don't want the signal calls to register here.
# if we are working in here we don't want signal calls because of changes made in here
# GTK supports signal blocking but then you can't assign signal block name references in GLADE -slaps head
if self._p.prepare_block or self.recursive_block: return
if 'mesa' in pinname:
ptype = '%stype'%pinname
if not self.widgets[ptype].get_active_text() == _PD.pintype_gpio[0]: return
self.recursive_block = True
SIG = self._p
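        # Map each input signal to the signals that cannot be used at the same
        # time (for example a combined limit/home signal excludes the separate
        # limit and home ones). Any other pin already set to a conflicting
        # signal is forced back to UNUSED_INPUT further below.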
exclusive = {
SIG.HOME_X: (SIG.MAX_HOME_X, SIG.MIN_HOME_X, SIG.BOTH_HOME_X, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.HOME_Y: (SIG.MAX_HOME_Y, SIG.MIN_HOME_Y, SIG.BOTH_HOME_Y, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.HOME_Z: (SIG.MAX_HOME_Z, SIG.MIN_HOME_Z, SIG.BOTH_HOME_Z, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.HOME_A: (SIG.MAX_HOME_A, SIG.MIN_HOME_A, SIG.BOTH_HOME_A, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MAX_HOME_X: (SIG.HOME_X, SIG.MIN_HOME_X, SIG.MAX_HOME_X, SIG.BOTH_HOME_X, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MAX_HOME_Y: (SIG.HOME_Y, SIG.MIN_HOME_Y, SIG.MAX_HOME_Y, SIG.BOTH_HOME_Y, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MAX_HOME_Z: (SIG.HOME_Z, SIG.MIN_HOME_Z, SIG.MAX_HOME_Z, SIG.BOTH_HOME_Z, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MAX_HOME_A: (SIG.HOME_A, SIG.MIN_HOME_A, SIG.MAX_HOME_A, SIG.BOTH_HOME_A, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MIN_HOME_X: (SIG.HOME_X, SIG.MAX_HOME_X, SIG.BOTH_HOME_X, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MIN_HOME_Y: (SIG.HOME_Y, SIG.MAX_HOME_Y, SIG.BOTH_HOME_Y, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MIN_HOME_Z: (SIG.HOME_Z, SIG.MAX_HOME_Z, SIG.BOTH_HOME_Z, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MIN_HOME_A: (SIG.HOME_A, SIG.MAX_HOME_A, SIG.BOTH_HOME_A, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.BOTH_HOME_X: (SIG.HOME_X, SIG.MAX_HOME_X, SIG.MIN_HOME_X, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.BOTH_HOME_Y: (SIG.HOME_Y, SIG.MAX_HOME_Y, SIG.MIN_HOME_Y, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.BOTH_HOME_Z: (SIG.HOME_Z, SIG.MAX_HOME_Z, SIG.MIN_HOME_Z, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.BOTH_HOME_A: (SIG.HOME_A, SIG.MAX_HOME_A, SIG.MIN_HOME_A, SIG.ALL_LIMIT, SIG.ALL_HOME, SIG.ALL_LIMIT_HOME),
SIG.MIN_X: (SIG.BOTH_X, SIG.BOTH_HOME_X, SIG.MIN_HOME_X, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.MIN_Y: (SIG.BOTH_Y, SIG.BOTH_HOME_Y, SIG.MIN_HOME_Y, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.MIN_Z: (SIG.BOTH_Z, SIG.BOTH_HOME_Z, SIG.MIN_HOME_Z, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.MIN_A: (SIG.BOTH_A, SIG.BOTH_HOME_A, SIG.MIN_HOME_A, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.MAX_X: (SIG.BOTH_X, SIG.BOTH_HOME_X, SIG.MIN_HOME_X, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.MAX_Y: (SIG.BOTH_Y, SIG.BOTH_HOME_Y, SIG.MIN_HOME_Y, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.MAX_Z: (SIG.BOTH_Z, SIG.BOTH_HOME_Z, SIG.MIN_HOME_Z, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.MAX_A: (SIG.BOTH_A, SIG.BOTH_HOME_A, SIG.MIN_HOME_A, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.BOTH_X: (SIG.MIN_X, SIG.MAX_X, SIG.MIN_HOME_X, SIG.MAX_HOME_X, SIG.BOTH_HOME_X, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.BOTH_Y: (SIG.MIN_Y, SIG.MAX_Y, SIG.MIN_HOME_Y, SIG.MAX_HOME_Y, SIG.BOTH_HOME_Y, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.BOTH_Z: (SIG.MIN_Z, SIG.MAX_Z, SIG.MIN_HOME_Z, SIG.MAX_HOME_Z, SIG.BOTH_HOME_Z, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.BOTH_A: (SIG.MIN_A, SIG.MAX_A, SIG.MIN_HOME_A, SIG.MAX_HOME_A, SIG.BOTH_HOME_A, SIG.ALL_LIMIT, SIG.ALL_LIMIT_HOME),
SIG.ALL_LIMIT: (
SIG.MIN_X, SIG.MAX_X, SIG.BOTH_X, SIG.MIN_HOME_X, SIG.MAX_HOME_X, SIG.BOTH_HOME_X,
SIG.MIN_Y, SIG.MAX_Y, SIG.BOTH_Y, SIG.MIN_HOME_Y, SIG.MAX_HOME_Y, SIG.BOTH_HOME_Y,
SIG.MIN_Z, SIG.MAX_Z, SIG.BOTH_Z, SIG.MIN_HOME_Z, SIG.MAX_HOME_Z, SIG.BOTH_HOME_Z,
SIG.MIN_A, SIG.MAX_A, SIG.BOTH_A, SIG.MIN_HOME_A, SIG.MAX_HOME_A, SIG.BOTH_HOME_A,
SIG.ALL_LIMIT_HOME),
SIG.ALL_HOME: (
SIG.HOME_X, SIG.MIN_HOME_X, SIG.MAX_HOME_X, SIG.BOTH_HOME_X,
SIG.HOME_Y, SIG.MIN_HOME_Y, SIG.MAX_HOME_Y, SIG.BOTH_HOME_Y,
SIG.HOME_Z, SIG.MIN_HOME_Z, SIG.MAX_HOME_Z, SIG.BOTH_HOME_Z,
SIG.HOME_A, SIG.MIN_HOME_A, SIG.MAX_HOME_A, SIG.BOTH_HOME_A,
SIG.ALL_LIMIT_HOME),
SIG.ALL_LIMIT_HOME: (
SIG.HOME_X, SIG.MIN_HOME_X, SIG.MAX_HOME_X, SIG.BOTH_HOME_X,
SIG.HOME_Y, SIG.MIN_HOME_Y, SIG.MAX_HOME_Y, SIG.BOTH_HOME_Y,
SIG.HOME_Z, SIG.MIN_HOME_Z, SIG.MAX_HOME_Z, SIG.BOTH_HOME_Z,
SIG.HOME_A, SIG.MIN_HOME_A, SIG.MAX_HOME_A, SIG.BOTH_HOME_A,
SIG.MIN_X, SIG.MAX_X, SIG.BOTH_X, SIG.MIN_HOME_X, SIG.MAX_HOME_X, SIG.BOTH_HOME_X,
SIG.MIN_Y, SIG.MAX_Y, SIG.BOTH_Y, SIG.MIN_HOME_Y, SIG.MAX_HOME_Y, SIG.BOTH_HOME_Y,
SIG.MIN_Z, SIG.MAX_Z, SIG.BOTH_Z, SIG.MIN_HOME_Z, SIG.MAX_HOME_Z, SIG.BOTH_HOME_Z,
SIG.MIN_A, SIG.MAX_A, SIG.BOTH_A, SIG.MIN_HOME_A, SIG.MAX_HOME_A, SIG.BOTH_HOME_A,
SIG.ALL_LIMIT, SIG.ALL_HOME),
}
model = self.widgets[pinname].get_model()
piter = self.widgets[pinname].get_active_iter()
try:
dummy, index,signame,sig_group = model.get(piter, 0,1,2,3)
except:
self.recursive_block = False
return
dbg('exclusive: current:%s %d %s %s'%(pinname,index,signame,sig_group),mtype='excl')
ex = exclusive.get(signame, ())
if self.d.number_mesa > 0:
dbg( 'looking for %s in mesa'%signame,mtype='excl')
            # check mesa main board - only if the tab is shown and the ptype is GPIO
for boardnum in range(0,int(self.d.number_mesa)):
for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]) :
try:
if not self.widgets['mesa%dcon%dtable'%(boardnum,connector)].get_visible():continue
except:
break
break
for s in range(0,24):
p = "mesa%dc%dpin%d"% (boardnum,connector,s)
ptype = "mesa%dc%dpin%dtype"% (boardnum,connector,s)
#print p,self.widgets[ptype].get_active_text(),_PD.pintype_gpio[0]
try:
if not self.widgets[ptype].get_active_text() == _PD.pintype_gpio[0]: continue
if self.widgets[p] == widget:continue
except:
break
break
break
model = self.widgets[p].get_model()
piter = self.widgets[p].get_active_iter()
dummy, index,v1,sig_group = model.get(piter, 0,1,2,3)
#print 'check mesa signals',v1
if v1 in ex or v1 == signame:
dbg( 'found %s, at %s'%(signame,p),mtype='excl')
self.widgets[p].set_active(self._p.hal_input_names.index(SIG.UNUSED_INPUT))
self.d[p] = SIG.UNUSED_INPUT
port = 0
dbg( 'looking for %s in mesa sserial'%signame,mtype='excl')
for channel in range (0,self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._MAXSSERIALCHANNELS]):
if channel == _PD._NUM_CHANNELS: break # TODO may not have all channels worth of glade widgets
if not self.widgets['mesa%dsserial%d_%d'%(boardnum,port,channel)].get_visible():continue
#print "sserial data transfering"
for s in range (0,_PD._SSCOMBOLEN):
p = 'mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, s)
ptype = 'mesa%dsserial%d_%dpin%dtype' % (boardnum, port, channel, s)
try:
if not self.widgets[ptype].get_active_text() == _PD.pintype_gpio[0]: continue
if self.widgets[p] == widget:continue
except:
break
break
model = self.widgets[p].get_model()
piter = self.widgets[p].get_active_iter()
dummy, index,v1,sig_group = model.get(piter, 0,1,2,3)
#print 'check mesa signals',v1
if v1 in ex or v1 == signame:
dbg( 'found %s, at %s'%(signame,p),mtype='excl')
self.widgets[p].set_active(self._p.hal_input_names.index(SIG.UNUSED_INPUT))
self.d[p] = SIG.UNUSED_INPUT
if self.d.number_pports >0:
# search pport1 for the illegal signals and change them to unused.
dbg( 'looking for %s in pport1'%signame,mtype='excl')
for pin1 in (2,3,4,5,6,7,8,9,10,11,12,13,15):
p = 'pp1_Ipin%d' % pin1
                # pport1 may not be loaded yet
try:
if self.widgets[p] == widget:continue
except:
self.recursive_block = False
return
model = self.widgets[p].get_model()
piter = self.widgets[p].get_active_iter()
dummy, index,v1,sig_group = model.get(piter, 0,1,2,3)
#print 'check pport1 signals',v1
if v1 in ex or v1 == signame:
dbg( 'found %s, at %s'%(signame,p),mtype='excl')
self.widgets[p].set_active(self._p.hal_input_names.index(SIG.UNUSED_INPUT))
self.d[p] = SIG.UNUSED_INPUT
if self.d.number_pports >1:
# search pport2 for the illegal signals and change them to unused.
dbg( 'looking for %s in pport2'%signame,mtype='excl')
for pin1 in (2,3,4,5,6,7,8,9,10,11,12,13,15):
p2 = 'pp2_Ipin%d' % pin1
# pport2 may not be loaded yet
try:
if self.widgets[p2] == widget: continue
except:
self.recursive_block = False
return
                model = self.widgets[p2].get_model()
                piter = self.widgets[p2].get_active_iter()
                dummy, index,v2,sig_group = model.get(piter, 0,1,2,3)
                #print 'check pport2 signals',v2
if v2 in ex or v2 == signame:
dbg( 'found %s, at %s'%(signame,p2),mtype='excl')
self.widgets[p2].set_active(self._p.hal_input_names.index(SIG.UNUSED_INPUT))
self.d[p2] = SIG.UNUSED_INPUT
self.recursive_block = False
# MESA SIGNALS
# connect signals with pin designation data to mesa signal comboboxes and pintype comboboxes
# record the signal ID numbers so we can block the signals later in the mesa routines
# have to do it here manually (instead of autoconnect) because glade doesn't handle added
# user info (board/connector/pin number designations) and doesn't record the signal ID numbers
# none of this is done if mesa is not checked off in pncconf
    # TODO we should check to see if signals are already present as each time user goes through this page
    # the signals get added again causing multiple calls to the functions.
def init_mesa_signals(self,boardnum):
cb = "mesa%d_discovery"% (boardnum)
i = "_mesa%dsignalhandler_discovery"% (boardnum)
self.d[i] = int(self.widgets[cb].connect("clicked", self.p['on_mesa%d_discovery_clicked'%boardnum]))
cb = "mesa%d_comp_update"% (boardnum)
i = "_mesa%dsignalhandler_comp_update"% (boardnum)
self.d[i] = int(self.widgets[cb].connect("clicked", self.on_mesa_component_value_changed,boardnum))
cb = "mesa%d_boardtitle"% (boardnum)
i = "_mesa%dsignalhandler_boardname_change"% (boardnum)
self.d[i] = int(self.widgets[cb].connect("changed", self.on_mesa_boardname_changed,boardnum))
cb = "mesa%d_firmware"% (boardnum)
i = "_mesa%dsignalhandler_firmware_change"% (boardnum)
self.d[i] = int(self.widgets[cb].connect("changed", self.on_mesa_firmware_changed,boardnum))
for connector in (1,2,3,4,5,6,7,8,9):
for pin in range(0,24):
cb = "mesa%dc%ipin%i"% (boardnum,connector,pin)
i = "_mesa%dsignalhandlerc%ipin%i"% (boardnum,connector,pin)
self.d[i] = int(self.widgets[cb].connect("changed",
self.on_general_pin_changed,"mesa",boardnum,connector,None,pin,False))
i = "_mesa%dactivatehandlerc%ipin%i"% (boardnum,connector,pin)
self.d[i] = int(self.widgets[cb].child.connect("activate",
self.on_general_pin_changed,"mesa",boardnum,connector,None,pin,True))
self.widgets[cb].connect('changed', self.do_exclusive_inputs,boardnum,cb)
cb = "mesa%dc%ipin%itype"% (boardnum,connector,pin)
i = "_mesa%dptypesignalhandlerc%ipin%i"% (boardnum,connector,pin)
self.d[i] = int(self.widgets[cb].connect("changed", self.on_mesa_pintype_changed,boardnum,connector,None,pin))
# SmartSerial signals
port = 0 #TODO we only support one serial port
for channel in range (0,self._p._NUM_CHANNELS):
for pin in range (0,self._p._SSCOMBOLEN):
cb = "mesa%dsserial%i_%ipin%i"% (boardnum,port,channel,pin)
i = "_mesa%dsignalhandlersserial%i_%ipin%i"% (boardnum,port,channel,pin)
self.d[i] = int(self.widgets[cb].connect("changed",
self.on_general_pin_changed,"sserial",boardnum,port,channel,pin,False))
i = "_mesa%dactivatehandlersserial%i_%ipin%i"% (boardnum,port,channel,pin)
self.d[i] = int(self.widgets[cb].child.connect("activate",
self.on_general_pin_changed,"sserial",boardnum,port,channel,pin,True))
self.widgets[cb].connect('changed', self.do_exclusive_inputs,boardnum,cb)
cb = "mesa%dsserial%i_%ipin%itype"% (boardnum,port,channel,pin)
i = "_mesa%dptypesignalhandlersserial%i_%ipin%i"% (boardnum,port,channel,pin)
self.d[i] = int(self.widgets[cb].connect("changed", self.on_mesa_pintype_changed,boardnum,port,channel,pin))
self.widgets["mesa%d_7i29_sanity_check"%boardnum].connect('clicked', self.daughter_board_sanity_check)
self.widgets["mesa%d_7i30_sanity_check"%boardnum].connect('clicked', self.daughter_board_sanity_check)
self.widgets["mesa%d_7i33_sanity_check"%boardnum].connect('clicked', self.daughter_board_sanity_check)
self.widgets["mesa%d_7i40_sanity_check"%boardnum].connect('clicked', self.daughter_board_sanity_check)
self.widgets["mesa%d_7i48_sanity_check"%boardnum].connect('clicked', self.daughter_board_sanity_check)
def init_mesa_options(self,boardnum):
#print 'init mesa%d options'%boardnum
i = self.widgets['mesa%d_boardtitle'%boardnum].get_active_text()
# check for installed firmware
#print i,self.d['mesa%d_boardtitle'%boardnum]
if 1==1:#if not self.d['_mesa%d_arrayloaded'%boardnum]:
#print boardnum,self._p.FIRMDIR,i
# add any extra firmware data from .pncconf-preference file
#if not customself._p.MESA_FIRMWAREDATA == []:
# for i,j in enumerate(customself._p.MESA_FIRMWAREDATA):
# self._p.MESA_FIRMWAREDATA.append(customself._p.MESA_FIRMWAREDATA[i])
# ok set up mesa info
dbg('Looking for firmware data %s'%self.d["mesa%d_firmware"% boardnum])
found = False
search = 0
model = self.widgets["mesa%d_firmware"% boardnum].get_model()
for search,item in enumerate(model):
dbg('%d,%s'%(search,model[search][0]))
if model[search][0] == self.d["mesa%d_firmware"% boardnum]:
self.widgets["mesa%d_firmware"% boardnum].set_active(search)
found = True
dbg('found firmware # %d'% search)
break
if not found:
dbg('firmware not found')
cur_firm = self.d['mesa%d_currentfirmwaredata'% boardnum][_PD._FIRMWARE]
dbg('looking for: %s'% cur_firm )
#self.widgets["mesa%d_firmware"% boardnum].set_active(0)
self._p.MESA_FIRMWAREDATA.append(self.d['mesa%d_currentfirmwaredata'% boardnum])
model.append((cur_firm,))
self.init_mesa_options(boardnum)
return
else:
self.widgets["mesa%d_pwm_frequency"% boardnum].set_value(self.d["mesa%d_pwm_frequency"% boardnum])
self.widgets["mesa%d_pdm_frequency"% boardnum].set_value(self.d["mesa%d_pdm_frequency"% boardnum])
self.widgets["mesa%d_3pwm_frequency"% boardnum].set_value(self.d["mesa%d_3pwm_frequency"% boardnum])
self.widgets["mesa%d_watchdog_timeout"% boardnum].set_value(self.d["mesa%d_watchdog_timeout"% boardnum])
self.widgets["mesa%d_numof_encodergens"% boardnum].set_value(self.d["mesa%d_numof_encodergens"% boardnum])
self.widgets["mesa%d_numof_pwmgens"% boardnum].set_value(self.d["mesa%d_numof_pwmgens"% boardnum])
self.widgets["mesa%d_numof_tppwmgens"% boardnum].set_value(self.d["mesa%d_numof_tppwmgens"% boardnum])
self.widgets["mesa%d_numof_stepgens"% boardnum].set_value(self.d["mesa%d_numof_stepgens"% boardnum])
self.widgets["mesa%d_numof_sserialports"% boardnum].set_value(self.d["mesa%d_numof_sserialports"% boardnum])
self.widgets["mesa%d_numof_sserialchannels"% boardnum].set_value(self.d["mesa%d_numof_sserialchannels"% boardnum])
if not self.widgets.createconfig.get_active() and not self.d['_mesa%d_configured'%boardnum]:
bt = self.d['mesa%d_boardtitle'%boardnum]
firm = self.d['mesa%d_firmware'%boardnum]
pgens = self.d['mesa%d_numof_pwmgens'%boardnum]
tpgens = self.d['mesa%d_numof_tppwmgens'%boardnum]
stepgens = self.d['mesa%d_numof_stepgens'%boardnum]
enc = self.d['mesa%d_numof_encodergens'%boardnum]
ssports = self.d['mesa%d_numof_sserialports'%boardnum]
sschannels = self.d['mesa%d_numof_sserialchannels'%boardnum]
self.set_mesa_options(boardnum,bt,firm,pgens,tpgens,stepgens,enc,ssports,sschannels)
elif not self.d._mesa0_configured:
self.widgets['mesa%dcon2table'%boardnum].hide()
self.widgets['mesa%dcon3table'%boardnum].hide()
self.widgets['mesa%dcon4table'%boardnum].hide()
self.widgets['mesa%dcon5table'%boardnum].hide()
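    # When the board title changes: show or hide the discovery button, hide all
    # connector tables and sserial tabs, relabel the connector tabs from the
    # board's meta data and repopulate the firmware combobox.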
def on_mesa_boardname_changed(self, widget,boardnum):
#print "**** INFO boardname %d changed"% boardnum
model = self.widgets["mesa%d_boardtitle"% boardnum].get_model()
title = self.widgets["mesa%d_boardtitle"% boardnum].get_active_text()
if title:
if 'Discovery Option' in title:
self.widgets["mesa%d_discovery"% boardnum].show()
else:
self.widgets["mesa%d_discovery"% boardnum].hide()
for i in(1,2,3,4,5,6,7,8,9):
self.widgets['mesa%dcon%dtable'%(boardnum,i)].hide()
self.widgets["mesa{}con{}tab".format(boardnum,i)].set_text('I/O\n Connector %d'%i)
for i in(0,1,2,3,4,5):
self.widgets["mesa%dsserial0_%d"%(boardnum,i)].hide()
if title == None: return
if 'Discovery Option' not in title:
meta = self.get_board_meta(title)
names = meta.get('TAB_NAMES')
tnums = meta.get('TAB_NUMS')
if names and tnums:
for index, tabnum in enumerate(tnums):
self.widgets["mesa{}con{}tab".format(boardnum,tabnum)].set_text(names[index])
#print 'title',title
self.fill_firmware(boardnum)
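    # Rebuild the firmware combobox for the selected board title from the
    # installed firmware directory (if present) plus the internal firmware data.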
def fill_firmware(self,boardnum):
#print 'fill firmware'
self.firmware_block = True
title = self.widgets["mesa%d_boardtitle"% boardnum].get_active_text()
#print title
self._p.MESA_FIRMWAREDATA = []
if os.path.exists(os.path.join(self._p.FIRMDIR,title)):
self.mesa_firmware_search(title)
self.d['_mesa%d_arrayloaded'%boardnum] = True
for i in self._p.MESA_INTERNAL_FIRMWAREDATA:
self._p.MESA_FIRMWAREDATA.append(i)
model = self.widgets["mesa%d_firmware"% boardnum].get_model()
model.clear()
temp=[]
for search, item in enumerate(self._p.MESA_FIRMWAREDATA):
d = self._p.MESA_FIRMWAREDATA[search]
if not d[self._p._BOARDTITLE] == title:continue
temp.append(d[self._p._FIRMWARE])
temp.sort()
for i in temp:
#print i
model.append((i,))
self.widgets["mesa%d_firmware"% boardnum].set_active(0)
self.firmware_block = False
self.on_mesa_firmware_changed(None,boardnum)
#print "firmware-",self.widgets["mesa%d_firmware"% boardnum].get_active_text(),self.widgets["mesa%d_firmware"% boardnum].get_active()
#print "boardname-" + d[_PD._BOARDNAME]
def on_mesa_firmware_changed(self, widget,boardnum):
if self.firmware_block:
return
print "**** INFO firmware %d changed"% boardnum
model = self.widgets["mesa%d_boardtitle"% boardnum].get_model()
active = self.widgets["mesa%d_boardtitle"% boardnum].get_active()
if active < 0:
title = None
else: title = model[active][0]
firmware = self.widgets["mesa%d_firmware"% boardnum].get_active_text()
for search, item in enumerate(self._p.MESA_FIRMWAREDATA):
d = self._p.MESA_FIRMWAREDATA[search]
#print firmware,d[_PD._FIRMWARE],title,d[_PD._BOARDTITLE]
if not d[_PD._BOARDTITLE] == title:continue
if d[_PD._FIRMWARE] == firmware:
self.widgets["mesa%d_numof_encodergens"%boardnum].set_range(0,d[_PD._MAXENC])
self.widgets["mesa%d_numof_encodergens"% boardnum].set_value(d[_PD._MAXENC])
self.widgets["mesa%d_numof_pwmgens"% boardnum].set_range(0,d[_PD._MAXPWM])
self.widgets["mesa%d_numof_pwmgens"% boardnum].set_value(d[_PD._MAXPWM])
if d[_PD._MAXTPPWM]:
self.widgets["mesa%d_numof_tppwmgens"% boardnum].show()
self.widgets["mesa%d_numof_tpp_label"% boardnum].show()
self.widgets["mesa%d_3pwm_freq_label"% boardnum].show()
self.widgets["mesa%d_3pwm_freq_units"% boardnum].show()
self.widgets["mesa%d_3pwm_frequency"% boardnum].show()
else:
self.widgets["mesa%d_numof_tppwmgens"% boardnum].hide()
self.widgets["mesa%d_numof_tpp_label"% boardnum].hide()
self.widgets["mesa%d_3pwm_freq_label"% boardnum].hide()
self.widgets["mesa%d_3pwm_freq_units"% boardnum].hide()
self.widgets["mesa%d_3pwm_frequency"% boardnum].hide()
self.widgets["mesa%d_numof_tppwmgens"% boardnum].set_range(0,d[_PD._MAXTPPWM])
self.widgets["mesa%d_numof_tppwmgens"% boardnum].set_value(d[_PD._MAXTPPWM])
self.widgets["mesa%d_numof_stepgens"% boardnum].set_range(0,d[_PD._MAXSTEP])
self.widgets["mesa%d_numof_stepgens"% boardnum].set_value(d[_PD._MAXSTEP])
self.d["mesa%d_numof_resolvers"% boardnum] = (d[_PD._MAXRES]) # TODO fix this hack should be selectable
if d[_PD._MAXRES]:
self.widgets["mesa%d_numof_resolvers"% boardnum].show()
self.widgets["mesa%d_numof_resolvers"% boardnum].set_value(d[_PD._MAXRES]*6)
self.widgets["mesa%d_numof_resolvers"% boardnum].set_sensitive(False)
self.widgets["mesa%d_numof_resolvers_label"% boardnum].show()
self.widgets["mesa%d_pwm_frequency"% boardnum].set_value(24000)
else:
self.widgets["mesa%d_numof_resolvers"% boardnum].hide()
self.widgets["mesa%d_numof_resolvers_label"% boardnum].hide()
self.widgets["mesa%d_numof_resolvers"% boardnum].set_value(0)
if d[_PD._MAXSSERIALPORTS]:
self.widgets["mesa%d_numof_sserialports"% boardnum].show()
self.widgets["mesa%d_numof_sserialports_label"% boardnum].show()
self.widgets["mesa%d_numof_sserialchannels"% boardnum].show()
self.widgets["mesa%d_numof_sserialchannels_label"% boardnum].show()
else:
self.widgets["mesa%d_numof_sserialports"% boardnum].hide()
self.widgets["mesa%d_numof_sserialports_label"% boardnum].hide()
self.widgets["mesa%d_numof_sserialchannels"% boardnum].hide()
self.widgets["mesa%d_numof_sserialchannels_label"% boardnum].hide()
self.widgets["mesa%d_numof_sserialports"% boardnum].set_range(0,d[_PD._MAXSSERIALPORTS])
self.widgets["mesa%d_numof_sserialports"% boardnum].set_value(d[_PD._MAXSSERIALPORTS])
self.widgets["mesa%d_numof_sserialchannels"% boardnum].set_range(1,d[_PD._MAXSSERIALCHANNELS])
self.widgets["mesa%d_numof_sserialchannels"% boardnum].set_value(d[_PD._MAXSSERIALCHANNELS])
self.widgets["mesa%d_totalpins"% boardnum].set_text("%s"% d[_PD._MAXGPIO])
self.widgets["mesa%d_3pwm_frequency"% boardnum].set_sensitive(d[_PD._MAXTPPWM])
if d[_PD._MAXRES]:
self.widgets["mesa%d_pwm_frequency"% boardnum].set_sensitive(False)
else:
self.widgets["mesa%d_pwm_frequency"% boardnum].set_sensitive(d[_PD._MAXPWM])
self.widgets["mesa%d_pdm_frequency"% boardnum].set_sensitive(d[_PD._MAXPWM])
if 'eth' in d[_PD._HALDRIVER] or "7i43" in title or '7i90' in title:
self.widgets["mesa%d_card_addrs_hbox"% boardnum].show()
if '7i43' in title or '7i90' in title:
self.widgets["mesa%d_parportaddrs"% boardnum].show()
self.widgets["mesa%d_card_addrs"% boardnum].hide()
else:
self.widgets["mesa%d_parportaddrs"% boardnum].hide()
self.widgets["mesa%d_card_addrs"% boardnum].show()
self.widgets["mesa%d_parporttext"% boardnum].show()
else:
self.widgets["mesa%d_card_addrs_hbox"% boardnum].hide()
self.widgets["mesa%d_parporttext"% boardnum].hide()
break
# This method converts data from the GUI page to signal names for pncconf's mesa data variables
# It starts by checking pin type to set up the proper lists to search
# then depending on the pin type widget data is converted to signal names.
# if the signal name is not in the list add it to Human_names, signal_names
# and disc-saved signalname lists
    # for encoder, pwm, or stepper pins the related pins are also set properly
    # it does this by searching the current firmware array and finding what the
    # other related pin numbers are, then changing them to the appropriate signalname.
def mesa_data_transfer(self,boardnum):
for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS]) :
for pin in range(0,24):
p = 'mesa%dc%dpin%d' % (boardnum,connector,pin)
pinv = 'mesa%dc%dpin%dinv' % (boardnum,connector,pin)
ptype = 'mesa%dc%dpin%dtype' % (boardnum,connector,pin)
self.data_transfer(boardnum,connector,None,pin,p,pinv,ptype)
self.d["mesa%d_pwm_frequency"% boardnum] = self.widgets["mesa%d_pwm_frequency"% boardnum].get_value()
self.d["mesa%d_pdm_frequency"% boardnum] = self.widgets["mesa%d_pdm_frequency"% boardnum].get_value()
self.d["mesa%d_3pwm_frequency"% boardnum] = self.widgets["mesa%d_3pwm_frequency"% boardnum].get_value()
self.d["mesa%d_watchdog_timeout"% boardnum] = self.widgets["mesa%d_watchdog_timeout"% boardnum].get_value()
port = 0
for channel in range (0,self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._MAXSSERIALCHANNELS]):
if channel == _PD._NUM_CHANNELS: break # TODO may not have all channels worth of glade widgets
subboardname = self.d["mesa%dsserial%d_%dsubboard"% (boardnum, port, channel)]
#print "data transfer-channel ",channel," subboard name",subboardname
if subboardname == "none":
#print "no subboard for %s"% subboardname
continue
#print "sserial data transfering"
for pin in range (0,_PD._SSCOMBOLEN):
p = 'mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, pin)
pinv = 'mesa%dsserial%d_%dpin%dinv' % (boardnum, port, channel, pin)
ptype = 'mesa%dsserial%d_%dpin%dtype' % (boardnum, port, channel, pin)
self.data_transfer(boardnum,port,channel,pin,p,pinv,ptype)
#print "sserial data transfer",p
def data_transfer(self,boardnum,connector,channel,pin,p,pinv,ptype):
foundit = False
piter = self.widgets[p].get_active_iter()
ptiter = self.widgets[ptype].get_active_iter()
pintype = self.widgets[ptype].get_active_text()
selection = self.widgets[p].get_active_text()
signaltree = self.widgets[p].get_model()
#if "serial" in p:
# print "**** INFO mesa-data-transfer:",p," selection: ",selection," pintype: ",pintype
# print "**** INFO mesa-data-transfer:",ptiter,piter
# type NOTUSED
if pintype == _PD.NUSED:
self.d[p] = _PD.UNUSED_UNUSED
self.d[ptype] = _PD.NUSED
self.d[pinv] = False
return
# type GPIO input
if pintype == _PD.GPIOI:
ptypetree = self.d._gpioliststore
signaltocheck = _PD.hal_input_names
# type gpio output and open drain
elif pintype in (_PD.GPIOO,_PD.GPIOD):
ptypetree = self.d._gpioliststore
signaltocheck = _PD.hal_output_names
elif pintype == _PD.SSR0:
ptypetree = self.d._ssrliststore
signaltocheck = _PD.hal_output_names
#type encoder
elif pintype in (_PD.ENCA,_PD.ENCB,_PD.ENCI,_PD.ENCM):
ptypetree = self.d._encoderliststore
signaltocheck = _PD.hal_encoder_input_names
# resolvers
elif pintype in (_PD.RES0,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5,_PD.RESU):
ptypetree = self.d._resolverliststore
signaltocheck = _PD.hal_resolver_input_names
# 8i20 amplifier card
elif pintype == _PD.AMP8I20:
ptypetree = self.d._8i20liststore
signaltocheck = _PD.hal_8i20_input_names
# potentiometer output
elif pintype in (_PD.POTO,_PD.POTE):
ptypetree = self.d._potliststore
signaltocheck = _PD.hal_pot_output_names
# analog in
elif pintype == (_PD.ANALOGIN):
ptypetree = self.d._analoginliststore
signaltocheck = _PD.hal_analog_input_names
#type mux encoder
elif pintype in (_PD.MXE0, _PD.MXE1, _PD.MXEI, _PD.MXEM, _PD.MXES):
ptypetree = self.d._muxencoderliststore
signaltocheck = _PD.hal_encoder_input_names
        # type PDM gen
elif pintype in( _PD.PDMP,_PD.PDMD,_PD.PDME):
if pintype == _PD.PDMP:
ptypetree = self.d._pdmcontrolliststore
else:
ptypetree = self.d._pdmrelatedliststore
signaltocheck = _PD.hal_pwm_output_names
        # type PWM gen
elif pintype in( _PD.PWMP,_PD.PWMD,_PD.PWME):
if pintype == _PD.PWMP:
ptypetree = self.d._pwmcontrolliststore
else:
ptypetree = self.d._pwmrelatedliststore
signaltocheck = _PD.hal_pwm_output_names
# Up/Down mode
elif pintype in( _PD.UDMU,_PD.UDMD,_PD.UDME):
if pintype == _PD.UDMU:
ptypetree = self.d._udmcontrolliststore
else:
ptypetree = self.d._udmrelatedliststore
signaltocheck = _PD.hal_pwm_output_names
# type tp pwm
elif pintype in (_PD.TPPWMA,_PD.TPPWMB,_PD.TPPWMC,_PD.TPPWMAN,_PD.TPPWMBN,_PD.TPPWMCN,_PD.TPPWME,_PD.TPPWMF):
ptypetree = self.d._tppwmliststore
signaltocheck = _PD.hal_tppwm_output_names
# type step gen
elif pintype in (_PD.STEPA,_PD.STEPB):
ptypetree = self.d._stepperliststore
signaltocheck = _PD.hal_stepper_names
# type sserial
elif pintype in (_PD.RXDATA0,_PD.TXDATA0,_PD.TXEN0,_PD.RXDATA1,_PD.TXDATA1,_PD.TXEN1,_PD.RXDATA2,
_PD.TXDATA2,_PD.TXEN2,_PD.RXDATA3,_PD.TXDATA3,_PD.TXEN3,
_PD.RXDATA4,_PD.TXDATA4,_PD.TXEN4,_PD.RXDATA5,_PD.TXDATA5,_PD.TXEN5,_PD.RXDATA6,_PD.TXDATA6,
_PD.TXEN6,_PD.RXDATA7,_PD.TXDATA7,_PD.TXEN7,
_PD.SS7I76M0,_PD.SS7I76M2,_PD.SS7I76M3,_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4):
ptypetree = self.d._sserialliststore
signaltocheck = _PD.hal_sserial_names
# this suppresses errors because of unused and uninitialized sserial instances
elif pintype == None and "sserial" in ptype: return
else :
print "**** ERROR mesa-data-transfer: error unknown pin type:",pintype,"of ",ptype
return
        # **Start widget to data Conversion**
# for encoder pins
if piter == None:
#print "callin pin changed !!!"
name ="mesa"
if "sserial" in p: name = "sserial"
self.on_general_pin_changed(None,name,boardnum,connector,channel,pin,True)
selection = self.widgets[p].get_active_text()
piter = self.widgets[p].get_active_iter()
if piter == None:
print "****ERROR PNCCONF: no custom name available"
return
#print "found signame -> ",selection," "
        # ok we have a piter with a signal type now - let's convert it to a signalname
#if not "serial" in p:
# self.debug_iter(piter,p,"signal")
dummy, index = signaltree.get(piter,0,1)
#if not "serial" in p:
# print "signaltree: ",dummy
# self.debug_iter(ptiter,ptype,"ptype")
widgetptype, index2 = ptypetree.get(ptiter,0,1)
#if not "serial" in p:
# print "ptypetree: ",widgetptype
if pintype in (_PD.GPIOI,_PD.GPIOO,_PD.GPIOD,_PD.SSR0,_PD.MXE0,_PD.MXE1,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5,_PD.RESU,_PD.SS7I76M0,
_PD.SS7I76M2,_PD.SS7I76M3,_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4) or (index == 0):
index2 = 0
elif pintype in ( _PD.TXDATA0,_PD.RXDATA0,_PD.TXEN0,_PD.TXDATA1,_PD.RXDATA1,_PD.TXEN1,_PD.TXDATA2,_PD.RXDATA2,
_PD.TXEN2,_PD.TXDATA3,_PD.RXDATA3,_PD.TXEN3,_PD.TXDATA4,_PD.RXDATA4,_PD.TXEN4,
_PD.TXDATA5,_PD.RXDATA5,_PD.TXEN5,_PD.TXDATA6,_PD.RXDATA6,_PD.TXEN6,_PD.TXDATA7,_PD.RXDATA7,_PD.TXEN7 ):
index2 = 0
#print index,index2,signaltocheck[index+index2]
self.d[p] = signaltocheck[index+index2]
self.d[ptype] = widgetptype
self.d[pinv] = self.widgets[pinv].get_active()
#if "serial" in p:
# print "*** INFO PNCCONF mesa pin:",p,"signalname:",self.d[p],"pin type:",widgetptype
def on_mesa_pintype_changed(self, widget,boardnum,connector,channel,pin):
#print "mesa pintype changed:",boardnum,connector,channel,pin
if not channel == None:
port = connector
p = 'mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, pin)
ptype = 'mesa%dsserial%d_%dpin%dtype' % (boardnum, port, channel, pin)
blocksignal = "_mesa%dsignalhandlersserial%i_%ipin%i" % (boardnum, port, channel, pin)
ptypeblocksignal = "_mesa%dptypesignalhandlersserial%i_%ipin%i"% (boardnum, port, channel, pin)
else:
p = 'mesa%dc%dpin%d' % (boardnum,connector,pin)
ptype = 'mesa%dc%dpin%dtype' % (boardnum,connector,pin)
blocksignal = "_mesa%dsignalhandlerc%ipin%i"% (boardnum,connector,pin)
ptypeblocksignal = "_mesa%dptypesignalhandlerc%ipin%i" % (boardnum, connector,pin)
modelcheck = self.widgets[p].get_model()
modelptcheck = self.widgets[ptype].get_model()
new = self.widgets[ptype].get_active_text()
#print "pintypechanged",p
# switch GPIO input to GPIO output
# here we switch the available signal names in the combobox
# we block signals so pinchanged method is not called
if modelcheck == self.d._gpioisignaltree and new in (_PD.GPIOO,_PD.GPIOD):
#print "switch GPIO input ",p," to output",new
self.widgets[p].handler_block(self.d[blocksignal])
self.widgets[p].set_model(self.d._gpioosignaltree)
self.widgets[p].set_active(0)
self.widgets[p].handler_unblock(self.d[blocksignal])
# switch GPIO output to input
elif modelcheck == self.d._gpioosignaltree:
if new == _PD.GPIOI:
#print "switch GPIO output ",p,"to input"
self.widgets[p].handler_block(self.d[blocksignal])
self.widgets[p].set_model(self.d._gpioisignaltree)
self.widgets[p].set_active(0)
self.widgets[p].handler_unblock(self.d[blocksignal])
# switch between pulse width, pulse density or up/down mode analog modes
# here we search the firmware for related pins (eg PWMP,PWMD,PWME ) and change them too.
# we block signals so we don't call this routine again.
elif modelptcheck in (self.d._pwmcontrolliststore, self.d._pdmcontrolliststore, self.d._udmcontrolliststore):
relatedpins = [_PD.PWMP,_PD.PWMD,_PD.PWME]
if new == _PD.PWMP:
display = 0
relatedliststore = self.d._pwmrelatedliststore
controlliststore = self.d._pwmcontrolliststore
elif new == _PD.PDMP:
display = 1
relatedliststore = self.d._pdmrelatedliststore
controlliststore = self.d._pdmcontrolliststore
elif new == _PD.UDMU:
display = 2
relatedliststore = self.d._udmrelatedliststore
controlliststore = self.d._udmcontrolliststore
else:print "**** WARNING PNCCONF: pintype error-PWM type not found";return
self.widgets[ptype].handler_block(self.d[ptypeblocksignal])
self.widgets[ptype].set_model(controlliststore)
self.widgets[ptype].set_active(display)
self.widgets[ptype].handler_unblock(self.d[ptypeblocksignal])
pinlist = self.list_related_pins(relatedpins, boardnum, connector, channel, pin, 1)
for i in (pinlist):
relatedptype = i[0]
if relatedptype == ptype :continue
if not channel == None:
ptypeblocksignal = "_mesa%dptypesignalhandlersserial%i_%ipin%i"% (i[1], i[2],i[3],i[4])
else:
ptypeblocksignal = "_mesa%dptypesignalhandlerc%ipin%i" % (i[1], i[2],i[4])
self.widgets[relatedptype].handler_block(self.d[ptypeblocksignal])
j = self.widgets[relatedptype].get_active()
self.widgets[relatedptype].set_model(relatedliststore)
self.widgets[relatedptype].set_active(j)
self.widgets[relatedptype].handler_unblock(self.d[ptypeblocksignal])
else: print "**** WARNING PNCCONF: pintype error in pintypechanged method new ",new," pinnumber ",p
def on_mesa_component_value_changed(self, widget,boardnum):
self.in_mesa_prepare = True
self.d["mesa%d_pwm_frequency"% boardnum] = self.widgets["mesa%d_pwm_frequency"% boardnum].get_value()
self.d["mesa%d_pdm_frequency"% boardnum] = self.widgets["mesa%d_pdm_frequency"% boardnum].get_value()
self.d["mesa%d_watchdog_timeout"% boardnum] = self.widgets["mesa%d_watchdog_timeout"% boardnum].get_value()
numofpwmgens = self.d["mesa%d_numof_pwmgens"% boardnum] = int(self.widgets["mesa%d_numof_pwmgens"% boardnum].get_value())
numoftppwmgens = self.d["mesa%d_numof_tppwmgens"% boardnum] = int(self.widgets["mesa%d_numof_tppwmgens"% boardnum].get_value())
numofstepgens = self.d["mesa%d_numof_stepgens"% boardnum] = int(self.widgets["mesa%d_numof_stepgens"% boardnum].get_value())
numofencoders = self.d["mesa%d_numof_encodergens"% boardnum] = int(self.widgets["mesa%d_numof_encodergens"% boardnum].get_value())
numofsserialports = self.d["mesa%d_numof_sserialports"% boardnum] = int(self.widgets["mesa%d_numof_sserialports"% boardnum].get_value())
numofsserialchannels = self.d["mesa%d_numof_sserialchannels"% boardnum] = \
int(self.widgets["mesa%d_numof_sserialchannels"% boardnum].get_value())
title = self.d["mesa%d_boardtitle"% boardnum] = self.widgets["mesa%d_boardtitle"% boardnum].get_active_text()
firmware = self.d["mesa%d_firmware"% boardnum] = self.widgets["mesa%d_firmware"% boardnum].get_active_text()
self.set_mesa_options(boardnum,title,firmware,numofpwmgens,numoftppwmgens,numofstepgens,numofencoders,numofsserialports,numofsserialchannels)
return True
# This method sets up the mesa GUI page and is used when changing component values / firmware or boards from config page.
# it changes the component comboboxes according to the firmware max and user requested amounts
# it adds signal names to the signal name combo boxes according to component type and in the
# case of GPIO options selected on the basic page such as limit/homing types.
# it will grey out I/O tabs according to the selected board type.
# it uses GTK signal blocking to block on_general_pin_change and on_mesa_pintype_changed methods.
# Since this method is for initialization, there is no need to check for changes and this speeds up
# the update.
    # 'self._p.MESA_FIRMWAREDATA' holds all the firmware data.
    # 'self.d.mesaX_currentfirmwaredata' holds the currently selected firmware data (X is 0 or 1)
def set_mesa_options(self,boardnum,title,firmware,numofpwmgens,numoftppwmgens,numofstepgens,numofencoders,numofsserialports,numofsserialchannels):
_PD.prepare_block = True
self.p.set_buttons_sensitive(0,0)
self.pbar.set_text("Setting up Mesa tabs")
self.pbar.set_fraction(0)
self.window.show()
while gtk.events_pending():
gtk.main_iteration()
for search, item in enumerate(self._p.MESA_FIRMWAREDATA):
d = self._p.MESA_FIRMWAREDATA[search]
if not d[_PD._BOARDTITLE] == title:continue
if d[_PD._FIRMWARE] == firmware:
self.d["mesa%d_currentfirmwaredata"% boardnum] = self._p.MESA_FIRMWAREDATA[search]
break
dbg('current firmware:\n%r'%self._p.MESA_FIRMWAREDATA[search],mtype='curfirm')
self.widgets["mesa%dcon2table"% boardnum].hide()
self.widgets["mesa%dcon3table"% boardnum].hide()
self.widgets["mesa%dcon4table"% boardnum].hide()
self.widgets["mesa%dcon5table"% boardnum].hide()
self.widgets["mesa%dcon6table"% boardnum].hide()
self.widgets["mesa%dcon7table"% boardnum].hide()
self.widgets["mesa%dcon8table"% boardnum].hide()
self.widgets["mesa%dcon9table"% boardnum].hide()
self.widgets["mesa%dsserial0_0"% boardnum].hide()
self.widgets["mesa%dsserial0_1"% boardnum].hide()
self.widgets["mesa%dsserial0_2"% boardnum].hide()
self.widgets["mesa%dsserial0_3"% boardnum].hide()
self.widgets["mesa%dsserial0_4"% boardnum].hide()
currentboard = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME]
for i in self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS]:
self.widgets["mesa%dcon%dtable"% (boardnum,i)].show()
# self.widgets["mesa%d"%boardnum].set_title("Mesa%d Configuration-Board: %s firmware: %s"% (boardnum,self.d["mesa%d_boardtitle"%boardnum],
# self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._FIRMWARE]))
temp = "/usr/share/doc/hostmot2-firmware-%s/%s.PIN"% (self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._DIRECTORY],
self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._FIRMWARE] )
filename = os.path.expanduser(temp)
if os.path.exists(filename):
match = open(filename).read()
textbuffer = self.widgets.textoutput.get_buffer()
try :
textbuffer.set_text("%s\n\n"% filename)
textbuffer.insert_at_cursor(match)
except:
pass
currentboard = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME]
meta = self.get_board_meta(currentboard)
ppc = meta.get('PINS_PER_CONNECTOR')
for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS]) :
for pin in range (0,24):
self.pbar.set_fraction((pin+1)/24.0)
while gtk.events_pending():
gtk.main_iteration()
firmptype,compnum = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._STARTOFDATA+pin+(concount*24)]
p = 'mesa%dc%dpin%d' % (boardnum, connector, pin)
ptype = 'mesa%dc%dpin%dtype' % (boardnum, connector , pin)
#print "**** INFO set-mesa-options DATA:",self.d[p],p,self.d[ptype]
#print "**** INFO set-mesa-options FIRM:",firmptype
#print "**** INFO set-mesa-options WIDGET:",self.widgets[p].get_active_text(),self.widgets[ptype].get_active_text()
complabel = 'mesa%dc%dpin%dnum' % (boardnum, connector , pin)
pinv = 'mesa%dc%dpin%dinv' % (boardnum, connector , pin)
blocksignal = "_mesa%dsignalhandlerc%ipin%i" % (boardnum, connector, pin)
ptypeblocksignal = "_mesa%dptypesignalhandlerc%ipin%i" % (boardnum, connector,pin)
actblocksignal = "_mesa%dactivatehandlerc%ipin%i" % (boardnum, connector, pin)
# kill all widget signals:
self.widgets[ptype].handler_block(self.d[ptypeblocksignal])
self.widgets[p].handler_block(self.d[blocksignal])
self.widgets[p].child.handler_block(self.d[actblocksignal])
self.firmware_to_widgets(boardnum,firmptype,p,ptype,pinv,complabel,compnum,concount,ppc,pin,numofencoders,
numofpwmgens,numoftppwmgens,numofstepgens,None,numofsserialports,numofsserialchannels,False)
self.d["mesa%d_numof_stepgens"% boardnum] = numofstepgens
self.d["mesa%d_numof_pwmgens"% boardnum] = numofpwmgens
self.d["mesa%d_numof_encodergens"% boardnum] = numofencoders
self.d["mesa%d_numof_sserialports"% boardnum] = numofsserialports
self.d["mesa%d_numof_sserialchannels"% boardnum] = numofsserialchannels
self.widgets["mesa%d_numof_stepgens"% boardnum].set_value(numofstepgens)
self.widgets["mesa%d_numof_encodergens"% boardnum].set_value(numofencoders)
self.widgets["mesa%d_numof_pwmgens"% boardnum].set_value(numofpwmgens)
self.in_mesa_prepare = False
self.d["_mesa%d_configured"% boardnum] = True
# unblock all the widget signals again
for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS]) :
for pin in range (0,24):
p = 'mesa%dc%dpin%d' % (boardnum, connector, pin)
ptype = 'mesa%dc%dpin%dtype' % (boardnum, connector , pin)
blocksignal = "_mesa%dsignalhandlerc%ipin%i" % (boardnum, connector, pin)
ptypeblocksignal = "_mesa%dptypesignalhandlerc%ipin%i" % (boardnum, connector,pin)
actblocksignal = "_mesa%dactivatehandlerc%ipin%i" % (boardnum, connector, pin)
self.widgets[ptype].handler_unblock(self.d[ptypeblocksignal])
self.widgets[p].handler_unblock(self.d[blocksignal])
self.widgets[p].child.handler_unblock(self.d[actblocksignal])
self.mesa_mainboard_data_to_widgets(boardnum)
self.window.hide()
self.p.set_buttons_sensitive(1,1)
_PD.prepare_block = False
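    # Build the GUI for one smart-serial channel: look up the selected subboard
    # in MESA_DAUGHTERDATA, set up each pin's comboboxes from the subboard's
    # firmware data, then load the saved pin data back into the widgets.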
def set_sserial_options(self,boardnum,port,channel):
numofsserialports = self.d["mesa%d_numof_sserialports"% boardnum]
numofsserialchannels = self.d["mesa%d_numof_sserialchannels"% boardnum]
subboardname = self.d["mesa%dsserial%d_%dsubboard"% (boardnum, port, channel)]
if subboardname == "none":return
self.pbar.set_text("Setting up Mesa Smart Serial tabs")
self.pbar.set_fraction(0)
self.window.show()
while gtk.events_pending():
gtk.main_iteration()
for subnum,temp in enumerate(self._p.MESA_DAUGHTERDATA):
#print self._p.MESA_DAUGHTERDATA[subnum][self._p._SUBFIRMNAME],subboardname
if self._p.MESA_DAUGHTERDATA[subnum][self._p._SUBFIRMNAME] == subboardname: break
#print "found subboard name:",self._p.MESA_DAUGHTERDATA[subnum][self._p._SUBFIRMNAME],subboardname,subnum,"channel:",channel
for pin in range (0,self._p._SSCOMBOLEN):
self.pbar.set_fraction((pin+1)/60.0)
while gtk.events_pending():
gtk.main_iteration()
p = 'mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, pin)
ptype = 'mesa%dsserial%d_%dpin%dtype' % (boardnum, port, channel, pin)
pinv = 'mesa%dsserial%d_%dpin%dinv' % (boardnum, port, channel, pin)
complabel = 'mesa%dsserial%d_%dpin%dnum' % (boardnum, port, channel, pin)
blocksignal = "_mesa%dsignalhandlersserial%i_%ipin%i" % (boardnum, port, channel, pin)
ptypeblocksignal = "_mesa%dptypesignalhandlersserial%i_%ipin%i" % (boardnum, port, channel, pin)
actblocksignal = "_mesa%dactivatehandlersserial%i_%ipin%i" % (boardnum, port, channel, pin)
firmptype,compnum = self._p.MESA_DAUGHTERDATA[subnum][self._p._SUBSTARTOFDATA+pin]
#print "sserial set options",p
# kill all widget signals:
self.widgets[ptype].handler_block(self.d[ptypeblocksignal])
self.widgets[p].handler_block(self.d[blocksignal])
self.widgets[p].child.handler_block(self.d[actblocksignal])
ppc = 0
concount = 0
numofencoders = 10
numofpwmgens = 12
numoftppwmgens = 0
numofstepgens = 0
self.firmware_to_widgets(boardnum,firmptype,p,ptype,pinv,complabel,compnum,concount,ppc,pin,numofencoders,
numofpwmgens,numoftppwmgens,numofstepgens,subboardname,numofsserialports,numofsserialchannels,True)
# all this to unblock signals
for pin in range (0,self._p._SSCOMBOLEN):
firmptype,compnum = self._p.MESA_DAUGHTERDATA[0][self._p._SUBSTARTOFDATA+pin]
p = 'mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, pin)
ptype = 'mesa%dsserial%d_%dpin%dtype' % (boardnum, port, channel, pin)
pinv = 'mesa%dsserial%d_%dpin%dinv' % (boardnum, port, channel, pin)
complabel = 'mesa%dsserial%d_%dpin%dnum' % (boardnum, port, channel, pin)
blocksignal = "_mesa%dsignalhandlersserial%i_%ipin%i" % (boardnum, port, channel, pin)
ptypeblocksignal = "_mesa%dptypesignalhandlersserial%i_%ipin%i" % (boardnum, port, channel, pin)
actblocksignal = "_mesa%dactivatehandlersserial%i_%ipin%i" % (boardnum, port, channel, pin)
# unblock all widget signals:
self.widgets[ptype].handler_unblock(self.d[ptypeblocksignal])
self.widgets[p].handler_unblock(self.d[blocksignal])
self.widgets[p].child.handler_unblock(self.d[actblocksignal])
# now that the widgets are set up as per firmware, change them as per the loaded data and add signals
for pin in range (0,self._p._SSCOMBOLEN):
firmptype,compnum = self._p.MESA_DAUGHTERDATA[subnum][self._p._SUBSTARTOFDATA+pin]
p = 'mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, pin)
#print "INFO: data to widget smartserial- ",p, firmptype
ptype = 'mesa%dsserial%d_%dpin%dtype' % (boardnum, port, channel, pin)
pinv = 'mesa%dsserial%d_%dpin%dinv' % (boardnum, port, channel, pin)
self.data_to_widgets(boardnum,firmptype,compnum,p,ptype,pinv)
#print "sserial data-widget",p
self.widgets["mesa%d_numof_sserialports"% boardnum].set_value(numofsserialports)
self.widgets["mesa%d_numof_sserialchannels"% boardnum].set_value(numofsserialchannels)
self.window.hide()
def firmware_to_widgets(self,boardnum,firmptype,p,ptype,pinv,complabel,compnum,concount,ppc, pin,numofencoders,numofpwmgens,numoftppwmgens,
numofstepgens,subboardname,numofsserialports,numofsserialchannels,sserialflag):
currentboard = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME]
# *** convert widget[ptype] to component specified in firmwaredata ***
# if the board has less than 24 pins hide the extra comboboxes
if firmptype == _PD.NUSED:
self.widgets[p].hide()
self.widgets[ptype].hide()
self.widgets[pinv].hide()
self.widgets[complabel].hide()
self.widgets[ptype].set_model(self.d._notusedliststore)
self.widgets[ptype].set_active(0)
self.widgets[p].set_model(self.d._notusedsignaltree)
self.widgets[p].set_active(0)
return
else:
self.widgets[p].show()
self.widgets[ptype].show()
self.widgets[pinv].show()
self.widgets[complabel].show()
self.widgets[p].child.set_editable(True)
# ---SETUP GUI FOR ENCODER FAMILY COMPONENT---
# check that we are not converting more encoders than the user requested
# if we are then we trick this routine into thinking the firmware asked for GPIO:
# we can do that by changing the variable 'firmptype' to ask for GPIO
if firmptype in ( _PD.ENCA,_PD.ENCB,_PD.ENCI,_PD.ENCM ):
if numofencoders >= (compnum+1):
# if the combobox is not already displaying the right component:
# then we need to set up the comboboxes for this pin, otherwise skip it
if self.widgets[ptype].get_model():
widgetptype = self.widgets[ptype].get_active_text()
else: widgetptype = None
if not widgetptype == firmptype or not self.d["_mesa%d_configured"%boardnum]:
self.widgets[pinv].set_sensitive(0)
self.widgets[pinv].set_active(0)
self.widgets[ptype].set_model(self.d._encoderliststore)
# serial encoders are not for AXES - filter AXES selections out
if sserialflag:
self.widgets[p].set_model(self.d._encodersignalfilter)
else:
self.widgets[p].set_model(self.d._encodersignaltree)
# we only add every 4th human name so the user can only select
# the encoder's 'A' signal name. If it's one of the other signals
# we can add them all because pncconf controls what the user sees
if firmptype == _PD.ENCA:
self.widgets[complabel].set_text("%d:"%compnum)
self.widgets[p].set_active(0)
self.widgets[p].set_sensitive(1)
self.widgets[ptype].set_sensitive(0)
self.widgets[ptype].set_active(0)
# pncconf controls what the user sees with these ones:
elif firmptype in(_PD.ENCB,_PD.ENCI,_PD.ENCM):
self.widgets[complabel].set_text("")
self.widgets[p].set_active(0)
self.widgets[p].set_sensitive(0)
self.widgets[ptype].set_sensitive(0)
for i,j in enumerate((_PD.ENCB,_PD.ENCI,_PD.ENCM)):
if firmptype == j:break
self.widgets[ptype].set_active(i+1)
else:
# user requested this encoder component to be GPIO instead
# We cheat a little and tell the rest of the method that the firmware says
# it should be GPIO, and compnum is changed to signify that the GPIO can be changed
# from input to output.
# Right now only mainboard GPIO can be changed;
# sserial I/O can not.
firmptype = _PD.GPIOI
compnum = 0
# --- mux encoder ---
elif firmptype in (_PD.MXE0,_PD.MXE1,_PD.MXEI,_PD.MXEM,_PD.MXES):
#print "**** INFO: MUX ENCODER:",firmptype,compnum,numofencoders
if numofencoders >= (compnum*2+1) or (firmptype == _PD.MXES and numofencoders >= compnum*2+1) or \
(firmptype == _PD.MXEM and numofencoders >= compnum +1):
# if the combobox is not already displaying the right component:
# then we need to set up the comboboxes for this pin, otherwise skip it
self.widgets[pinv].set_sensitive(0)
self.widgets[pinv].set_active(0)
pmodel = self.widgets[p].set_model(self.d._muxencodersignaltree)
ptmodel = self.widgets[ptype].set_model(self.d._muxencoderliststore)
self.widgets[ptype].set_active(_PD.pintype_muxencoder.index(firmptype))
self.widgets[ptype].set_sensitive(0)
self.widgets[p].set_active(0)
if firmptype in(_PD.MXE0,_PD.MXE1):
temp = 0
if firmptype == _PD.MXE1: temp = 1
self.widgets[complabel].set_text("%d:"%(compnum *2 + temp))
self.widgets[p].set_sensitive(1)
self.widgets[ptype].show()
self.widgets[p].show()
elif firmptype == _PD.MXEM:
self.widgets[complabel].set_text("%d:"%compnum)
self.widgets[p].set_sensitive(0)
self.widgets[ptype].show()
self.widgets[p].hide()
else:
self.widgets[complabel].set_text("")
self.widgets[p].set_sensitive(0)
self.widgets[ptype].hide()
self.widgets[p].hide()
else:
firmptype = _PD.GPIOI
compnum = 0
# ---SETUP GUI FOR RESOLVER FAMILY COMPONENTS---
elif firmptype in (_PD.RES0,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5,_PD.RESU):
if 0 == 0:
self.widgets[pinv].set_sensitive(0)
self.widgets[pinv].set_active(0)
self.widgets[p].set_model(self.d._resolversignaltree)
self.widgets[ptype].set_model(self.d._resolverliststore)
self.widgets[ptype].set_sensitive(0)
self.widgets[ptype].set_active(0)
if firmptype == _PD.RESU:
self.widgets[complabel].set_text("")
self.widgets[p].hide()
self.widgets[p].set_sensitive(0)
self.widgets[p].set_active(0)
self.widgets[ptype].set_active(6)
else:
temp = (_PD.RES0,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5)
self.widgets[p].show()
for num,i in enumerate(temp):
if firmptype == i:break
self.widgets[complabel].set_text("%d:"% (compnum*6+num))
self.widgets[p].set_sensitive(1)
self.widgets[p].set_active(0)
self.widgets[ptype].set_active(num)
# ---SETUP 8i20 amp---
elif firmptype == _PD.AMP8I20:
self.widgets[ptype].set_model(self.d._8i20liststore)
self.widgets[p].set_model(self.d._8i20signaltree)
self.widgets[complabel].set_text("%d:"%compnum)
self.widgets[p].set_active(0)
self.widgets[p].set_sensitive(1)
self.widgets[pinv].set_sensitive(1)
self.widgets[pinv].set_active(0)
self.widgets[ptype].set_sensitive(0)
self.widgets[ptype].set_active(0)
# --- SETUP potentiometer output
elif firmptype in (_PD.POTO,_PD.POTE):
self.widgets[ptype].set_model(self.d._potliststore)
self.widgets[p].set_model(self.d._potsignaltree)
self.widgets[complabel].set_text("%d:"%compnum)
self.widgets[p].set_active(0)
self.widgets[pinv].set_sensitive(1)
self.widgets[pinv].set_active(0)
self.widgets[ptype].set_sensitive(0)
if firmptype == _PD.POTO:
self.widgets[ptype].set_active(0)
self.widgets[p].set_sensitive(1)
else:
self.widgets[ptype].set_active(1)
self.widgets[p].set_sensitive(0)
# --- SETUP analog input
elif firmptype == (_PD.ANALOGIN):
self.widgets[ptype].set_model(self.d._analoginliststore)
self.widgets[p].set_model(self.d._analoginsignaltree)
self.widgets[complabel].set_text("%d:"%compnum)
self.widgets[p].set_active(0)
self.widgets[pinv].set_sensitive(1)
self.widgets[pinv].set_active(0)
self.widgets[ptype].set_sensitive(0)
self.widgets[ptype].set_active(0)
self.widgets[p].set_sensitive(1)
# ---SETUP GUI FOR PWM FAMILY COMPONENT---
# the user has a choice of pulse width or pulse density modulation
elif firmptype in ( _PD.PWMP,_PD.PWMD,_PD.PWME,_PD.PDMP,_PD.PDMD,_PD.PDME ):
if numofpwmgens >= (compnum+1):
self.widgets[pinv].set_sensitive(1)
self.widgets[pinv].set_active(0)
self.widgets[p].set_model(self.d._pwmsignaltree)
# only add the -pulse signal names for the user to see
if firmptype in(_PD.PWMP,_PD.PDMP):
self.widgets[complabel].set_text("%d:"%compnum)
#print "firmptype = controlling"
self.widgets[ptype].set_model(self.d._pwmcontrolliststore)
self.widgets[ptype].set_sensitive(not sserialflag) # sserial pwm cannot be changed
self.widgets[p].set_sensitive(1)
self.widgets[p].set_active(0)
self.widgets[ptype].set_active(0)
# add them all here
elif firmptype in (_PD.PWMD,_PD.PWME,_PD.PDMD,_PD.PDME):
self.widgets[complabel].set_text("")
#print "firmptype = related"
if firmptype in (_PD.PWMD,_PD.PWME):
self.widgets[ptype].set_model(self.d._pwmrelatedliststore)
else:
self.widgets[ptype].set_model(self.d._pdmrelatedliststore)
self.widgets[p].set_sensitive(0)
self.widgets[p].set_active(0)
self.widgets[ptype].set_sensitive(0)
temp = 1
if firmptype in (_PD.PWME,_PD.PDME):
self.widgets[pinv].set_sensitive(0)
temp = 2
self.widgets[ptype].set_active(temp)
else:
firmptype = _PD.GPIOI
compnum = 0
# ---SETUP GUI FOR TP PWM FAMILY COMPONENT---
elif firmptype in ( _PD.TPPWMA,_PD.TPPWMB,_PD.TPPWMC,_PD.TPPWMAN,_PD.TPPWMBN,_PD.TPPWMCN,_PD.TPPWME,_PD.TPPWMF ):
if numoftppwmgens >= (compnum+1):
if not self.widgets[ptype].get_active_text() == firmptype or not self.d["_mesa%d_configured"%boardnum]:
self.widgets[p].set_model(self.d._tppwmsignaltree)
self.widgets[ptype].set_model(self.d._tppwmliststore)
self.widgets[pinv].set_sensitive(0)
self.widgets[pinv].set_active(0)
self.widgets[ptype].set_sensitive(0)
self.widgets[ptype].set_active(_PD.pintype_tp_pwm.index(firmptype))
self.widgets[p].set_active(0)
# only add the -a signal names for the user to change
if firmptype == _PD.TPPWMA:
self.widgets[complabel].set_text("%d:"%compnum)
self.widgets[p].set_sensitive(1)
# the rest the user can't change
else:
self.widgets[complabel].set_text("")
self.widgets[p].set_sensitive(0)
else:
firmptype = _PD.GPIOI
compnum = 0
# ---SETUP SMART SERIAL COMPONENTS---
# smart serial has port numbers (0-3) and channels (0-7).
# so the component number check is different from other components: it checks the port number and the channel number
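# For example, TXDATA0/RXDATA0/TXEN0 map to channel number 1 and TXDATA7/RXDATA7/TXEN7
# map to channel number 8 below; only the TXDATA/SS7I76/SS7I77 pins are treated as the
# user-selectable 'control' combobox, the RXDATA/TXEN pins follow along unsensitized.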
elif firmptype in (_PD.TXDATA0,_PD.RXDATA0,_PD.TXEN0,_PD.TXDATA1,_PD.RXDATA1,_PD.TXEN1,
_PD.TXDATA2,_PD.RXDATA2,_PD.TXEN2,_PD.TXDATA3,_PD.RXDATA3,_PD.TXEN3,
_PD.TXDATA4,_PD.RXDATA4,_PD.TXEN4,_PD.TXDATA5,_PD.RXDATA5,_PD.TXEN5,
_PD.TXDATA6,_PD.RXDATA6,_PD.TXEN6,_PD.TXDATA7,_PD.RXDATA7,_PD.TXEN7,
_PD.SS7I76M0,_PD.SS7I76M2,_PD.SS7I76M3,_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4):
channelnum = 1
if firmptype in (_PD.TXDATA1,_PD.RXDATA1,_PD.TXEN1,_PD.SS7I77M1): channelnum = 2
if firmptype in (_PD.TXDATA2,_PD.RXDATA2,_PD.TXEN2,_PD.SS7I76M2): channelnum = 3
if firmptype in (_PD.TXDATA3,_PD.RXDATA3,_PD.TXEN3,_PD.SS7I76M3,_PD.SS7I77M3): channelnum = 4
if firmptype in (_PD.TXDATA4,_PD.RXDATA4,_PD.TXEN4,_PD.SS7I77M4): channelnum = 5
if firmptype in (_PD.TXDATA5,_PD.RXDATA5,_PD.TXEN5): channelnum = 6
if firmptype in (_PD.TXDATA6,_PD.RXDATA6,_PD.TXEN6): channelnum = 7
if firmptype in (_PD.TXDATA7,_PD.RXDATA7,_PD.TXEN7): channelnum = 8
# the control combobox is the one the user can select from; the others are unsensitized
CONTROL = False
if firmptype in (_PD.TXDATA0,_PD.TXDATA1,_PD.TXDATA2,_PD.TXDATA3,_PD.TXDATA4,_PD.TXDATA5,
_PD.TXDATA6,_PD.TXDATA7,_PD.SS7I76M0,_PD.SS7I76M2,_PD.SS7I76M3,
_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4):
CONTROL = True
#print "**** INFO: SMART SERIAL ENCODER:",firmptype," compnum = ",compnum," channel = ",channelnum
#print "sserial channel:%d"% numofsserialchannels
if numofsserialports >= (compnum + 1) and numofsserialchannels >= (channelnum):
# if the combobox is not already displaying the right component:
# then we need to set up the comboboxes for this pin, otherwise skip it
#if compnum < _PD._NUM_CHANNELS: # TODO not all channels available
# self.widgets["mesa%dsserialtab%d"% (boardnum,compnum)].show()
self.widgets[pinv].set_sensitive(0)
self.widgets[pinv].set_active(0)
# Filter the selection that the user can choose.
# eg only show two modes for 7i77 and 7i76 or
# don't give those selections on regular sserial channels
if CONTROL:
self.widgets[p].set_model(self.d['_sserial%d_signalfilter'%(channelnum-1)])
if firmptype in (_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4):
self.set_filter('_sserial%d'% (channelnum-1),'7I77')
elif firmptype in (_PD.SS7I76M0,_PD.SS7I76M2,_PD.SS7I76M3):
self.set_filter('_sserial%d'% (channelnum-1),'7I76')
else:
self.set_filter('_sserial%d'% (channelnum-1),'ALL')
else:
self.widgets[p].set_model(self.d._sserialsignaltree)
self.widgets[ptype].set_model(self.d._sserialliststore)
self.widgets[ptype].set_active(_PD.pintype_sserial.index(firmptype))
self.widgets[ptype].set_sensitive(0)
self.widgets[p].set_active(0)
self.widgets[p].child.set_editable(False) # sserial cannot have custom names
# controlling combobox
if CONTROL:
self.widgets[complabel].set_text("%d:"% (channelnum -1))
if channelnum <= _PD._NUM_CHANNELS:#TODO not all channels available
self.widgets[p].set_sensitive(1)
else:
self.widgets[p].set_sensitive(0)
# This is a bit of a hack to make 7i77 and 7i76 firmware automatically choose
# the appropriate sserial component and allow the user to select different modes
# if the sserial ptype is 7i76 or 7i77 then the data must be set to 7i76/7i77 signal
# as that sserial instance can only be for the 7i76/7i77 I/O points
# 7i76:
if firmptype in (_PD.SS7I76M0,_PD.SS7I76M2,_PD.SS7I76M3):
if not self.d[p] in (_PD.I7I76_M0_T,_PD.I7I76_M2_T):
self.d[p] = _PD.I7I76_M0_T
self.d[ptype] = firmptype
self.widgets[p].set_sensitive(self.d.advanced_option)
# 7i77:
elif firmptype in (_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4):
if not self.d[p] in (_PD.I7I77_M3_T,_PD.I7I77_M0_T):
self.d[p] = _PD.I7I77_M0_T
if not firmptype in( _PD.SS7I77M1,_PD.SS7I77M4):
self.widgets[p].set_sensitive(self.d.advanced_option)
else:
self.widgets[p].set_sensitive(0)
self.d[ptype] = firmptype
else:
print 'found a sserial channel'
ssdevice = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._SSDEVICES]
for port,channel,device in (ssdevice):
print port,channel,device,channelnum
if port == 0 and channel+1 == channelnum:
print 'configure for: %s device'% device
if '7I64' in device:
if not '7i64' in self.d[p]:
self.d[p] = _PD.I7I64_T
elif '7I73' in device:
if not '7i73' in self.d[p]:
self.d[p] = _PD.I7I73_M0_T
else:
self.widgets[complabel].set_text("")
self.widgets[p].set_sensitive(0)
else:
firmptype = _PD.GPIOI
compnum = 0
# ---SETUP FOR STEPPER FAMILY COMPONENT---
elif firmptype in (_PD.STEPA,_PD.STEPB):
if numofstepgens >= (compnum+1):
self.widgets[ptype].set_model(self.d._stepperliststore)
self.widgets[p].set_model(self.d._steppersignaltree)
self.widgets[pinv].set_sensitive(1)
self.widgets[pinv].set_active(0)
self.widgets[ptype].set_sensitive(0)
self.widgets[ptype].set_active( _PD.pintype_stepper.index(firmptype) )
self.widgets[p].set_active(0)
#self.widgets[p].set_active(0)
if firmptype == _PD.STEPA:
self.widgets[complabel].set_text("%d:"%compnum)
self.widgets[p].set_sensitive(1)
elif firmptype == _PD.STEPB:
self.widgets[complabel].set_text("")
self.widgets[p].set_sensitive(0)
else:
firmptype = _PD.GPIOI
compnum = 0
# ---SETUP FOR GPIO FAMILY COMPONENT---
# first check to see if firmware says it should be in GPIO family
# (note this can be because firmware says it should be some other
# type but the user wants to deselect it so as to use it as GPIO
# this is done in the firmptype checks before this check.
# They will change firmptype variable to GPIOI)
# check if firmptype is in GPIO family
# check if widget is already configured
# we now set everything in a known state.
if firmptype in (_PD.GPIOI,_PD.GPIOO,_PD.GPIOD,_PD.SSR0):
if self.widgets[ptype].get_model():
widgettext = self.widgets[ptype].get_active_text()
else:
widgettext = None
if sserialflag:
if "7i77" in subboardname or "7i76" in subboardname or "7i84" in subboardname:
if pin <16:
self.widgets[complabel].set_text("%02d:"%(pin)) # sserial input
elif (pin >23 and pin < 40):
self.widgets[complabel].set_text("%02d:"%(pin-8)) # sserial input
elif pin >15 and pin < 24:
self.widgets[complabel].set_text("%02d:"%(pin-16)) #sserial output
elif pin >39:
self.widgets[complabel].set_text("%02d:"%(pin-32)) #sserial output
elif "7i70" in subboardname or "7i71" in subboardname:
self.widgets[complabel].set_text("%02d:"%(pin))
else:
if pin <24 :
self.widgets[complabel].set_text("%02d:"%(concount*24+pin)) # sserial input
else:
self.widgets[complabel].set_text("%02d:"%(concount*24+pin-24)) #sserial output
else:
if firmptype == _PD.SSR0:
self.widgets[complabel].set_text("%02d:"%(compnum - 100))
else:
self.widgets[complabel].set_text("%03d:"%(concount*ppc+pin))# mainboard GPIO
if compnum >= 100 and widgettext == firmptype:
return
elif not compnum >= 100 and (widgettext in (_PD.GPIOI,_PD.GPIOO,_PD.GPIOD)):
return
else:
#self.widgets[ptype].show()
#self.widgets[p].show()
self.widgets[p].set_sensitive(1)
self.widgets[pinv].set_sensitive(1)
self.widgets[ptype].set_sensitive(not compnum >= 100) # compnum = 100 means GPIO cannot be changed by user
if firmptype == _PD.SSR0:
self.widgets[ptype].set_model(self.d._ssrliststore)
else:
self.widgets[ptype].set_model(self.d._gpioliststore)
if firmptype == _PD.GPIOI:
# set pin treestore to gpioi signals
if not self.widgets[p].get_model() == self.d._gpioisignaltree:
self.widgets[p].set_model(self.d._gpioisignaltree)
# set ptype gpioi
self.widgets[ptype].set_active(0)
# set p unused signal
self.widgets[p].set_active(0)
# set pinv unset
self.widgets[pinv].set_active(False)
elif firmptype == _PD.SSR0:
if not self.widgets[p].get_model() == self.d._gpioosignaltree:
self.widgets[p].set_model(self.d._gpioosignaltree)
# set ptype gpioo
self.widgets[ptype].set_active(0)
# set p unused signal
self.widgets[p].set_active(0)
# set pinv unset
self.widgets[pinv].set_active(False)
else:
if not self.widgets[p].get_model() == self.d._gpioosignaltree:
self.widgets[p].set_model(self.d._gpioosignaltree)
# set ptype gpioo
self.widgets[ptype].set_active(1)
# set p unused signal
self.widgets[p].set_active(0)
# set pinv unset
self.widgets[pinv].set_active(False)
def find_sig_name_iter(self,model, signal_name):
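# Searches a signal tree model up to three levels deep (top rows, children,
# grandchildren) for a row whose column 2 matches signal_name and returns its
# iter; falls back to the model's first row when no match is found.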
for i, k in enumerate(model):
itr = model.get_iter(i)
title = model.get_value(itr,2)
#print 'first:',title
# check first set
if title == signal_name :return itr
cld_itr = model.iter_children(itr)
if cld_itr != None:
while cld_itr != None:
gcld_itr = model.iter_children(cld_itr)
if gcld_itr != None:
while gcld_itr != None:
title = model.get_value(gcld_itr,2)
#print title
# check third set
if title == signal_name :return gcld_itr
gcld_itr = model.iter_next(gcld_itr)
title = model.get_value(cld_itr,2)
#print title
# check second set
if title == signal_name :return cld_itr
cld_itr = model.iter_next(cld_itr)
# return first entry if no signal name is found
return model.get_iter_first()
def mesa_mainboard_data_to_widgets(self,boardnum):
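# Walks every connector and its 24 pins of the current firmware data and
# calls data_to_widgets() for each, so the saved signal names and pin types
# are reflected in the mainboard comboboxes.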
for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS]) :
for pin in range (0,24):
firmptype,compnum = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._STARTOFDATA+pin+(concount*24)]
p = 'mesa%dc%dpin%d' % (boardnum, connector, pin)
ptype = 'mesa%dc%dpin%dtype' % (boardnum, connector , pin)
pinv = 'mesa%dc%dpin%dinv' % (boardnum, connector , pin)
self.data_to_widgets(boardnum,firmptype,compnum,p,ptype,pinv)
# by now the widgets should be right according to the firmware (and user deselected components)
# now we apply the data - setting signal names and possibly changing the pintype choice (eg pwm to pdm)
# We only need to set the 'controlling' signal name; the pin-changed method will be called
# immediately and set the 'related' pins (if there are related pins)
def data_to_widgets(self,boardnum,firmptype,compnum,p,ptype,pinv):
debug = False
datap = self.d[p]
dataptype = self.d[ptype]
datapinv = self.d[pinv]
widgetp = self.widgets[p].get_active_text()
widgetptype = self.widgets[ptype].get_active_text()
#print "**** INFO set-data-options DATA:",p,datap,dataptype
#print "**** INFO set-data-options WIDGET:",p,widgetp,widgetptype
# ignore related pins
if widgetptype in (_PD.ENCB,_PD.ENCI,_PD.ENCM,
_PD.MXEI,_PD.MXEM,_PD.MXES,
_PD.RESU,
_PD.STEPB,_PD.STEPC,_PD.STEPD,_PD.STEPE,_PD.STEPF,
_PD.PDMD,_PD.PDME,_PD.PWMD,_PD.PWME,_PD.UDMD,_PD.UDME,
_PD.TPPWMB,_PD.TPPWMC,_PD.TPPWMAN,_PD.TPPWMBN,_PD.TPPWMCN,_PD.TPPWME,_PD.TPPWMF,
_PD.NUSED,_PD.POTD,_PD.POTE,
_PD.RXDATA0,_PD.TXEN0,_PD.RXDATA1,_PD.TXEN1,_PD.RXDATA2,_PD.TXEN2,_PD.RXDATA3,_PD.TXEN3,
_PD.RXDATA4,_PD.TXEN4,_PD.RXDATA5,_PD.TXEN5,_PD.RXDATA6,_PD.TXEN6,_PD.RXDATA7,_PD.TXEN7
):
self.widgets[pinv].set_active(datapinv)
return
# TODO fix this for comboboxes with grandchildren
# we are searching through human names - why not just search the model?
# type GPIO
# if compnum = 100 then it means that the GPIO type can not
# be changed from what the firmware designates it as.
if widgetptype in (_PD.GPIOI,_PD.GPIOO,_PD.GPIOD,_PD.SSR0):
#print "data ptype index:",_PD.pintype_gpio.index(dataptype)
#self.debug_iter(0,p,"data to widget")
#self.debug_iter(0,ptype,"data to widget")
# signal names for GPIO INPUT
#print "compnum = ",compnum
if compnum >= 100: dataptype = widgetptype
self.widgets[pinv].set_active(self.d[pinv])
if widgetptype == _PD.SSR0:
self.widgets[ptype].set_active(0)
else:
try:
self.widgets[ptype].set_active( _PD.pintype_gpio.index(dataptype) )
except:
self.widgets[ptype].set_active( _PD.pintype_gpio.index(widgetptype) )
# if GPIOI or dataptype not in GPIO family force it GPIOI
if dataptype == _PD.GPIOI or dataptype not in(_PD.GPIOO,_PD.GPIOI,_PD.GPIOD,_PD.SSR0):
human = _PD.human_input_names
signal = _PD.hal_input_names
tree = self.d._gpioisignaltree
# signal names for GPIO OUTPUT and OPEN DRAIN OUTPUT
elif dataptype in (_PD.GPIOO,_PD.GPIOD,_PD.SSR0):
human = _PD.human_output_names
signal = _PD.hal_output_names
tree = self.d._gpioosignaltree
self.widgets[p].set_model(tree)
itr = self.find_sig_name_iter(tree, datap)
self.widgets[p].set_active_iter(itr)
# type encoder / mux encoder
# we find the data's signal index
# then we search through the combobox's actual model's 4th array index
# this contains the combobox's signal's index number
# when they match then that is the row to show in the combobox
# this is different because the sserial combobox's model
# can be filtered and that screws with the relationship of
# signal name array vs model row
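# e.g. a saved signal name such as 'x-encoder-a' (assuming that name exists in
# the encoder signal list) is located in the filtered or unfiltered model by
# find_sig_name_iter() below and made the active row.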
elif widgetptype == _PD.ENCA or widgetptype in(_PD.MXE0,_PD.MXE1):
#print "ENC ->dataptype:",self.d[ptype]," dataptype:",self.d[p],signalindex
pinmodel = self.widgets[p].get_model()
itr = self.find_sig_name_iter(pinmodel, datap)
self.widgets[p].set_active_iter(itr)
# type resolver
elif widgetptype in(_PD.RES0,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5,_PD.RESU):
try:
signalindex = _PD.hal_resolver_input_names.index(datap)
except:
if debug: print "**** INFO: PNCCONF warning no resolver signal named: %s\n found for pin %s"% (datap ,p)
signalindex = 0
#print "dataptype:",self.d[ptype]," dataptype:",self.d[p],signalindex
count = 0
temp = (0) # set unused resolver
if signalindex > 0:
for row,parent in enumerate(_PD.human_resolver_input_names):
if row == 0: continue
if len(parent[1]) == 0:
count +=1
#print row,count,"parent-",parent[0]
if count == signalindex:
#print "match",row
temp = (row)
break
continue
for column,child in enumerate(parent[1]):
count +=1
#print row,column,count,parent[0],child
if count == signalindex:
#print "match",row
temp = (row,column)
break
if count >= signalindex:break
#print "temp",temp
treeiter = self.d._resolversignaltree.get_iter(temp)
self.widgets[p].set_active_iter(treeiter)
# Type 8i20 AMP
elif widgetptype == _PD.AMP8I20:
try:
signalindex = _PD.hal_8i20_input_names.index(datap)
except:
if debug: print "**** INFO: PNCCONF warning no 8i20 signal named: %s\n found for pin %s"% (datap ,p)
signalindex = 0
#print "dataptype:",self.d[ptype]," dataptype:",self.d[p],signalindex
count = 0
temp = (0) # set unused 8i20 amp
if signalindex > 0:
for row,parent in enumerate(_PD.human_8i20_input_names):
if row == 0: continue
if len(parent[1]) == 0:
count +=1
#print row,count,"parent-",parent[0]
if count == signalindex:
#print "match",row
temp = (row)
break
continue
for column,child in enumerate(parent[1]):
count +=1
#print row,column,count,parent[0],child
if count == signalindex:
#print "match",row
temp = (row,column)
break
if count >= signalindex:break
#print "temp",temp
treeiter = self.d._8i20signaltree.get_iter(temp)
self.widgets[p].set_active_iter(treeiter)
# Type potentiometer (7i76's spindle control)
elif widgetptype in (_PD.POTO,_PD.POTE):
self.widgets[pinv].set_active(self.d[pinv])
try:
signalindex = _PD.hal_pot_output_names.index(datap)
except:
if debug: print "**** INFO: PNCCONF warning no potentiometer signal named: %s\n found for pin %s"% (datap ,p)
signalindex = 0
#print "dataptype:",self.d[ptype]," dataptype:",self.d[p],signalindex
count = -1
temp = (0) # set unused potentiometer
if signalindex > 0:
for row,parent in enumerate(_PD.human_pot_output_names):
if row == 0: continue
if len(parent[1]) == 0:
count +=2
#print row,count,"parent-",parent[0]
if count == signalindex:
#print "match",row
temp = (row)
break
continue
for column,child in enumerate(parent[1]):
count +=2
#print row,column,count,parent[0],child
if count == signalindex:
#print "match",row
temp = (row,column)
break
if count >= signalindex:break
#print "temp",temp
treeiter = self.d._potsignaltree.get_iter(temp)
self.widgets[p].set_active_iter(treeiter)
# Type analog in
elif widgetptype == _PD.ANALOGIN:
try:
signalindex = _PD.hal_analog_input_names.index(datap)
except:
if debug: print "**** INFO: PNCCONF warning no analog in signal named: %s\n found for pin %s"% (datap ,p)
signalindex = 0
#print "dataptype:",self.d[ptype]," dataptype:",self.d[p],signalindex
count = 0
temp = (0) # set unused analog in
if signalindex > 0:
for row,parent in enumerate(_PD.human_analog_input_names):
if row == 0: continue
if len(parent[1]) == 0:
count +=1
#print row,count,"parent-",parent[0]
if count == signalindex:
#print "match",row
temp = (row)
break
continue
for column,child in enumerate(parent[1]):
count +=1
#print row,column,count,parent[0],child
if count == signalindex:
#print "match",row
temp = (row,column)
break
if count >= signalindex:break
#print "temp",temp
treeiter = self.d._analoginsignaltree.get_iter(temp)
self.widgets[p].set_active_iter(treeiter)
# type PWM gen
elif widgetptype in (_PD.PDMP,_PD.PWMP,_PD.UDMU):
self.widgets[pinv].set_active(datapinv)
if self.widgets["mesa%d_numof_resolvers"% boardnum].get_value(): dataptype = _PD.UDMU # hack resolver board needs UDMU
if dataptype == _PD.PDMP:
#print "pdm"
self.widgets[ptype].set_model(self.d._pdmcontrolliststore)
self.widgets[ptype].set_active(1)
elif dataptype == _PD.PWMP:
#print "pwm",self.d._pwmcontrolliststore
self.widgets[ptype].set_model(self.d._pwmcontrolliststore)
self.widgets[ptype].set_active(0)
elif dataptype == _PD.UDMU:
#print "udm",self.d._udmcontrolliststore
self.widgets[ptype].set_model(self.d._udmcontrolliststore)
self.widgets[ptype].set_active(2)
itr = self.find_sig_name_iter(self.d._pwmsignaltree, datap)
self.widgets[p].set_active_iter(itr)
# type tp 3 pwm for direct brushless motor control
elif widgetptype == _PD.TPPWMA:
#print "3 pwm"
count = -7
try:
signalindex = _PD.hal_tppwm_output_names.index(datap)
except:
if debug: print "**** INFO: PNCCONF warning no THREE PWM signal named: %s\n found for pin %s"% (datap ,p)
signalindex = 0
#print "3 PWw ,dataptype:",self.d[ptype]," dataptype:",self.d[p],signalindex
temp = (0) # set unused stepper
if signalindex > 0:
for row,parent in enumerate(_PD.human_tppwm_output_names):
if row == 0:continue
if len(parent[1]) == 0:
count += 8
#print row,count,parent[0]
if count == signalindex:
#print "match",row
temp = (row)
break
continue
for column,child in enumerate(parent[1]):
count +=8
#print row,column,count,parent[0],child
if count == signalindex:
#print "match",row
temp = (row,column)
break
if count >= signalindex:break
treeiter = self.d._tppwmsignaltree.get_iter(temp)
self.widgets[p].set_active_iter(treeiter)
# type step gen
elif widgetptype == _PD.STEPA:
#print "stepper", dataptype
self.widgets[ptype].set_active(0)
self.widgets[p].set_active(0)
self.widgets[pinv].set_active(datapinv)
itr = self.find_sig_name_iter(self.d._steppersignaltree, datap)
self.widgets[p].set_active_iter(itr)
# type smartserial
# we do things differently here
# we find the data's signal index
# then we search through the combobox's model's 4th array index
# this contains the combobox's signal's index number
# when they match then that is the row to show in the combobox
# this is different because the sserial combobox's model
# can be filtered and that screws with the relationship of
# signal name array vs model row
elif widgetptype in( _PD.TXDATA0,_PD.SS7I76M0,_PD.SS7I77M0,_PD.SS7I77M3,_PD.TXDATA1,
_PD.TXDATA2,_PD.TXDATA3,_PD.TXDATA4,_PD.TXDATA5,_PD.TXDATA6,_PD.TXDATA7,
_PD.SS7I76M2,_PD.SS7I76M3,_PD.SS7I77M1,_PD.SS7I77M4):
#print "SMART SERIAL", dataptype,widgetptype
self.widgets[pinv].set_active(datapinv)
try:
signalindex = _PD.hal_sserial_names.index(self.d[p])
except:
if debug: print "**** INFO: PNCCONF warning no SMART SERIAL signal named: %s\n found for pin %s"% (datap ,p)
signalindex = 0
pinmodel = self.widgets[p].get_model()
for row,parent in enumerate(pinmodel):
#print row,parent[0],parent[2],parent[3],parent[4]
if parent[4] == signalindex:
#print 'FOUND',parent[2],parent[4]
treeiter = pinmodel.get_iter(row)
self.widgets[p].set_active_iter(treeiter)
else:
print "**** WARNING: PNCCONF data to widget: ptype not recognized/match:",dataptype,widgetptype
# This is for when a user picks a signal name or creates a custom signal (by pressing enter)
# it searches for the 'related pins' of a component so it can update them too
# it also handles adding and updating custom signal names
# it is used for mesa boards and parport boards according to boardtype
def on_general_pin_changed(self, widget, boardtype, boardnum, connector, channel, pin, custom):
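# boardtype is "mesa", "sserial" or "parport" and selects how the widget names
# are built; custom is True when the user typed a new signal name and pressed
# enter rather than picking one from the list.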
self.p.set_buttons_sensitive(0,0)
if boardtype == "sserial":
p = 'mesa%dsserial%d_%dpin%d' % (boardnum,connector,channel,pin)
ptype = 'mesa%dsserial%d_%dpin%dtype' % (boardnum,connector,channel,pin)
widgetptype = self.widgets[ptype].get_active_text()
#print "pinchanged-",p
elif boardtype == "mesa":
p = 'mesa%dc%dpin%d' % (boardnum,connector,pin)
ptype = 'mesa%dc%dpin%dtype' % (boardnum,connector,pin)
widgetptype = self.widgets[ptype].get_active_text()
elif boardtype == "parport":
p = '%s_%s%d' % (boardnum,connector, pin)
#print p
if "I" in p: widgetptype = _PD.GPIOI
else: widgetptype = _PD.GPIOO
pinchanged = self.widgets[p].get_active_text()
piter = self.widgets[p].get_active_iter()
signaltree = self.widgets[p].get_model()
try:
basetree = signaltree.get_model()
except:
basetree = signaltree
#print "generalpin changed",p
#print "*** INFO ",boardtype,"-pin-changed: pin:",p,"custom:",custom
#print "*** INFO ",boardtype,"-pin-changed: ptype:",widgetptype,"pinchaanged:",pinchanged
if piter == None and not custom:
#print "*** INFO ",boardtype,"-pin-changed: no iter and not custom"
self.p.set_buttons_sensitive(1,1)
return
if widgetptype in (_PD.ENCB,_PD.ENCI,_PD.ENCM,
_PD.MXEI,_PD.MXEM,_PD.MXES,
_PD.RESU,
_PD.STEPB,_PD.STEPC,_PD.STEPD,_PD.STEPE,_PD.STEPF,
_PD.PDMD,_PD.PDME,_PD.PWMD,_PD.PWME,_PD.UDMD,_PD.UDME,
_PD.TPPWMB,_PD.TPPWMC,_PD.TPPWMAN,_PD.TPPWMBN,_PD.TPPWMCN,_PD.TPPWME,_PD.TPPWMF,
_PD.RXDATA0,_PD.TXEN0,_PD.RXDATA1,_PD.TXEN1,_PD.RXDATA2,_PD.TXEN2,_PD.RXDATA3,_PD.TXEN3,
_PD.POTE,_PD.POTD, _PD.SSR0):
self.p.set_buttons_sensitive(1,1)
return
# for GPIO output
if widgetptype in (_PD.GPIOO,_PD.GPIOD):
#print"ptype GPIOO\n"
halsignallist = 'hal_output_names'
humansignallist = _PD.human_output_names
addsignalto = self.d.haloutputsignames
relatedsearch = ["dummy"]
relatedending = [""]
customindex = len(humansignallist)-1
# for GPIO input
elif widgetptype == _PD.GPIOI:
#print"ptype GPIOI\n"
halsignallist = 'hal_input_names'
humansignallist = _PD.human_input_names
addsignalto = self.d.halinputsignames
relatedsearch = ["dummy"]
relatedending = [""]
customindex = len(humansignallist)-1
# for stepgen pins
elif widgetptype == _PD.STEPA:
#print"ptype step\n"
halsignallist = 'hal_stepper_names'
humansignallist = _PD.human_stepper_names
addsignalto = self.d.halsteppersignames
relatedsearch = [_PD.STEPA,_PD.STEPB,_PD.STEPC,_PD.STEPD,_PD.STEPE,_PD.STEPF]
relatedending = ["-step","-dir","-c","-d","-e","-f"]
customindex = len(humansignallist)-1
# for encoder pins
elif widgetptype == _PD.ENCA:
#print"\nptype encoder"
halsignallist = 'hal_encoder_input_names'
humansignallist = _PD.human_encoder_input_names
addsignalto = self.d.halencoderinputsignames
relatedsearch = [_PD.ENCA,_PD.ENCB,_PD.ENCI,_PD.ENCM]
relatedending = ["-a","-b","-i","-m"]
customindex = len(humansignallist)-1
# for mux encoder pins
elif widgetptype in(_PD.MXE0,_PD.MXE1):
#print"\nptype encoder"
halsignallist = 'hal_encoder_input_names'
humansignallist = _PD.human_encoder_input_names
addsignalto = self.d.halencoderinputsignames
relatedsearch = ["dummy","dummy","dummy","dummy",]
relatedending = ["-a","-b","-i","-m"]
customindex = len(humansignallist)-1
# resolvers
elif widgetptype in (_PD.RES0,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5):
halsignallist = 'hal_resolver_input_names'
humansignallist = _PD.human_resolver_input_names
addsignalto = self.d.halresolversignames
relatedsearch = ["dummy"]
relatedending = [""]
customindex = len(humansignallist)-1
# 8i20 amplifier
elif widgetptype == _PD.AMP8I20:
halsignallist = 'hal_8i20_input_names'
humansignallist = _PD.human_8i20_input_names
addsignalto = self.d.hal8i20signames
relatedsearch = ["dummy"]
relatedending = [""]
customindex = len(humansignallist)-1
# potentiometer output
elif widgetptype == _PD.POTO:
halsignallist = 'hal_pot_output_names'
humansignallist = _PD.human_pot_output_names
addsignalto = self.d.halpotsignames
relatedsearch = [_PD.POTO,_PD.POTE]
relatedending = ["-output","-enable"]
customindex = 2
# analog input
elif widgetptype == _PD.ANALOGIN:
halsignallist = 'hal_analog_input_names'
humansignallist = _PD.human_analog_input_names
addsignalto = self.d.halanaloginsignames
relatedsearch = ["dummy"]
relatedending = [""]
customindex = len(humansignallist)-1
# for PWM,PDM,UDM pins
elif widgetptype in(_PD.PWMP,_PD.PDMP,_PD.UDMU):
#print"ptype pwmp\n"
halsignallist = 'hal_pwm_output_names'
humansignallist = _PD.human_pwm_output_names
addsignalto = self.d.halpwmoutputsignames
relatedsearch = [_PD.PWMP,_PD.PWMD,_PD.PWME]
relatedending = ["-pulse","-dir","-enable"]
customindex = len(humansignallist)-1
elif widgetptype == _PD.TPPWMA:
#print"ptype pdmp\n"
halsignallist = 'hal_tppwm_output_names'
humansignallist = _PD.human_tppwm_output_names
addsignalto = self.d.haltppwmoutputsignames
relatedsearch = [_PD.TPPWMA,_PD.TPPWMB,_PD.TPPWMC,_PD.TPPWMAN,_PD.TPPWMBN,_PD.TPPWMCN,_PD.TPPWME,_PD.TPPWMF]
relatedending = ["-a","-b","c","-anot","-bnot","cnot","-enable","-fault"]
customindex = len(humansignallist)-1
elif widgetptype in (_PD.TXDATA0,_PD.TXDATA1,_PD.TXDATA2,_PD.TXDATA3,_PD.TXDATA4,_PD.TXDATA5,_PD.SS7I76M0,_PD.SS7I76M3,
_PD.SS7I76M2,_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4):
portnum = 0 #TODO support more ports
for count,temp in enumerate(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS]) :
if connector == temp:
firmptype,portnum = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._STARTOFDATA+pin+(count*24)]
if widgetptype in (_PD.TXDATA0,_PD.SS7I76M0,_PD.SS7I77M0): channelnum = 0
elif widgetptype in (_PD.TXDATA1,_PD.SS7I77M1): channelnum = 1
elif widgetptype in (_PD.TXDATA2,_PD.SS7I76M2): channelnum = 2
elif widgetptype in (_PD.TXDATA3,_PD.SS7I77M3,_PD.SS7I76M3): channelnum = 3
elif widgetptype in (_PD.TXDATA4,_PD.SS7I77M4): channelnum = 4
elif widgetptype == _PD.TXDATA5: channelnum = 5
BASE = "mesa%dsserial0_%d"% (boardnum,channelnum)
if self.widgets[p].get_active_text() == _("Unused Channel"):
self.widgets[BASE].hide()
self.d[BASE+"subboard"] = "none"
self.p.set_buttons_sensitive(1,1)
return
else:
self.widgets[BASE].show()
# TODO we should search for these names rather than use hard coded logic
# so as to make adding cards easier
temp = self.widgets[p].get_active_text()
table = BASE+"table2"
self.widgets[table].show()
table = BASE+"table3"
self.widgets[table].show()
if "7i76" in temp:
if 'Mode 2' in temp:
ssfirmname = "7i76-m2"
else:
ssfirmname = "7i76-m0"
self.d[BASE+"subboard"] = ssfirmname
self.widgets[BASE+'_tablabel'].set_text("7I76 I/O\n (SS# %d)"% channelnum)
elif "7i64" in temp:
self.d[BASE+"subboard"] = "7i64"
self.widgets[BASE+'_tablabel'].set_text("7I64 I/O\n (SS# %d)"% channelnum)
elif "7i69" in temp:
self.d[BASE+"subboard"] = "7i69"
self.widgets[table].hide()
self.widgets[BASE+'_tablabel'].set_text("7I69 I/O\n (SS# %d)"% channelnum)
elif "7i70" in temp:
self.d[BASE+"subboard"] = "7i70"
self.widgets[table].hide()
self.widgets[BASE+'_tablabel'].set_text("7I70 I/O\n (SS# %d)"% channelnum)
elif "7i71" in temp:
self.d[BASE+"subboard"] = "7i71"
self.widgets[table].hide()
self.widgets[BASE+'_tablabel'].set_text("7I71 I/O\n (SS# %d)"% channelnum)
elif "7i73" in temp:
self.d[BASE+"subboard"] = "7i73-m1"
self.widgets[BASE+'_tablabel'].set_text("7I73 I/O\n (SS# %d)"% channelnum)
elif "7i77" in temp:
print 'ssname',temp,'sschannel#',channelnum
if 'Mode 3' in temp:
ssfirmname = "7i77-m3"
else:
ssfirmname = "7i77-m0"
self.d[BASE+"subboard"] = ssfirmname
if channelnum in(0,3):
self.widgets[BASE+'_tablabel'].set_text("7I77 I/O\n (SS# %d)"% channelnum)
self.widgets[table].hide()
elif channelnum in(1,4):
self.widgets[BASE+'_tablabel'].set_text("7I77 PWM\n (SS# %d)"% channelnum)
table = BASE+"table2"
self.widgets[table].hide()
table = BASE+"table1"
self.widgets[table].hide()
elif "7i84" in temp:
print 'ssname',temp,'sschannel#',channelnum
if 'Mode 3' in temp:
ssfirmname = "7i84-m3"
else:
ssfirmname = "7i84-m0"
self.d[BASE+"subboard"] = ssfirmname
self.widgets[table].hide()
self.widgets[BASE+'_tablabel'].set_text("7I84 I/O\n (SS# %d)"%channelnum)
elif "8i20" in temp:
self.d[BASE+"subboard"] = "8i20"
self.widgets[table].hide()
table = BASE+"table2"
self.widgets[table].hide()
self.widgets[BASE+'_tablabel'].set_text("8I20\n (SS# %d)"% channelnum)
else:
self.d[BASE+"subboard"] = "none"
self.widgets[table].hide()
table = BASE+"table2"
self.widgets[table].hide()
table = BASE+"table1"
self.widgets[table].hide()
self.p.set_buttons_sensitive(1,1)
return
# set sserial tab names to correspond to connector numbers so users have a clue
# first we have to find the daughter board in pncconf's internal list
# TODO here we search the list - this should also be done for the table names, see the TODO above
subfirmname = self.d[BASE+"subboard"]
for subnum,temp in enumerate(self._p.MESA_DAUGHTERDATA):
if self._p.MESA_DAUGHTERDATA[subnum][self._p._SUBFIRMNAME] == subfirmname: break
subconlist = self._p.MESA_DAUGHTERDATA[subnum][self._p._SUBCONLIST]
# now search the connector list and write it to the tab names
for tabnum in range(0,3):
conname = subconlist[tabnum]
tab = BASE+"tab%d"% tabnum
self.widgets[tab].set_text(conname)
#print p,temp," set at",self.d[BASE+"subboard"]
self.set_sserial_options(boardnum,portnum,channelnum)
self.p.set_buttons_sensitive(1,1)
return
self.p.set_buttons_sensitive(1,1)
return
else:
print"**** INFO: pncconf on_general_pin_changed: pintype not found:%s\n"% widgetptype
self.p.set_buttons_sensitive(1,1)
return
# *** change the related pin's signal names ***
# see if the piter is None - if it is, a custom name has been entered
# else find the signal name index number; if the index is zero set the piter to the unused signal
# this is a workaround for the combo box allowing the parent to be shown and selected in the
# child column - haven't figured out how to stop that #TODO
# either way we have to search the current firmware array for the pin numbers of the related
# pins so we can change them to the related signal name
# all signal names have related signals (eg encoders have A and B phase and index and index mask)
# except the 'unused' signal - it is a special case as there are no related signal names with it.
if piter == None or custom:
#print "*** INFO ",boardtype,"-pin-changed: PITER:",piter," length:",len(signaltree)
if pinchanged in (addsignalto):return
for i in (humansignallist):
if pinchanged == i[0]:return
if pinchanged in i[1]:return
length = len(signaltree)
index = len(_PD[halsignallist]) - len(relatedsearch)
customiter = signaltree.get_iter((length-1,))
childiter = signaltree.iter_nth_child(customiter, 0)
n = 0
while childiter:
dummy, index = signaltree.get(childiter, 0, 1)
n+=1
childiter = signaltree.iter_nth_child(customiter, n)
index += len(relatedsearch)
else:
dummy, index = signaltree.get(piter, 0, 1)
if index == 0:
piter = signaltree.get_iter_first()
#print "*** INFO ",boardtype,"-pin-changed: index",index
# This finds the pin type and component number of the pin that has changed
pinlist = []
# these components have no related pins - fake the list
if widgetptype in(_PD.GPIOI,_PD.GPIOO,_PD.GPIOD,_PD.SSR0,_PD.MXE0,_PD.MXE1,_PD.RES0,_PD.RES1,
_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5,_PD.AMP8I20,_PD.ANALOGIN):
pinlist = [["%s"%p,boardnum,connector,channel,pin]]
else:
pinlist = self.list_related_pins(relatedsearch, boardnum, connector, channel, pin, 0)
#print pinlist
# Now we have a list of pins that need to be updated
# first check if the name is a custom name; if it is,
# add the legalized custom name to:
# addsignalto -> for recording custom names for next time loaded
# signals to check -> for making signal names (we add different endings for different signal names)
# signaltree -> for display in the gui - it is automatically added to all comboboxes that use this treesort
# then go through the pinlist:
# block signals
# display the proper text depending if custom or not
# then unblock signals
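# e.g. a typed name like 'my probe input' (hypothetical) is legalized to
# 'my_probe_input' below before being appended to the signal lists and tree.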
if custom:
legal_name = pinchanged.replace(" ","_")
addsignalto.append ((legal_name))
print "add: "+legal_name+" to human list",humansignallist[customindex][1]
humansignallist[customindex][1].append ((legal_name))
endoftree = len(basetree)-1
customiter = basetree.get_iter((endoftree,))
newiter = basetree.append(customiter, [legal_name,index,legal_name,halsignallist,index])
#print 'new signal:',legal_name,index,legal_name,halsignallist,endoftree,index
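# e.g. a custom encoder name 'spindle_enc' (hypothetical) with relatedending
# ["-a","-b","-i","-m"] adds 'spindle_enc-a', 'spindle_enc-b', 'spindle_enc-i'
# and 'spindle_enc-m' to the HAL signal list in the loop below.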
for offset,i in enumerate(relatedsearch):
with_endings = legal_name + relatedending[offset]
#print "new signal:",with_endings
_PD[halsignallist].append ((with_endings))
for data in(pinlist):
if boardtype == "mesa":
blocksignal1 = "_mesa%dsignalhandlerc%ipin%i" % (data[1], data[2], data[4])
blocksignal2 = "_mesa%dactivatehandlerc%ipin%i" % (data[1], data[2], data[4])
if boardtype == "sserial":
blocksignal1 = "_mesa%dsignalhandlersserial%i_%ipin%i" % (data[1], data[2], data[3], data[4])
blocksignal2 = "_mesa%dactivatehandlersserial%i_%ipin%i" % (data[1], data[2], data[3],data[4])
elif boardtype =="parport":
blocksignal1 = "_%s_%s%dsignalhandler" % (data[1], data[2], data[4])
blocksignal2 = "_%s_%s%dactivatehandler" % (data[1], data[2], data[4])
self.widgets[data[0]].handler_block(self.d[blocksignal1])
self.widgets[data[0]].child.handler_block(self.d[blocksignal2])
if custom:
if basetree == signaltree:
temp = newiter
else:
temp = signaltree.convert_child_iter_to_iter(newiter)
self.widgets[data[0]].set_active_iter(temp)
else:
self.widgets[data[0]].set_active_iter(piter)
self.widgets[data[0]].child.handler_unblock(self.d[blocksignal2])
self.widgets[data[0]].handler_unblock(self.d[blocksignal1])
#self.debug_iter(0,p,"pin changed")
#if boardtype == "mesa": self.debug_iter(0,ptype,"pin changed")
self.p.set_buttons_sensitive(1,1)
def pport_push_data(self,port,direction,pin,pinv,signaltree,signaltocheck):
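# Converts one parport pin widget back to data: if the user typed a custom name
# without pressing enter it is registered first via on_general_pin_changed(),
# then the pin name, the chosen HAL signal and the invert checkbox state are
# returned to the caller.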
p = '%s_%s%d' % (port, direction, pin)
piter = self.widgets[p].get_active_iter()
selection = self.widgets[p].get_active_text()
# **Start widget to data conversion**
if piter == None:# means new custom signal name and user never pushed enter
#print "callin pin changed !!!"
self.on_general_pin_changed( None,"parport", port, direction, None, pin, True)
selection = self.widgets[p].get_active_text()
piter = self.widgets[p].get_active_iter()
#print "found signame -> ",selection," "
# ok we have a piter with a signal type now- lets convert it to a signalname
#print "**** INFO parport-data-transfer piter:",piter
#self.debug_iter(piter,p,"signal")
dummy, index = signaltree.get(piter,0,1)
#print "signaltree: ",dummy
return p, signaltocheck[index], self.widgets[pinv].get_active()
def set_pport_combo(self,pinname):
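# Picks the GPIO input or output signal tree depending on whether the parport
# pin is an Ipin or an Opin, then selects the saved signal name in the combobox
# via find_sig_name_iter().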
#print pinname
# signal names for GPIO INPUT
datap = self.d[pinname]
if '_Ipin' in pinname:
human = self._p.human_input_names
signal = self._p.hal_input_names
tree = self.d._gpioisignaltree
# signal names for GPIO OUTPUT and OPEN DRAIN OUTPUT
elif 'Opin'in pinname:
human = self._p.human_output_names
signal =self._p.hal_output_names
tree = self.d._gpioosignaltree
#self.w[pinname].set_model(tree)
# an error probably means the signal name cannot be found
# set it as unused rather than raise an error
itr = self.find_sig_name_iter(tree, datap)
self.widgets[pinname].set_active_iter(itr)
return
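# NOTE: everything below this return is unreachable and kept only for reference.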
try:
signalindex = signal.index(datap)
except:
signalindex = 0
print "**** INFO: PNCCONF warning no GPIO signal named: %s\n found for pin %s"% (datap , p)
#print "gpio temp ptype:",pinname,datap,signalindex
count = 0
temp = (0) # set unused gpio if no match
if signalindex > 0:
for row,parent in enumerate(human):
#print row,parent
if len(parent[1]) == 0:continue
for column,child in enumerate(parent[1]):
count +=1
#print row,column,count,parent[0],child
if count == signalindex:
#print "match",row,column
break
if count >= signalindex:break
temp = (row,column)
treeiter = tree.get_iter(temp)
self.widgets[pinname].set_active_iter(treeiter)
def signal_sanity_check(self, *args):
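# Checks every available axis for a consistent drive/feedback combination
# (stepgen vs pwm, encoder/resolver for servos, tandem stepgens) plus the
# Touchy-specific external control signals; pops a warning dialog and returns
# True when a hard error was found.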
warnings = []
do_warning = False
do_error = False
for i in self.d.available_axes:
tppwm = pwm = amp_8i20 = False
step = self.findsignal(i+"-stepgen-step")
step2 = self.findsignal(i+"2-stepgen-step")
enc = self.findsignal(i+"-encoder-a")
resolver = self.findsignal(i+"-resolver")
if self.findsignal("%s-8i20"% i): amp_8i20 = pwm =True
if self.findsignal(i+"-pwm-pulse"): pwm = True
if self.findsignal(i+"-tppwm-a"): tppwm = pwm = True
#print "signal sanity check: axis",i,"\n pwm = ",pwm,"\n 3pwm =",tppwm,"\n encoder =",enc,"\n step=",step
if i == 's':
if step and pwm:
warnings.append(_("You can not have both steppers and pwm signals for spindle control\n") )
do_error = True
continue
if not step and not pwm:
warnings.append(_("You forgot to designate a stepper or pwm signal for axis %s\n")% i)
do_error = True
if pwm and not (enc or resolver):
warnings.append(_("You forgot to designate an encoder /resolver signal for axis %s servo\n")% i)
do_error = True
if enc and not pwm and not step:
warnings.append(_("You forgot to designate a pwm signal or stepper signal for axis %s\n")% i)
do_error = True
if step and pwm:
warnings.append(_("You can not have both steppers and pwm signals for axis %s\n")% i)
do_error = True
if step2 and not step:
warnings.append(_("If using a tandem axis stepper, you must select a master stepgen for axis %s\n")% i)
do_error = True
if self.d.frontend == _PD._TOUCHY:# TOUCHY GUI
abort = self.findsignal("abort")
cycle = self.findsignal("cycle-start")
single = self.findsignal("single-step")
mpg = self.findsignal("select-mpg-a")
if not cycle:
warnings.append(_("Touchy requires an external cycle start signal\n"))
do_warning = True
if not abort:
warnings.append(_("Touchy requires an external abort signal\n"))
do_warning = True
if not single:
warnings.append(_("Touchy requires an external single-step signal\n"))
do_warning = True
if not mpg:
warnings.append(_("Touchy requires an external multi handwheel MPG encoder signal on the mesa page\n"))
do_warning = True
if not self.d.externalmpg:
warnings.append(_("Touchy requires 'external mpg jogging' to be selected on the external control page\n"))
do_warning = True
if self.d.multimpg:
warnings.append(_("Touchy requires the external mpg to be in 'shared mpg' mode on the external controls page\n"))
do_warning = True
if self.d.incrselect:
warnings.append(_("Touchy requires selectable increments to be unchecked on the external controls page\n"))
do_warning = True
if do_warning or do_error:
self.warning_dialog("\n".join(warnings),True)
if do_error: return True
return False
def daughter_board_sanity_check(self,widget):
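# Shows a reminder about the generator type and base frequency required by the
# daughter board whose sanity-check widget triggered this call.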
warnings = []
do_warning = False
for boardnum in range(0,int(self.d.number_mesa)):
if widget == self.widgets["mesa%d_7i29_sanity_check"%boardnum]:
warnings.append(_("The 7i29 daughter board requires PWM type generators and a PWM base frequency of 20 khz\n"))
do_warning = True
if widget == self.widgets["mesa%d_7i30_sanity_check"%boardnum]:
warnings.append(_("The 7i30 daughter board requires PWM type generators and a PWM base frequency of 20 khz\n"))
do_warning = True
if widget == self.widgets["mesa%d_7i33_sanity_check"%boardnum]:
warnings.append(_("The 7i33 daughter board requires PDM type generators and a PDM base frequency of 6 Mhz\n"))
do_warning = True
if widget == self.widgets["mesa%d_7i40_sanity_check"%boardnum]:
warnings.append(_("The 7i40 daughter board requires PWM type generators and a PWM base frequency of 50 khz\n"))
do_warning = True
if widget == self.widgets["mesa%d_7i48_sanity_check"%boardnum]:
warnings.append(_("The 7i48 daughter board requires UDM type generators and a PWM base frequency of 24 khz\n"))
do_warning = True
if do_warning:
self.warning_dialog("\n".join(warnings),True)
def axis_prepare(self, axis):
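# Fills the axis page from self.d: detects how the axis is driven (stepgen,
# pwm/pdm, 3-phase pwm, 8i20, pot) and what feedback it has, loads PID, stepper
# timing, BLDC, scaling and homing values into the widgets, and shows or hides
# the frames that do not apply.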
d = self.d
w = self.widgets
def set_text_from_text(n): w[axis + n].set_text("%s" % d[axis + n])
def set_text(n): w[axis + n].set_text(locale.format("%.4f", (d[axis + n])))
def set_value(n): w[axis + n].set_value(d[axis + n])
def set_active(n): w[axis + n].set_active(d[axis + n])
stepdriven = encoder = pwmgen = resolver = tppwm = digital_at_speed = amp_8i20 = False
spindlepot = sserial_scaling = False
vfd_spindle = self.d.serial_vfd and (self.d.mitsub_vfd or self.d.gs2_vfd)
if self.findsignal("%s-8i20"% axis):amp_8i20 = True
if self.findsignal("spindle-at-speed"): digital_at_speed = True
if self.findsignal(axis+"-stepgen-step"): stepdriven = True
if self.findsignal(axis+"-encoder-a"): encoder = True
if self.findsignal(axis+"-resolver"): encoder = resolver = True
temp = self.findsignal(axis+"-pwm-pulse")
if temp:
pwmgen = True
pinname = self.make_pinname(temp)
if "analog" in pinname: sserial_scaling = True
if self.findsignal(axis+"-tppwm-a"): pwmgen = tppwm = True
if self.findsignal(axis+"-pot-output"): spindlepot = sserial_scaling = True
model = w[axis+"drivertype"].get_model()
model.clear()
for i in _PD.alldrivertypes:
model.append((i[1],))
model.append((_("Custom"),))
w["steprev"].set_text("%s" % d[axis+"steprev"])
w["microstep"].set_text("%s" % d[axis +"microstep"])
# P setting needs to default to different values based on
# stepper vs servo configs. But we still want to allow the user to set it.
# If the value is None then we should set a default value; if not, then
# that means it's been set to something already...hopefully right.
# TODO this should be smarter - after going through a config once it
# always uses the value set here - if it is set to a default value
# it should keep checking that the value is still right,
# but that's a bigger change than we want now.
# We check for None and 'None' because when None is saved
# it's saved as a string
if not d[axis + "P"] == None and not d[axis + "P"] == 'None':
set_value("P")
elif stepdriven == True:
w[axis + "P"].set_value(1/(d.servoperiod/1000000000))
else:
w[axis + "P"].set_value(50)
set_value("I")
set_value("D")
set_value("FF0")
set_value("FF1")
set_value("FF2")
set_value("bias")
set_value("deadband")
set_value("steptime")
set_value("stepspace")
set_value("dirhold")
set_value("dirsetup")
set_value("outputscale")
set_value("3pwmscale")
set_value("3pwmdeadtime")
set_active("invertmotor")
set_active("invertencoder")
set_value("maxoutput")
if amp_8i20:
w[axis + "bldc_option"].set_active(True)
else:
set_active("bldc_option")
set_active("bldc_no_feedback")
set_active("bldc_absolute_feedback")
set_active("bldc_incremental_feedback")
set_active("bldc_use_hall")
set_active("bldc_use_encoder" )
set_active("bldc_use_index")
set_active("bldc_fanuc_alignment")
set_active("bldc_digital_output")
set_active("bldc_six_outputs")
set_active("bldc_emulated_feedback")
set_active("bldc_output_hall")
set_active("bldc_output_fanuc")
set_active("bldc_force_trapz")
set_active("bldc_reverse")
set_value("bldc_scale")
set_value("bldc_poles")
set_value("bldc_lead_angle")
set_value("bldc_inital_value")
set_value("bldc_encoder_offset")
set_value("bldc_drive_offset")
set_value("bldc_pattern_out")
set_value("bldc_pattern_in")
set_value("8i20maxcurrent")
w["encoderline"].set_value((d[axis+"encodercounts"]/4))
set_value("stepscale")
set_value("encoderscale")
w[axis+"maxvel"].set_value(d[axis+"maxvel"]*60)
set_value("maxacc")
if not axis == "s" or axis == "s" and (encoder and (pwmgen or tppwm or stepdriven or sserial_scaling)):
w[axis + "servo_info"].show()
else:
w[axis + "servo_info"].hide()
if stepdriven or not (pwmgen or spindlepot):
w[axis + "output_info"].hide()
else:
w[axis + "output_info"].show()
w[axis + "invertencoder"].set_sensitive(encoder)
w[axis + "encoderscale"].set_sensitive(encoder)
w[axis + "stepscale"].set_sensitive(stepdriven)
if stepdriven:
w[axis + "stepper_info"].show()
else:
w[axis + "stepper_info"].hide()
if pwmgen or sserial_scaling:
w[axis + "outputscale"].show()
w[axis + "outputscalelabel"].show()
else:
w[axis + "outputscale"].hide()
w[axis + "outputscalelabel"].hide()
if amp_8i20 or pwmgen and d.advanced_option == True:
w[axis + "bldcframe"].show()
else: w[axis + "bldcframe"].hide()
if tppwm:
w[axis + "3pwmdeadtime"].show()
w[axis + "3pwmscale"].show()
w[axis + "3pwmdeadtimelabel"].show()
w[axis + "3pwmscalelabel"].show()
else:
w[axis + "3pwmdeadtime"].hide()
w[axis + "3pwmscale"].hide()
w[axis + "3pwmdeadtimelabel"].hide()
w[axis + "3pwmscalelabel"].hide()
w[axis + "drivertype"].set_active(self.drivertype_toindex(axis))
if w[axis + "drivertype"].get_active_text() == _("Custom"):
w[axis + "steptime"].set_value(d[axis + "steptime"])
w[axis + "stepspace"].set_value(d[axis + "stepspace"])
w[axis + "dirhold"].set_value(d[axis + "dirhold"])
w[axis + "dirsetup"].set_value(d[axis + "dirsetup"])
gobject.idle_add(lambda: self.motor_encoder_sanity_check(None,axis))
if axis == "s":
unit = "rev"
pitchunit =_("Gearbox Reduction Ratio")
elif axis == "a":
unit = "degree"
pitchunit = _("Reduction Ratio")
elif d.units ==_PD._METRIC:
unit = "mm"
pitchunit =_("Leadscrew Pitch")
else:
unit = "inch"
pitchunit =_("Leadscrew TPI")
if axis == "s" or axis =="a":
w["labelmotor_pitch"].set_text(pitchunit)
w["labelencoder_pitch"].set_text(pitchunit)
w["motor_screwunits"].set_text(_("("+unit+" / rev)"))
w["encoder_screwunits"].set_text(_("("+unit+" / rev)"))
w[axis + "velunits"].set_text(_(unit+" / min"))
w[axis + "accunits"].set_text(_(unit+" / sec²"))
w["accdistunits"].set_text(unit)
if stepdriven:
w[ "resolutionunits1"].set_text(_(unit+" / Step"))
w["scaleunits"].set_text(_("Steps / "+unit))
else:
w["resolutionunits1"].set_text(_(unit+" / encoder pulse"))
w["scaleunits"].set_text(_("Encoder pulses / "+unit))
if not axis =="s":
w[axis + "homevelunits"].set_text(_(unit+" / min"))
w[axis + "homelatchvelunits"].set_text(_(unit+" / min"))
w[axis + "homefinalvelunits"].set_text(_(unit+" / min"))
w[axis + "minfollowunits"].set_text(unit)
w[axis + "maxfollowunits"].set_text(unit)
if resolver:
w[axis + "encoderscale_label"].set_text(_("Resolver Scale:"))
if axis == 's':
if vfd_spindle:
w.serial_vfd_info.show()
else:
w.serial_vfd_info.hide()
set_value("outputscale2")
w.ssingleinputencoder.set_sensitive(encoder)
w["sinvertencoder"].set_sensitive(encoder)
w["ssingleinputencoder"].show()
w["saxistest"].set_sensitive(pwmgen or spindlepot)
w["sstepper_info"].set_sensitive(stepdriven)
w["smaxvel"].set_sensitive(stepdriven)
w["smaxacc"].set_sensitive(stepdriven)
w["suseatspeed"].set_sensitive(not digital_at_speed and encoder)
if encoder or resolver:
if (self.d.pyvcp and self.d.pyvcphaltype == 1 and self.d.pyvcpconnect == 1) or (self.d.gladevcp
and self.d.spindlespeedbar):
w["sfiltergain"].set_sensitive(True)
set_active("useatspeed")
w.snearrange_button.set_active(d.susenearrange)
w["snearscale"].set_value(d["snearscale"]*100)
w["snearrange"].set_value(d["snearrange"])
set_value("filtergain")
set_active("singleinputencoder")
set_value("outputmaxvoltage")
set_active("usenegativevoltage")
set_active("useoutputrange2")
self.useoutputrange2_toggled()
else:
if sserial_scaling:
w[axis + "outputminlimit"].show()
w[axis + "outputminlimitlabel"].show()
w[axis + "outputmaxlimit"].show()
w[axis + "outputmaxlimitlabel"].show()
else:
w[axis + "outputminlimit"].hide()
w[axis + "outputminlimitlabel"].hide()
w[axis + "outputmaxlimit"].hide()
w[axis + "outputmaxlimitlabel"].hide()
set_value("outputminlimit")
set_value("outputmaxlimit")
set_text("encodercounts")
w[axis+"maxferror"].set_sensitive(True)
w[axis+"minferror"].set_sensitive(True)
set_value("maxferror")
set_value("minferror")
set_text_from_text("compfilename")
set_active("comptype")
set_active("usebacklash")
set_value("backlash")
set_active("usecomp")
set_text("homepos")
set_text("minlim")
set_text("maxlim")
set_text("homesw")
w[axis+"homesearchvel"].set_text("%d" % (d[axis+"homesearchvel"]*60))
w[axis+"homelatchvel"].set_text("%d" % (d[axis+"homelatchvel"]*60))
w[axis+"homefinalvel"].set_text("%d" % (d[axis+"homefinalvel"]*60))
w[axis+"homesequence"].set_text("%d" % abs(d[axis+"homesequence"]))
set_active("searchdir")
set_active("latchdir")
set_active("usehomeindex")
thisaxishome = set(("all-home", "home-" + axis, "min-home-" + axis,"max-home-" + axis, "both-home-" + axis))
homes = False
for i in thisaxishome:
test = self.findsignal(i)
if test: homes = True
w[axis + "homesw"].set_sensitive(homes)
w[axis + "homesearchvel"].set_sensitive(homes)
w[axis + "searchdir"].set_sensitive(homes)
w[axis + "latchdir"].set_sensitive(homes)
w[axis + "usehomeindex"].set_sensitive(encoder and homes)
w[axis + "homefinalvel"].set_sensitive(homes)
w[axis + "homelatchvel"].set_sensitive(homes)
i = d[axis + "usecomp"]
w[axis + "comptype"].set_sensitive(i)
w[axis + "compfilename"].set_sensitive(i)
i = d[axis + "usebacklash"]
w[axis + "backlash"].set_sensitive(i)
self.p.set_buttons_sensitive(1,0)
self.motor_encoder_sanity_check(None,axis)
def driver_changed(self, axis):
d = self.d
w = self.widgets
v = w[axis + "drivertype"].get_active()
if v < len(_PD.alldrivertypes):
d = _PD.alldrivertypes[v]
w[axis + "steptime"].set_value(d[2])
w[axis + "stepspace"].set_value(d[3])
w[axis + "dirhold"].set_value(d[4])
w[axis + "dirsetup"].set_value(d[5])
w[axis + "steptime"].set_sensitive(0)
w[axis + "stepspace"].set_sensitive(0)
w[axis + "dirhold"].set_sensitive(0)
w[axis + "dirsetup"].set_sensitive(0)
else:
w[axis + "steptime"].set_sensitive(1)
w[axis + "stepspace"].set_sensitive(1)
w[axis + "dirhold"].set_sensitive(1)
w[axis + "dirsetup"].set_sensitive(1)
def drivertype_toindex(self, axis, what=None):
if what is None: what = self.d[axis + "drivertype"]
for i, d in enumerate(_PD.alldrivertypes):
if d[0] == what: return i
return len(_PD.alldrivertypes)
def drivertype_toid(self, axis, what=None):
if not isinstance(what, int): what = self.drivertype_toindex(axis, what)
if what < len(_PD.alldrivertypes): return _PD.alldrivertypes[what][0]
return "custom"
def drivertype_fromindex(self, axis):
i = self.widgets[axis + "drivertype"].get_active()
if i < len(_PD.alldrivertypes): return _PD.alldrivertypes[i][1]
return _("Custom")
def comp_toggle(self, axis):
i = self.widgets[axis + "usecomp"].get_active()
self.widgets[axis + "compfilename"].set_sensitive(i)
self.widgets[axis + "comptype"].set_sensitive(i)
if i:
self.widgets[axis + "backlash"].set_sensitive(0)
self.widgets[axis + "usebacklash"].set_active(0)
def bldc_toggled(self, axis):
i = self.widgets[axis + "bldc_option"].get_active()
self.widgets[axis + "bldcoptionbox"].set_sensitive(i)
def useatspeed_toggled(self):
i = self.widgets.suseatspeed.get_active()
self.widgets.snearscale.set_sensitive(self.widgets.snearscale_button.get_active() and i)
self.widgets.snearrange.set_sensitive(self.widgets.snearrange_button.get_active() and i)
def useoutputrange2_toggled(self):
i = self.widgets.suseoutputrange2.get_active()
self.widgets.soutputscale2.set_sensitive(i)
def bldc_update(self,Widgets,axis):
w = self.widgets
i = False
if w[axis+"bldc_incremental_feedback"].get_active():
i = True
w[axis+"bldc_pattern_in"].set_sensitive(i and w[axis+"bldc_use_hall"].get_active() )
w[axis+"bldc_inital_value"].set_sensitive(i and w[axis+"bldc_use_encoder"].get_active() and not w[axis+"bldc_use_hall"].get_active() )
w[axis+"bldc_use_hall"].set_sensitive(i)
w[axis+"bldc_use_encoder"].set_sensitive(i)
w[axis+"bldc_use_index"].set_sensitive(i)
w[axis+"bldc_fanuc_alignment"].set_sensitive(i)
i = False
if w[axis+"bldc_emulated_feedback"].get_active():
i = True
w[axis+"bldc_output_hall"].set_sensitive(i)
w[axis+"bldc_output_fanuc"].set_sensitive(i)
w[axis+"bldc_pattern_out"].set_sensitive(i and w[axis+"bldc_output_hall"].get_active() )
def backlash_toggle(self, axis):
i = self.widgets[axis + "usebacklash"].get_active()
self.widgets[axis + "backlash"].set_sensitive(i)
if i:
self.widgets[axis + "compfilename"].set_sensitive(0)
self.widgets[axis + "comptype"].set_sensitive(0)
self.widgets[axis + "usecomp"].set_active(0)
def axis_done(self, axis):
d = self.d
w = self.widgets
def get_text(n): d[axis + n] = get_value(w[axis + n])
def get_pagevalue(n): d[axis + n] = get_value(w[axis + n])
def get_active(n): d[axis + n] = w[axis + n].get_active()
stepdrive = self.findsignal(axis+"-stepgen-step")
encoder = self.findsignal(axis+"-encoder-a")
resolver = self.findsignal(axis+"-resolver")
get_pagevalue("P")
get_pagevalue("I")
get_pagevalue("D")
get_pagevalue("FF0")
get_pagevalue("FF1")
get_pagevalue("FF2")
get_pagevalue("bias")
get_pagevalue("deadband")
if stepdrive:
d[axis + "maxoutput"] = (get_value(w[axis + "maxvel"])/60) *1.25 # TODO should be X2 if using backlash comp ?
if axis == "s":
d[axis + "maxoutput"] = (get_value(w[axis +"outputscale"]))
else:
get_pagevalue("maxoutput")
get_pagevalue("steptime")
get_pagevalue("stepspace")
get_pagevalue("dirhold")
get_pagevalue("dirsetup")
get_pagevalue("outputscale")
get_pagevalue("3pwmscale")
get_pagevalue("3pwmdeadtime")
get_active("bldc_option")
get_active("bldc_reverse")
get_pagevalue("bldc_scale")
get_pagevalue("bldc_poles")
get_pagevalue("bldc_encoder_offset")
get_pagevalue("bldc_drive_offset")
get_pagevalue("bldc_pattern_out")
get_pagevalue("bldc_pattern_in")
get_pagevalue("bldc_lead_angle")
get_pagevalue("bldc_inital_value")
get_pagevalue("8i20maxcurrent")
get_active("bldc_no_feedback")
get_active("bldc_absolute_feedback")
get_active("bldc_incremental_feedback")
get_active("bldc_use_hall")
get_active("bldc_use_encoder" )
get_active("bldc_use_index")
get_active("bldc_fanuc_alignment")
get_active("bldc_digital_output")
get_active("bldc_six_outputs")
get_active("bldc_emulated_feedback")
get_active("bldc_output_hall")
get_active("bldc_output_fanuc")
get_active("bldc_force_trapz")
if w[axis + "bldc_option"].get_active():
self.configure_bldc(axis)
d[axis + "encodercounts"] = int(float(w["encoderline"].get_text())*4)
if stepdrive: get_pagevalue("stepscale")
if encoder: get_pagevalue("encoderscale")
if resolver: get_pagevalue("encoderscale")
get_active("invertmotor")
get_active("invertencoder")
d[axis + "maxvel"] = (get_value(w[axis + "maxvel"])/60)
get_pagevalue("maxacc")
d[axis + "drivertype"] = self.drivertype_toid(axis, w[axis + "drivertype"].get_active())
if not axis == "s":
get_pagevalue("outputminlimit")
get_pagevalue("outputmaxlimit")
get_pagevalue("maxferror")
get_pagevalue("minferror")
get_text("homepos")
get_text("minlim")
get_text("maxlim")
get_text("homesw")
d[axis + "homesearchvel"] = (get_value(w[axis + "homesearchvel"])/60)
d[axis + "homelatchvel"] = (get_value(w[axis + "homelatchvel"])/60)
d[axis + "homefinalvel"] = (get_value(w[axis + "homefinalvel"])/60)
d[axis+"homesequence"] = (abs(get_value(w[axis+"homesequence"])))
get_active("searchdir")
get_active("latchdir")
get_active("usehomeindex")
d[axis + "compfilename"] = w[axis + "compfilename"].get_text()
get_active("comptype")
d[axis + "backlash"]= w[axis + "backlash"].get_value()
get_active("usecomp")
get_active("usebacklash")
else:
get_active("useatspeed")
d.susenearrange = w.snearrange_button.get_active()
get_pagevalue("nearscale")
d["snearscale"] = w["snearscale"].get_value()/100
d["snearrange"] = w["snearrange"].get_value()
get_pagevalue("filtergain")
get_active("singleinputencoder")
get_pagevalue("outputscale2")
self.d.gsincrvalue0 = self.d.soutputscale
self.d.gsincrvalue1 = self.d.soutputscale2
get_active("useoutputrange2")
self.d.scaleselect = self.d.suseoutputrange2
get_active("usenegativevoltage")
get_pagevalue("outputmaxvoltage")
def configure_bldc(self,axis):
d = self.d
string = ""
# Inputs
if d[axis + "bldc_no_feedback"]: string = string + "n"
elif d[axis +"bldc_absolute_feedback"]: string = string + "a"
elif d[axis + "bldc_incremental_feedback"]:
if d[axis + "bldc_use_hall"]: string = string + "h"
if d[axis + "bldc_use_encoder" ]: string = string + "q"
if d[axis + "bldc_use_index"]: string = string + "i"
if d[axis + "bldc_fanuc_alignment"]: string = string + "f"
# Outputs
if d[axis + "bldc_digital_output"]: string = string + "B"
if d[axis + "bldc_six_outputs"]: string = string + "6"
if d[axis + "bldc_emulated_feedback"]:
if d[axis + "bldc_output_hall"]: string = string + "H"
if d[axis + "bldc_output_fanuc"]: string = string +"F"
if d[axis + "bldc_force_trapz"]: string = string + "T"
#print "axis ",axis,"bldc config ",string
d[axis+"bldc_config"] = string
def calculate_spindle_scale(self):
def get(n): return get_value(self.widgets[n])
stepdrive = bool(self.findsignal("s-stepgen-step"))
encoder = bool(self.findsignal("s-encoder-a"))
resolver = bool(self.findsignal("s-resolver"))
twoscales = self.widgets.suseoutputrange2.get_active()
data_list=[ "steprev","microstep","motor_pulleydriver","motor_pulleydriven","motor_gear1driver","motor_gear1driven",
"motor_gear2driver","motor_gear2driven","motor_max"]
templist1 = ["encoderline","steprev","microstep","motor_gear1driven","motor_gear1driver","motor_gear2driven","motor_gear2driver",
"motor_pulleydriven","motor_pulleydriver","motor_max"]
checkbutton_list = ["cbmicrosteps","cbmotor_gear1","cbmotor_gear2","cbmotor_pulley","rbvoltage_5"
]
self.widgets.spindle_cbmicrosteps.set_sensitive(stepdrive)
self.widgets.spindle_microstep.set_sensitive(stepdrive)
self.widgets.spindle_steprev.set_sensitive(stepdrive)
self.widgets.label_steps_per_rev.set_sensitive(stepdrive)
self.widgets.spindle_motor_max.set_sensitive(not stepdrive)
self.widgets.label_motor_at_max_volt.set_sensitive(not stepdrive)
self.widgets.label_volt_at_max_rpm.set_sensitive(not stepdrive)
self.widgets.spindle_rbvoltage_10.set_sensitive(not stepdrive)
self.widgets.spindle_rbvoltage_5.set_sensitive(not stepdrive)
self.widgets.spindle_cbnegative_rot.set_sensitive(not stepdrive)
# pre set data
for i in data_list:
self.widgets['spindle_'+i].set_value(self.d['s'+i])
for i in checkbutton_list:
self.widgets['spindle_'+i].set_active(self.d['s'+i])
self.widgets.spindle_encoderline.set_value(self.widgets.sencoderscale.get_value()/4)
self.widgets.spindle_cbmotor_gear2.set_active(twoscales)
self.widgets.spindle_cbnegative_rot.set_active(self.widgets.susenegativevoltage.get_active())
        # temporarily add signals
for i in templist1:
self.d[i] = self.widgets['spindle_'+i].connect("value-changed", self.update_spindle_calculation)
for i in checkbutton_list:
self.d[i] = self.widgets['spindle_'+i].connect("toggled", self.update_spindle_calculation)
self.update_spindle_calculation(None)
# run dialog
self.widgets.spindle_scaledialog.set_title(_("Spindle Scale Calculation"))
self.widgets.spindle_scaledialog.show_all()
result = self.widgets.spindle_scaledialog.run()
self.widgets.spindle_scaledialog.hide()
# remove signals
for i in templist1:
self.widgets['spindle_'+i].disconnect(self.d[i])
for i in checkbutton_list:
self.widgets['spindle_'+i].disconnect(self.d[i])
if not result: return
# record data values
for i in data_list:
self.d['s'+i] = get('spindle_'+i)
for i in checkbutton_list:
self.d['s'+i] = self.widgets['spindle_'+i].get_active()
# set the widgets on the spindle page as per calculations
self.widgets.susenegativevoltage.set_active(self.widgets.spindle_cbnegative_rot.get_active())
if self.widgets.spindle_rbvoltage_5.get_active():
self.widgets.soutputmaxvoltage.set_value(5)
else:
self.widgets.soutputmaxvoltage.set_value(10)
self.widgets.soutputscale.set_value(self.temp_max_motor_speed1)
self.widgets.soutputscale2.set_value(self.temp_max_motor_speed2)
self.widgets.smaxoutput.set_value(self.temp_max_motor_speed1)
self.widgets.sencoderscale.set_value(self.widgets.spindle_encoderline.get_value()*4)
self.widgets.suseoutputrange2.set_active(self.widgets.spindle_cbmotor_gear2.get_active())
if stepdrive:
motor_steps = get_value(self.widgets.spindle_steprev)
if self.widgets.spindle_cbmicrosteps.get_active():
microstepfactor = get_value(self.widgets.spindle_microstep)
else:
microstepfactor = 1
self.widgets.sstepscale.set_value(motor_steps * microstepfactor)
if encoder or resolver:
self.widgets.sencoderscale.set_value(get("spindle_encoderline")*4)
def update_spindle_calculation(self,widget):
w= self.widgets
def get(n): return get_value(w[n])
motor_pulley_ratio = gear1_ratio = gear2_ratio = 1
motor_rpm = get("spindle_motor_max")
volts_at_max_rpm = 5
if self.widgets.spindle_rbvoltage_10.get_active():
volts_at_max_rpm = 10
if w["spindle_cbmotor_pulley"].get_active():
w["spindle_motor_pulleydriver"].set_sensitive(True)
w["spindle_motor_pulleydriven"].set_sensitive(True)
motor_pulley_ratio = (get("spindle_motor_pulleydriver") / get("spindle_motor_pulleydriven"))
else:
w["spindle_motor_pulleydriver"].set_sensitive(False)
w["spindle_motor_pulleydriven"].set_sensitive(False)
motor_pulley_ratio = 1
if w["spindle_cbmotor_gear1"].get_active():
w["spindle_motor_gear1driver"].set_sensitive(True)
w["spindle_motor_gear1driven"].set_sensitive(True)
gear1_ratio = (get("spindle_motor_gear1driver") / get("spindle_motor_gear1driven"))
else:
w["spindle_motor_gear1driver"].set_sensitive(False)
w["spindle_motor_gear1driven"].set_sensitive(False)
gear1_ratio = 1
i = w["spindle_cbmotor_gear2"].get_active()
w["spindle_motor_gear2driver"].set_sensitive(i)
w["spindle_motor_gear2driven"].set_sensitive(i)
w["label_rpm_at_max_motor2"].set_sensitive(i)
w["label_gear2_max_speed"].set_sensitive(i)
if i:
gear2_ratio = (get("spindle_motor_gear2driver") / get("spindle_motor_gear2driven"))
else:
gear2_ratio = 1
w["spindle_microstep"].set_sensitive(w["spindle_cbmicrosteps"].get_active())
self.temp_max_motor_speed1 = (motor_pulley_ratio * gear1_ratio * motor_rpm)
self.temp_max_motor_speed2 = (motor_pulley_ratio * gear2_ratio * motor_rpm)
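        # Worked example with illustrative numbers: a 1750 RPM motor with a 2:4
        # motor pulley (ratio 0.5) and a 1:1 first gear gives
        # temp_max_motor_speed1 = 0.5 * 1 * 1750 = 875 spindle RPM for gear 1.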
w["label_motor_at_max_volt"].set_markup(" <b>MOTOR</b> RPM at %d Volt Command"% volts_at_max_rpm)
w["label_volt_at_max_rpm"].set_text(" Voltage for %d Motor RPM:"% motor_rpm)
w["label_rpm_at_max_motor1"].set_text("Spindle RPM at %d Motor RPM -gear 1:"% motor_rpm)
w["label_rpm_at_max_motor2"].set_text("Spindle RPM at %d Motor RPM -gear 2:"% motor_rpm)
w["label_gear1_max_speed"].set_text("%d" % (motor_pulley_ratio * gear1_ratio * motor_rpm))
w["label_gear2_max_speed"].set_text("%d" % (motor_pulley_ratio * gear2_ratio * motor_rpm))
def calculate_scale(self,axis):
def get(n): return get_value(self.widgets[n])
stepdrive = self.findsignal(axis+"-stepgen-step")
encoder = self.findsignal(axis+"-encoder-a")
resolver = self.findsignal(axis+"-resolver")
data_list=[ "steprev","microstep","motor_pulleydriver","motor_pulleydriven","motor_wormdriver","motor_wormdriven",
"encoder_pulleydriver","encoder_pulleydriven","encoder_wormdriver","encoder_wormdriven","motor_leadscrew",
"encoder_leadscrew","motor_leadscrew_tpi","encoder_leadscrew_tpi",
]
templist1 = ["encoderline","encoder_leadscrew","encoder_leadscrew_tpi","encoder_wormdriven",
"encoder_wormdriver","encoder_pulleydriven","encoder_pulleydriver","steprev","motor_leadscrew","motor_leadscrew_tpi",
"microstep","motor_wormdriven","motor_wormdriver","motor_pulleydriven","motor_pulleydriver"
]
checkbutton_list = [ "cbencoder_pitch","cbencoder_tpi","cbencoder_worm","cbencoder_pulley","cbmotor_pitch",
"cbmotor_tpi","cbmicrosteps","cbmotor_worm","cbmotor_pulley"
]
# pre set data
for i in data_list:
self.widgets[i].set_value(self.d[axis+i])
for i in checkbutton_list:
self.widgets[i].set_active(self.d[axis+i])
        # temporarily add signals
for i in templist1:
self.d[i] = self.widgets[i].connect("value-changed", self.update_scale_calculation,axis)
for i in checkbutton_list:
self.d[i] = self.widgets[i].connect("toggled", self.update_scale_calculation,axis)
# pre calculate
self.update_scale_calculation(self.widgets,axis)
# run dialog
self.widgets.scaledialog.set_title(_("Axis Scale Calculation"))
self.widgets.scaledialog.show_all()
result = self.widgets.scaledialog.run()
self.widgets.scaledialog.hide()
# remove signals
for i in templist1:
self.widgets[i].disconnect(self.d[i])
for i in checkbutton_list:
self.widgets[i].disconnect(self.d[i])
if not result: return
# record data values
for i in data_list:
self.d[axis+i] = self.widgets[i].get_value()
for i in checkbutton_list:
self.d[axis+i] = self.widgets[i].get_active()
# set the calculations result
if encoder or resolver:
self.widgets[axis+"encoderscale"].set_value(get("calcencoder_scale"))
if stepdrive:
self.widgets[axis+"stepscale"].set_value(get("calcmotor_scale"))
def update_scale_calculation(self,widget,axis):
w = self.widgets
d = self.d
def get(n): return get_value(w[n])
stepdrive = self.findsignal(axis+"-stepgen-step")
encoder = self.findsignal(axis+"-encoder-a")
resolver = self.findsignal(axis+"-resolver")
motor_pulley_ratio = encoder_pulley_ratio = 1
motor_worm_ratio = encoder_worm_ratio = 1
encoder_scale = motor_scale = 0
microstepfactor = motor_pitch = encoder_pitch = motor_steps = 1
if axis == "a": rotary_scale = 360
else: rotary_scale = 1
try:
if stepdrive:
# stepmotor scale
w["calcmotor_scale"].set_sensitive(True)
w["stepscaleframe"].set_sensitive(True)
if w["cbmotor_pulley"].get_active():
w["motor_pulleydriver"].set_sensitive(True)
w["motor_pulleydriven"].set_sensitive(True)
motor_pulley_ratio = (get("motor_pulleydriven") / get("motor_pulleydriver"))
else:
w["motor_pulleydriver"].set_sensitive(False)
w["motor_pulleydriven"].set_sensitive(False)
if w["cbmotor_worm"].get_active():
w["motor_wormdriver"].set_sensitive(True)
w["motor_wormdriven"].set_sensitive(True)
motor_worm_ratio = (get("motor_wormdriver") / get("motor_wormdriven"))
else:
w["motor_wormdriver"].set_sensitive(False)
w["motor_wormdriven"].set_sensitive(False)
if w["cbmicrosteps"].get_active():
w["microstep"].set_sensitive(True)
microstepfactor = get("microstep")
else:
w["microstep"].set_sensitive(False)
if w["cbmotor_pitch"].get_active():
w["motor_leadscrew"].set_sensitive(True)
w["cbmotor_tpi"].set_active(False)
if self.d.units == _PD._METRIC:
motor_pitch = 1./ get("motor_leadscrew")
else:
motor_pitch = 1./ (get("motor_leadscrew")* .03937008)
else: w["motor_leadscrew"].set_sensitive(False)
if w["cbmotor_tpi"].get_active():
w["motor_leadscrew_tpi"].set_sensitive(True)
w["cbmotor_pitch"].set_active(False)
if self.d.units == _PD._METRIC:
motor_pitch = (get("motor_leadscrew_tpi")* .03937008)
else:
motor_pitch = get("motor_leadscrew_tpi")
else: w["motor_leadscrew_tpi"].set_sensitive(False)
motor_steps = get("steprev")
motor_scale = (motor_steps * microstepfactor * motor_pulley_ratio * motor_worm_ratio * motor_pitch) / rotary_scale
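                # Worked example with illustrative numbers: a 200 step/rev motor at 10
                # microsteps driving a 5 mm pitch leadscrew directly (all ratios 1) gives
                # motor_pitch = 1/5 = 0.2 rev/mm, so
                # motor_scale = 200 * 10 * 1 * 1 * 0.2 = 400 steps per mm.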
w["calcmotor_scale"].set_text(locale.format("%.4f", (motor_scale)))
else:
w["calcmotor_scale"].set_sensitive(False)
w["stepscaleframe"].set_sensitive(False)
# encoder scale
if encoder or resolver:
w["calcencoder_scale"].set_sensitive(True)
w["encoderscaleframe"].set_sensitive(True)
if w["cbencoder_pulley"].get_active():
w["encoder_pulleydriver"].set_sensitive(True)
w["encoder_pulleydriven"].set_sensitive(True)
encoder_pulley_ratio = (get("encoder_pulleydriven") / get("encoder_pulleydriver"))
else:
w["encoder_pulleydriver"].set_sensitive(False)
w["encoder_pulleydriven"].set_sensitive(False)
if w["cbencoder_worm"].get_active():
w["encoder_wormdriver"].set_sensitive(True)
w["encoder_wormdriven"].set_sensitive(True)
encoder_worm_ratio = (get("encoder_wormdriver") / get("encoder_wormdriven"))
else:
w["encoder_wormdriver"].set_sensitive(False)
w["encoder_wormdriven"].set_sensitive(False)
if w["cbencoder_pitch"].get_active():
w["encoder_leadscrew"].set_sensitive(True)
w["cbencoder_tpi"].set_active(False)
if self.d.units == _PD._METRIC:
encoder_pitch = 1./ get("encoder_leadscrew")
else:
encoder_pitch = 1./ (get("encoder_leadscrew")*.03937008)
else: w["encoder_leadscrew"].set_sensitive(False)
if w["cbencoder_tpi"].get_active():
w["encoder_leadscrew_tpi"].set_sensitive(True)
w["cbencoder_pitch"].set_active(False)
if self.d.units == _PD._METRIC:
encoder_pitch = (get("encoder_leadscrew_tpi")*.03937008)
else:
encoder_pitch = get("encoder_leadscrew_tpi")
else: w["encoder_leadscrew_tpi"].set_sensitive(False)
encoder_cpr = get_value(w[("encoderline")]) * 4
encoder_scale = (encoder_pulley_ratio * encoder_worm_ratio * encoder_pitch * encoder_cpr) / rotary_scale
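                # Worked example with illustrative numbers: a 500 line encoder gives
                # encoder_cpr = 500 * 4 = 2000 counts/rev; on a 5 mm pitch leadscrew
                # (encoder_pitch = 0.2 rev/mm, all ratios 1) that is
                # encoder_scale = 1 * 1 * 0.2 * 2000 = 400 counts per mm.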
w["calcencoder_scale"].set_text(locale.format("%.4f", (encoder_scale)))
else:
w["calcencoder_scale"].set_sensitive(False)
w["encoderscaleframe"].set_sensitive(False)
#new stuff
if stepdrive: scale = motor_scale
else: scale = encoder_scale
maxvps = (get_value(w[axis+"maxvel"]))/60
pps = (scale * (maxvps))/1000
if pps == 0: raise ValueError
pps = abs(pps)
w["khz"].set_text("%.1f" % pps)
acctime = (maxvps) / get_value(w[axis+"maxacc"])
accdist = acctime * .5 * (maxvps)
if encoder or resolver:
maxrpm = int(maxvps * 60 * (scale/encoder_cpr))
else:
maxrpm = int(maxvps * 60 * (scale/(microstepfactor * motor_steps)))
w["acctime"].set_text("%.4f" % acctime)
w["accdist"].set_text("%.4f" % accdist)
w["chartresolution"].set_text("%.7f" % (1.0 / scale))
w["calscale"].set_text(str(scale))
w["maxrpm"].set_text("%d" % maxrpm)
except (ValueError, ZeroDivisionError):
w["calcmotor_scale"].set_text("200")
w["calcencoder_scale"].set_text("1000")
w["chartresolution"].set_text("")
w["acctime"].set_text("")
if not axis == 's':
w["accdist"].set_text("")
w["khz"].set_text("")
w["calscale"].set_text("")
def motor_encoder_sanity_check(self,widgets,axis):
stepdrive = encoder = bad = resolver = pot = False
if self.findsignal(axis+"-stepgen-step"): stepdrive = True
if self.findsignal(axis+"-encoder-a"): encoder = True
if self.findsignal(axis+"-resolver"): resolver = True
if self.findsignal(axis+"-pot-outpot"): pot = True
if encoder or resolver:
if self.widgets[axis+"encoderscale"].get_value() < 1:
self.widgets[axis+"encoderscale"].modify_bg(gtk.STATE_NORMAL, self.widgets[axis+"encoderscale"].get_colormap().alloc_color("red"))
dbg('encoder resolver scale bad %f'%self.widgets[axis+"encoderscale"].get_value())
bad = True
if stepdrive:
if self.widgets[axis+"stepscale"].get_value() < 1:
self.widgets[axis+"stepscale"].modify_bg(gtk.STATE_NORMAL, self.widgets[axis+"stepscale"].get_colormap().alloc_color("red"))
dbg('step scale bad')
bad = True
if not (encoder or resolver) and not stepdrive and not axis == "s":
dbg('encoder %s resolver %s stepper %s axis %s'%(encoder,resolver,stepdrive,axis))
bad = True
if self.widgets[axis+"maxvel"] < 1:
dbg('max vel low')
bad = True
if self.widgets[axis+"maxacc"] < 1:
dbg('max accl low')
bad = True
if bad:
dbg('motor %s_encoder sanity check -bad'%axis)
self.p.set_buttons_sensitive(1,0)
self.widgets[axis + "axistune"].set_sensitive(0)
self.widgets[axis + "axistest"].set_sensitive(0)
else:
dbg('motor %s_encoder sanity check - good'%axis)
self.widgets[axis+"encoderscale"].modify_bg(gtk.STATE_NORMAL, self.origbg)
self.widgets[axis+"stepscale"].modify_bg(gtk.STATE_NORMAL, self.origbg)
self.p.set_buttons_sensitive(1,1)
self.widgets[axis + "axistune"].set_sensitive(1)
self.widgets[axis + "axistest"].set_sensitive(1)
def update_gladevcp(self):
i = self.widgets.gladevcp.get_active()
self.widgets.gladevcpbox.set_sensitive( i )
if self.d.frontend == _PD._TOUCHY:
self.widgets.centerembededgvcp.set_active(True)
self.widgets.centerembededgvcp.set_sensitive(True)
self.widgets.sideembededgvcp.set_sensitive(False)
self.widgets.standalonegvcp.set_sensitive(False)
elif self.d.frontend == _PD._GMOCCAPY or self.d.frontend == _PD._AXIS:
self.widgets.sideembededgvcp.set_sensitive(True)
self.widgets.centerembededgvcp.set_sensitive(True)
self.widgets.standalonegvcp.set_sensitive(False)
if not self.widgets.centerembededgvcp.get_active() and not self.widgets.sideembededgvcp.get_active():
self.widgets.centerembededgvcp.set_active(True)
else:
self.widgets.sideembededgvcp.set_sensitive(False)
self.widgets.centerembededgvcp.set_sensitive(False)
self.widgets.standalonegvcp.set_sensitive(True)
self.widgets.standalonegvcp.set_active(True)
i = self.widgets.standalonegvcp.get_active()
self.widgets.gladevcpsize.set_sensitive(i)
self.widgets.gladevcpposition.set_sensitive(i)
self.widgets.gladevcpforcemax.set_sensitive(i)
if not i:
self.widgets.gladevcpsize.set_active(False)
self.widgets.gladevcpposition.set_active(False)
self.widgets.gladevcpforcemax.set_active(False)
i = self.widgets.gladevcpsize.get_active()
self.widgets.gladevcpwidth.set_sensitive(i)
self.widgets.gladevcpheight.set_sensitive(i)
i = self.widgets.gladevcpposition.get_active()
self.widgets.gladevcpxpos.set_sensitive(i)
self.widgets.gladevcpypos.set_sensitive(i)
for i in (("zerox","x"),("zeroy","y"),("zeroz","z"),("zeroa","a"),("autotouchz","z")):
if not i[1] in(self.d.available_axes):
self.widgets[i[0]].set_active(False)
self.widgets[i[0]].set_sensitive(False)
else:
self.widgets[i[0]].set_sensitive(True)
def has_spindle_speed_control(self):
for test in ("s-stepgen-step", "s-pwm-pulse", "s-encoder-a", "spindle-enable", "spindle-cw", "spindle-ccw", "spindle-brake",
"s-pot-output"):
has_spindle = self.findsignal(test)
print test,has_spindle
if has_spindle:
return True
if self.d.serial_vfd and (self.d.mitsub_vfd or self.d.gs2_vfd):
return True
return False
def clean_unused_ports(self, *args):
        # if parallel ports are not used, clear all of their signals
parportnames = ("pp1","pp2","pp3")
for check,connector in enumerate(parportnames):
if self.d.number_pports >= (check+1):continue
# initialize parport input / inv pins
for i in (1,2,3,4,5,6,7,8,10,11,12,13,15):
pinname ="%s_Ipin%d"% (connector,i)
self.d[pinname] = _PD.UNUSED_INPUT
pinname ="%s_Ipin%d_inv"% (connector,i)
self.d[pinname] = False
# initialize parport output / inv pins
for i in (1,2,3,4,5,6,7,8,9,14,16,17):
pinname ="%s_Opin%d"% (connector,i)
self.d[pinname] = _PD.UNUSED_OUTPUT
pinname ="%s_Opin%d_inv"% (connector,i)
self.d[pinname] = False
# clear all unused mesa signals
for boardnum in(0,1):
for connector in(1,2,3,4,5,6,7,8,9):
if self.d.number_mesa >= boardnum + 1 :
if connector in(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]) :
continue
# This initializes GPIO input pins
for i in range(0,16):
pinname ="mesa%dc%dpin%d"% (boardnum,connector,i)
self.d[pinname] = _PD.UNUSED_INPUT
pinname ="mesa%dc%dpin%dtype"% (boardnum,connector,i)
self.d[pinname] = _PD.GPIOI
# This initializes GPIO output pins
for i in range(16,24):
pinname ="mesa%dc%dpin%d"% (boardnum,connector,i)
self.d[pinname] = _PD.UNUSED_OUTPUT
pinname ="mesa%dc%dpin%dtype"% (boardnum,connector,i)
self.d[pinname] = _PD.GPIOO
# This initializes the mesa inverse pins
for i in range(0,24):
pinname ="mesa%dc%dpin%dinv"% (boardnum,connector,i)
self.d[pinname] = False
# clear unused sserial signals
keeplist =[]
            # if the current firmware supports sserial, check for used channels
            # and build a 'keeplist' of them - we don't want to clear those
if self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._MAXSSERIALPORTS]:
#search all pins for sserial port
for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._NUMOFCNCTRS]) :
for pin in range (0,24):
firmptype,compnum = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._STARTOFDATA+pin+(concount*24)]
p = 'mesa%dc%dpin%d' % (boardnum, connector, pin)
ptype = 'mesa%dc%dpin%dtype' % (boardnum, connector , pin)
if self.d[ptype] in (_PD.TXDATA0,_PD.TXDATA1,_PD.TXDATA2,_PD.TXDATA3,_PD.TXDATA4,_PD.SS7I76M0,_PD.SS7I76M2,_PD.SS7I76M3,
_PD.SS7I77M0,_PD.SS7I77M1,_PD.SS7I77M3,_PD.SS7I77M4) and not self.d[p] == _PD.UNUSED_SSERIAL:
if self.d[ptype] in (_PD.TXDATA0,_PD.SS7I76M0,_PD.SS7I77M0): channelnum = 0
elif self.d[ptype] in (_PD.TXDATA1,_PD.SS7I77M1): channelnum = 1
elif self.d[ptype] == _PD.TXDATA2: channelnum = 2
elif self.d[ptype] in (_PD.TXDATA3,_PD.SS7I76M3,_PD.SS7I77M3): channelnum = 3
elif self.d[ptype] in (_PD.TXDATA4,_PD.SS7I77M4): channelnum = 4
keeplist.append(channelnum)
#print "board # %d sserial keeplist"%(boardnum),keeplist
# ok clear the sserial pins unless they are in the keeplist
port = 0# TODO hard code at only 1 sserial port
for channel in range(0,_PD._NUM_CHANNELS): #TODO hardcoded at 5 sserial channels instead of 8
if channel in keeplist: continue
# This initializes pins
for i in range(0,self._p._SSCOMBOLEN):
pinname ="mesa%dsserial%d_%dpin%d"% (boardnum, port,channel,i)
if i < 24:
self.d[pinname] = _PD.UNUSED_INPUT
else:
self.d[pinname] = _PD.UNUSED_OUTPUT
pinname ="mesa%dsserial%d_%dpin%dtype"% (boardnum, port,channel,i)
if i < 24:
self.d[pinname] = _PD.GPIOI
else:
self.d[pinname] = _PD.GPIOO
pinname ="mesa%dsserial%d_%dpin%dinv"% (boardnum, port,channel,i)
self.d[pinname] = False
def debug_iter(self,test,testwidget,message=None):
print "#### DEBUG :",message
for i in ("_gpioosignaltree","_gpioisignaltree","_steppersignaltree","_encodersignaltree","_muxencodersignaltree",
"_pwmcontrolsignaltree","_pwmrelatedsignaltree","_tppwmsignaltree",
"_gpioliststore","_encoderliststore","_muxencoderliststore","_pwmliststore","_tppwmliststore"):
modelcheck = self.widgets[testwidget].get_model()
if modelcheck == self.d[i]:print i;break
#********************
# Common Helper functions
#********************
def tandem_check(self, letter):
tandem_stepper = self.make_pinname(self.stepgen_sig("%s2"%letter))
tandem_pwm = self.make_pinname(self.pwmgen_sig("%s2"%letter))
print letter, bool(tandem_stepper or tandem_pwm), tandem_stepper, tandem_pwm
return bool(tandem_stepper or tandem_pwm)
def stepgen_sig(self, axis):
thisaxisstepgen = axis + "-stepgen-step"
test = self.findsignal(thisaxisstepgen)
return test
    # find the individual pins related to the step gens
    # so that we can check if they were inverted
def stepgen_invert_pins(self,pinnumber):
# sample pinname = mesa0c0pin11
signallist_a = []
signallist_b = []
pin = int(pinnumber[10:])
connector = int(pinnumber[6:7])
boardnum = int(pinnumber[4:5])
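        # The fixed-width pin name is parsed positionally, e.g. (illustrative)
        # 'mesa0c3pin11' -> boardnum 0, connector 3, pin 11; this assumes
        # single-digit board and connector numbers.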
channel = None
pinlist = self.list_related_pins([_PD.STEPA,_PD.STEPB], boardnum, connector, channel, pin, 0)
#print pinlist
for num,i in enumerate(pinlist):
if self.d[i[0]+"inv"]:
gpioname = self.make_pinname(self.findsignal( self.d[i[0]] ),True)
#print gpioname
if num:
signallist_b.append(gpioname)
else:
signallist_a.append(gpioname)
return [signallist_a, signallist_b]
def spindle_invert_pins(self,pinnumber):
# sample pinname = mesa0sserial0_0pin11
signallist = []
pin = int(pinnumber[18:])
port = int(pinnumber[12:13])
boardnum = int(pinnumber[4:5])
channel = int(pinnumber[14:15])
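        # Parsed positionally, e.g. (illustrative) 'mesa0sserial0_0pin11'
        # -> boardnum 0, port 0, channel 0, pin 11.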
pinlist = self.list_related_pins([_PD.POTO,_PD.POTE], boardnum, port, channel, pin, 0)
for i in pinlist:
if self.d[i[0]+"inv"]:
name = self.d[i[0]+"type"]
signallist.append(name)
return signallist
def encoder_sig(self, axis):
thisaxisencoder = axis +"-encoder-a"
test = self.findsignal(thisaxisencoder)
return test
def resolver_sig(self, axis):
thisaxisresolver = axis +"-resolver"
test = self.findsignal(thisaxisresolver)
return test
def amp_8i20_sig(self, axis):
thisaxis8i20 = "%s-8i20"% axis
test = self.findsignal(thisaxis8i20)
return test
def potoutput_sig(self,axis):
thisaxispot = "%s-pot-output"% axis
test = self.findsignal(thisaxispot)
return test
def pwmgen_sig(self, axis):
thisaxispwmgen = axis + "-pwm-pulse"
test = self.findsignal( thisaxispwmgen)
return test
def pwmgen_invert_pins(self,pinnumber):
print "list pwm invert pins",pinnumber
# sample pinname = mesa0c0pin11
signallist = []
pin = int(pinnumber[10:])
connector = int(pinnumber[6:7])
boardnum = int(pinnumber[4:5])
channel = None
pinlist = self.list_related_pins([_PD.PWMP, _PD.PWMD, _PD.PWME], boardnum, connector, channel, pin, 0)
print pinlist
for i in pinlist:
if self.d[i[0]+"inv"]:
gpioname = self.make_pinname(self.findsignal( self.d[i[0]] ),True)
print gpioname
signallist.append(gpioname)
return signallist
def tppwmgen_sig(self, axis):
thisaxispwmgen = axis + "-tppwm-a"
test = self.findsignal(thisaxispwmgen)
return test
def tppwmgen_has_6(self, axis):
thisaxispwmgen = axis + "-tppwm-anot"
test = self.findsignal(thisaxispwmgen)
return test
def home_sig(self, axis):
thisaxishome = set(("all-home", "home-" + axis, "min-home-" + axis, "max-home-" + axis, "both-home-" + axis))
for i in thisaxishome:
if self.findsignal(i): return i
return None
def min_lim_sig(self, axis):
thisaxishome = set(("all-limit", "min-" + axis,"min-home-" + axis, "both-" + axis, "both-home-" + axis))
for i in thisaxishome:
if self.findsignal(i): return i
return None
def max_lim_sig(self, axis):
thisaxishome = set(("all-limit", "max-" + axis, "max-home-" + axis, "both-" + axis, "both-home-" + axis))
for i in thisaxishome:
if self.findsignal(i): return i
return None
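    # Illustrative example: for axis 'x', min_lim_sig() checks 'all-limit', 'min-x',
    # 'min-home-x', 'both-x' and 'both-home-x' and returns whichever of them is
    # assigned to a pin, or None if none are.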
def get_value(self,w):
return get_value(w)
def show_try_errors(self):
exc_type, exc_value, exc_traceback = sys.exc_info()
formatted_lines = traceback.format_exc().splitlines()
print
print "****Pncconf verbose debugging:",formatted_lines[0]
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print formatted_lines[-1]
def hostmot2_command_string(self, substitution = False):
def make_name(bname,bnum):
if substitution:
return "[HMOT](CARD%d)"% (bnum)
else:
return "hm2_%s.%d"% (bname,bnum)
# mesa stuff
load_cmnds = []
board0 = self.d.mesa0_currentfirmwaredata[_PD._BOARDNAME]
board1 = self.d.mesa1_currentfirmwaredata[_PD._BOARDNAME]
driver0 = ' %s'% self.d.mesa0_currentfirmwaredata[_PD._HALDRIVER]
driver1 = ' %s'% self.d.mesa1_currentfirmwaredata[_PD._HALDRIVER]
directory0 = self.d.mesa0_currentfirmwaredata[_PD._DIRECTORY]
directory1 = self.d.mesa1_currentfirmwaredata[_PD._DIRECTORY]
firm0 = self.d.mesa0_currentfirmwaredata[_PD._FIRMWARE]
firm1 = self.d.mesa1_currentfirmwaredata[_PD._FIRMWARE]
firmstring0 = firmstring1 = board0_ip = board1_ip = ""
mesa0_3pwm = mesa1_3pwm = ''
mesa0_ioaddr = mesa1_ioaddr = ''
load_cmnds.append("loadrt hostmot2")
if '7i43' in board0:
mesa0_ioaddr = ' ioaddr=%s ioaddr_hi=0 epp_wide=1'% self.d.mesa0_parportaddrs
if '7i43' in board1:
mesa1_ioaddr = ' ioaddr=%s ioaddr_hi=0 epp_wide=1'% self.d.mesa1_parportaddrs
if 'eth' in driver0:
firmstring0 =''
if self.d.mesa0_card_addrs:
board0_ip = ''' board_ip="%s"''' % self.d.mesa0_card_addrs
elif not "5i25" in board0:
firmstring0 = "firmware=hm2/%s/%s.BIT" % (directory0, firm0)
if 'eth' in driver1:
firmstring1 =''
if self.d.mesa1_card_addrs:
board1_ip = ''' board_ip="%s"'''% self.d.mesa1_card_addrs
elif not "5i25" in board1:
firmstring1 = "firmware=hm2/%s/%s.BIT" % (directory1, firm1)
# TODO fix this hardcoded hack: only one serialport
ssconfig0 = ssconfig1 = resolver0 = resolver1 = temp = ""
if self.d.mesa0_numof_sserialports:
for i in range(1,_PD._NUM_CHANNELS+1):
if i <= self.d.mesa0_numof_sserialchannels:
# m number in the name signifies the required sserial mode
for j in ("123456789"):
if ("m"+j) in self.d["mesa0sserial0_%dsubboard"% (i-1)]:
temp = temp + j
break
else: temp = temp + "0" # default case
else:
temp = temp + "x"
ssconfig0 = " sserial_port_0=%s"% temp
if self.d.number_mesa == 2 and self.d.mesa1_numof_sserialports:
for i in range(1,_PD._NUM_CHANNELS+1):
if i <= self.d.mesa1_numof_sserialchannels:
# m number in the name signifies the required sserial mode
for j in ("123456789"):
if ("m"+j) in self.d["mesa1sserial0_%dsubboard"% (i-1)]:
temp = temp + j
break
else: temp = temp + "0" # default case
else:
temp = temp + "x"
ssconfig1 = " sserial_port_0=%s"% temp
if self.d.mesa0_numof_resolvers:
resolver0 = " num_resolvers=%d"% self.d.mesa0_numof_resolvers
if self.d.mesa1_numof_resolvers:
resolver1 = " num_resolvers=%d"% self.d.mesa1_numof_resolvers
if self.d.mesa0_numof_tppwmgens:
mesa0_3pwm = ' num_3pwmgens=%d' %self.d.mesa0_numof_tppwmgens
if self.d.mesa1_numof_tppwmgens:
mesa1_3pwm = ' num_3pwmgens=%d' %self.d.mesa1_numof_tppwmgens
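        # The loadrt line is assembled from the pieces above; an illustrative result
        # (board, firmware and counts are made-up values) for a single PCI card is:
        # loadrt hm2_pci config="firmware=hm2/5i20/SVST8_4.BIT num_encoders=3 num_pwmgens=3 num_stepgens=0"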
if self.d.number_mesa == 1:
load_cmnds.append( """loadrt%s%s%s config="%s num_encoders=%d num_pwmgens=%d%s num_stepgens=%d%s%s" """ % (
driver0, board0_ip, mesa0_ioaddr, firmstring0, self.d.mesa0_numof_encodergens, self.d.mesa0_numof_pwmgens,
mesa0_3pwm, self.d.mesa0_numof_stepgens, ssconfig0, resolver0))
elif self.d.number_mesa == 2 and (driver0 == driver1):
load_cmnds.append( """loadrt%s%s%s config="%s num_encoders=%d num_pwmgens=%d%s num_stepgens=%d%s%s,\
%s%s num_encoders=%d num_pwmgens=%d%s num_stepgens=%d%s%s" """ % (
driver0, board0_ip, mesa0_ioaddr, firmstring0, self.d.mesa0_numof_encodergens, self.d.mesa0_numof_pwmgens,
mesa0_3pwm, self.d.mesa0_numof_stepgens, ssconfig0, resolver0, mesa1_ioaddr, firmstring1,
self.d.mesa1_numof_encodergens, self.d.mesa1_numof_pwmgens, mesa1_3pwm,
self.d.mesa1_numof_stepgens, ssconfig1, resolver1))
elif self.d.number_mesa == 2:
load_cmnds.append( """loadrt%s%s%s config="%s num_encoders=%d num_pwmgens=%d%s num_stepgens=%d%s%s" """ % (
driver0, board0_ip, mesa0_ioaddr, firmstring0, self.d.mesa0_numof_encodergens, self.d.mesa0_numof_pwmgens,
mesa0_3pwm, self.d.mesa0_numof_stepgens, ssconfig0, resolver0 ))
load_cmnds.append( """loadrt%s%s%s config="%s num_encoders=%d num_pwmgens=%d%s num_stepgens=%d%s%s" """ % (
driver1, board1_ip, mesa1_ioaddr, firmstring1, self.d.mesa1_numof_encodergens, self.d.mesa1_numof_pwmgens,
                mesa1_3pwm, self.d.mesa1_numof_stepgens, ssconfig1, resolver1 ))
for boardnum in range(0,int(self.d.number_mesa)):
if boardnum == 1 and (board0 == board1):
halnum = 1
else:
halnum = 0
prefix = make_name(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME],halnum)
if self.d["mesa%d_numof_pwmgens"% boardnum] > 0:
load_cmnds.append( "setp %s.pwmgen.pwm_frequency %d"% (prefix, self.d["mesa%d_pwm_frequency"% boardnum] ))
load_cmnds.append( "setp %s.pwmgen.pdm_frequency %d"% (prefix, self.d["mesa%d_pdm_frequency"% boardnum] ))
load_cmnds.append( "setp %s.watchdog.timeout_ns %d"% (prefix, self.d["mesa%d_watchdog_timeout"% boardnum] ))
# READ
read_cmnds = []
for boardnum in range(0,int(self.d.number_mesa)):
if boardnum == 1 and (self.d.mesa0_currentfirmwaredata[_PD._BOARDNAME] == self.d.mesa1_currentfirmwaredata[_PD._BOARDNAME]):
halnum = 1
else:
halnum = 0
prefix = make_name(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME],halnum)
read_cmnds.append( "addf %s.read servo-thread"% (prefix))
# WRITE
write_cmnds = []
for boardnum in range(0,int(self.d.number_mesa)):
if boardnum == 1 and (self.d.mesa0_currentfirmwaredata[_PD._BOARDNAME] == self.d.mesa1_currentfirmwaredata[_PD._BOARDNAME]):
halnum = 1
else:
halnum = 0
prefix = make_name(self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME],halnum)
write_cmnds.append( "addf %s.write servo-thread"% (prefix))
if '7i76e' in self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME] or \
'7i92' in self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME]:
write_cmnds.append( "setp %s.dpll.01.timer-us -50"% (prefix))
write_cmnds.append( "setp %s.stepgen.timer-number 1"% (prefix))
return load_cmnds,read_cmnds,write_cmnds
def pport_command_string(self):
# LOAD
load_cmnds = []
# parport stuff
port3name = port2name = port1name = port3dir = port2dir = port1dir = ""
if self.d.number_pports>2:
port3name = " " + self.d.ioaddr3
if self.d.pp3_direction:
port3dir =" out"
else:
port3dir =" in"
if self.d.number_pports>1:
port2name = " " + self.d.ioaddr2
if self.d.pp2_direction:
port2dir =" out"
else:
port2dir =" in"
port1name = self.d.ioaddr1
if self.d.pp1_direction:
port1dir =" out"
else:
port1dir =" in"
load_cmnds.append("loadrt hal_parport cfg=\"%s%s%s%s%s%s\"" % (port1name, port1dir, port2name, port2dir, port3name, port3dir))
# READ
read_cmnds = []
read_cmnds.append( "addf parport.0.read servo-thread")
if self.d.number_pports > 1:
read_cmnds.append( "addf parport.1.read servo-thread")
if self.d.number_pports > 2:
read_cmnds.append( "addf parport.2.read servo-thread")
# WRITE
write_cmnds = []
write_cmnds.append( "addf parport.0.write servo-thread")
if self.d.number_pports > 1:
write_cmnds.append( "addf parport.1.write servo-thread")
if self.d.number_pports > 2:
write_cmnds.append( "addf parport.2.write servo-thread")
return load_cmnds,read_cmnds,write_cmnds
# This method returns I/O pin designation (name and number) of a given HAL signalname.
    # It does not check to see if the signalname is in the list more than once.
    # If parports are not used then parport signals are not searched.
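    # Illustrative example: if parport 1 input pin 10 is assigned the signal
    # 'x-home-sw', findsignal('x-home-sw') returns the key 'pp1_Ipin10';
    # an unassigned signal returns None.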
def findsignal(self, sig):
if self.d.number_pports:
ppinput = {}
ppoutput = {}
for i in (1,2,3):
for s in (2,3,4,5,6,7,8,9,10,11,12,13,15):
key = self.d["pp%d_Ipin%d" %(i,s)]
ppinput[key] = "pp%d_Ipin%d" %(i,s)
for s in (1,2,3,4,5,6,7,8,9,14,16,17):
key = self.d["pp%d_Opin%d" %(i,s)]
ppoutput[key] = "pp%d_Opin%d" %(i,s)
mesa = {}
for boardnum in range(0,int(self.d.number_mesa)):
for concount,connector in enumerate(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]) :
for s in range(0,24):
key = self.d["mesa%dc%dpin%d"% (boardnum,connector,s)]
mesa[key] = "mesa%dc%dpin%d" %(boardnum,connector,s)
if self.d["mesa%d_numof_sserialports"% boardnum]:
sserial = {}
port = 0
for channel in range (0,self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._MAXSSERIALCHANNELS]):
if channel ==_PD._NUM_CHANNELS: break # TODO may not be all channels available
for pin in range (0,_PD._SSCOMBOLEN):
key = self.d['mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, pin)]
sserial[key] = 'mesa%dsserial%d_%dpin%d' % (boardnum, port, channel, pin)
try:
return mesa[sig]
except:
try:
return sserial[sig]
except:
pass
if self.d.number_pports:
try:
return ppinput[sig]
except:
try:
return ppoutput[sig]
except:
return None
else: return None
    # Search all of the current firmware array for related pins.
    # If not the same component number as the pin that changed, or
    # if not in the related component type, keep searching.
    # If it is the right component type and number, check the relatedsearch array for a match.
    # If it's a match, add it to a list of pins (pinlist) that need to be updated.
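    # Illustrative example: stepgen_invert_pins() calls this with
    # relatedsearch=[STEPA, STEPB], so for the stepgen that owns the changed pin
    # both its STEPA and STEPB pins are returned and their invert settings can be
    # inspected.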
def list_related_pins(self, relatedsearch, boardnum, connector, channel, pin, style):
#print relatedsearch, boardnum, connector, channel, pin, style
pinlist =[]
if not channel == None:
subfirmname = self.d["mesa%dsserial%d_%dsubboard"% (boardnum, connector, channel)]
for subnum,temp in enumerate(_PD.MESA_DAUGHTERDATA):
if _PD.MESA_DAUGHTERDATA[subnum][_PD._SUBFIRMNAME] == subfirmname: break
subboardname = _PD.MESA_DAUGHTERDATA[subnum][_PD._SUBBOARDNAME]
currentptype,currentcompnum = _PD.MESA_DAUGHTERDATA[subnum][_PD._SUBSTARTOFDATA+pin]
for t_pin in range (0,_PD._SSCOMBOLEN):
comptype,compnum = _PD.MESA_DAUGHTERDATA[subnum][_PD._SUBSTARTOFDATA+t_pin]
if compnum != currentcompnum: continue
if comptype not in (relatedsearch): continue
if style == 0:
tochange = ['mesa%dsserial%d_%dpin%d'% (boardnum,connector,channel,t_pin),boardnum,connector,channel,t_pin]
if style == 1:
tochange = ['mesa%dsserial%d_%dpin%dtype'% (boardnum,connector,channel,t_pin),boardnum,connector,channel,t_pin]
if style == 2:
tochange = ['mesa%dsserial%d_%dpin%dinv'% (boardnum,connector,channel,t_pin),boardnum,connector,channel,t_pin]
pinlist.append(tochange)
else:
for concount,i in enumerate(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]):
if i == connector:
currentptype,currentcompnum = self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._STARTOFDATA+pin+(concount*24)]
for t_concount,t_connector in enumerate(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]):
for t_pin in range (0,24):
comptype,compnum = self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._STARTOFDATA+t_pin+(t_concount*24)]
if compnum != currentcompnum: continue
if comptype not in (relatedsearch): continue
if style == 0:
tochange = ['mesa%dc%dpin%d'% (boardnum,t_connector,t_pin),boardnum,t_connector,None,t_pin]
if style == 1:
tochange = ['mesa%dc%dpin%dtype'% (boardnum,t_connector,t_pin),boardnum,t_connector,None,t_pin]
if style == 2:
tochange = ['mesa%dc%dpin%dinv'% (boardnum,t_connector,t_pin),boardnum,t_connector,None,t_pin]
pinlist.append(tochange)
return pinlist
    # This method takes a signal-name data pin (eg mesa0c3pin1)
    # and converts it to a HAL pin name (eg hm2_5i20.0.gpio.01).
    # Component number conversion is for adjustment of the position of pins related to the
    # 'controlling pin', eg encoder-a (controlling pin), encoder-b, encoder-i
    # (a, b, i are related pins of the encoder component).
    # gpionumber is a flag to return a GPIO pin name instead of the component pin name.
    # This is used when we want to invert the pins of a component output (such as a stepper)
    # because you actually must invert the GPIO that would be in that position.
    # The prefixonly flag is used when we want the pin name without the component name,
    # used with sserial when we want the sserial port and channel so we can add our own name (eg enable pins).
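    # Illustrative example (made-up board/pin): a 5i20 connector pin of type ENCA
    # belonging to encoder component 1 returns 'hm2_5i20.0.encoder.01'; with
    # gpionumber=True the same pin returns its gpio name instead.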
def make_pinname(self, pin, gpionumber = False, prefixonly = False, substitution = False):
def make_name(bname,bnum):
if substitution:
return "[HMOT](CARD%d)"% (bnum)
else:
return "hm2_%s.%d"% (bname, bnum)
test = str(pin)
halboardnum = 0
if test == "None": return None
elif 'mesa' in test:
type_name = { _PD.GPIOI:"gpio", _PD.GPIOO:"gpio", _PD.GPIOD:"gpio", _PD.SSR0:"ssr",
_PD.ENCA:"encoder", _PD.ENCB:"encoder",_PD.ENCI:"encoder",_PD.ENCM:"encoder",
_PD.RES0:"resolver",_PD.RES1:"resolver",_PD.RES2:"resolver",_PD.RES3:"resolver",_PD.RES4:"resolver",_PD.RES5:"resolver",
_PD.MXE0:"encoder", _PD.MXE1:"encoder",
_PD.PWMP:"pwmgen",_PD.PWMD:"pwmgen", _PD.PWME:"pwmgen", _PD.PDMP:"pwmgen", _PD.PDMD:"pwmgen", _PD.PDME:"pwmgen",
_PD.UDMU:"pwmgen",_PD.UDMD:"pwmgen", _PD.UDME:"pwmgen",_PD.STEPA:"stepgen", _PD.STEPB:"stepgen",
_PD.TPPWMA:"tppwmgen",_PD.TPPWMB:"tppwmgen",_PD.TPPWMC:"tppwmgen",
_PD.TPPWMAN:"tppwmgen",_PD.TPPWMBN:"tppwmgen",_PD.TPPWMCN:"tppwmgen",
_PD.TPPWME:"tppwmgen",_PD.TPPWMF:"tppwmgen",_PD.AMP8I20:"8i20",_PD.POTO:"spinout",
_PD.POTE:"spinena",_PD.POTD:"spindir",_PD.ANALOGIN:"analog","Error":"None" }
boardnum = int(test[4:5])
boardname = self.d["mesa%d_currentfirmwaredata"% boardnum][_PD._BOARDNAME]
meta = self.get_board_meta(boardname)
num_of_pins = meta.get('PINS_PER_CONNECTOR')
ptype = self.d[pin+"type"]
if boardnum == 1 and self.d.mesa1_currentfirmwaredata[_PD._BOARDNAME] == self.d.mesa0_currentfirmwaredata[_PD._BOARDNAME]:
halboardnum = 1
if 'serial' in test:
# sample pin name = mesa0sserial0_0pin24
pinnum = int(test[18:])
portnum = int(test[12:13])
channel = int(test[14:15])
subfirmname = self.d["mesa%dsserial%d_%dsubboard"% (boardnum, portnum, channel)]
for subnum,temp in enumerate(_PD.MESA_DAUGHTERDATA):
#print "pinname search -",_PD.MESA_DAUGHTERDATA[subnum][_PD._SUBFIRMNAME],subfirmname
if _PD.MESA_DAUGHTERDATA[subnum][_PD._SUBFIRMNAME] == subfirmname: break
#print "pinname -found subboard name:",_PD.MESA_DAUGHTERDATA[subnum][_PD._SUBFIRMNAME],subfirmname,subnum,"channel:",channel
subboardname = _PD.MESA_DAUGHTERDATA[subnum][_PD._SUBBOARDNAME]
firmptype,compnum = _PD.MESA_DAUGHTERDATA[subnum][_PD._SUBSTARTOFDATA+pinnum]
                # we iterate over this dict because of locale translation problems when using
                # comptype = type_name[ptype]
comptype = "ERROR FINDING COMPONENT TYPE"
for key,value in type_name.iteritems():
if key == ptype:
comptype = value
break
if value == "Error":
print "**** ERROR PNCCONF: pintype error in make_pinname: (sserial) ptype = ",ptype
return None
# if gpionumber flag is true - convert to gpio pin name
if gpionumber or ptype in(_PD.GPIOI,_PD.GPIOO,_PD.GPIOD,_PD.SSR0):
if "7i77" in (subboardname) or "7i76" in(subboardname)or "7i84" in(subboardname):
if ptype in(_PD.GPIOO,_PD.GPIOD):
comptype = "output"
if pinnum >15 and pinnum <24:
pinnum = pinnum-16
elif pinnum >39:
pinnum = pinnum -32
elif ptype == _PD.GPIOI:
comptype = "input"
if pinnum >23 and pinnum < 40:
pinnum = pinnum-8
return "%s.%s.%d.%d."% (make_name(boardname,halboardnum),subboardname,portnum,channel) + comptype+"-%02d"% (pinnum)
elif "7i69" in (subboardname) or "7i73" in (subboardname) or "7i64" in(subboardname):
if ptype in(_PD.GPIOO,_PD.GPIOD):
comptype = "output"
pinnum -= 24
elif ptype == _PD.GPIOI:
comptype = "input"
return "%s.%s.%d.%d."% (make_name(boardname,halboardnum),subboardname,portnum,channel) + comptype+"-%02d"% (pinnum)
elif "7i70" in (subboardname) or "7i71" in (subboardname):
if ptype in(_PD.GPIOO,_PD.GPIOD):
comptype = "output"
elif ptype == _PD.GPIOI:
comptype = "input"
return "%s.%s.%d.%d."% (make_name(boardname,halboardnum),subboardname,portnum,channel) + comptype+"-%02d"% (pinnum)
else:
print "**** ERROR PNCCONF: subboard name ",subboardname," in make_pinname: (sserial) ptype = ",ptype,pin
return None
elif ptype in (_PD.AMP8I20,_PD.POTO,_PD.POTE,_PD.POTD) or prefixonly:
return "%s.%s.%d.%d."% (make_name(boardname,halboardnum),subboardname,portnum,channel)
elif ptype in(_PD.PWMP,_PD.PDMP,_PD.UDMU):
comptype = "analogout"
return "%s.%s.%d.%d."% (make_name(boardname,halboardnum),subboardname,portnum,channel) + comptype+"%d"% (compnum)
elif ptype == (_PD.ANALOGIN):
if "7i64" in(subboardname):
comptype = "analog"
else:
comptype = "analogin"
return "%s.%s.%d.%d."% (make_name(boardname,halboardnum),subboardname,portnum,channel) + comptype+"%d"% (compnum)
elif ptype == (_PD.ENCA):
comptype = "enc"
return "%s.%s.%d.%d."% (make_name(boardname,halboardnum),subboardname,portnum,channel) + comptype+"%d"% (compnum)
else:
print "**** ERROR PNCCONF: pintype error in make_pinname: (sserial) ptype = ",ptype,pin
return None
else:
# sample pin name = mesa0c3pin1
pinnum = int(test[10:])
connum = int(test[6:7])
                # we iterate over this dict because of locale translation problems when using
                # comptype = type_name[ptype]
comptype = "ERROR FINDING COMPONENT TYPE"
                # we need concount (connector designations are not in numerical order, pin names are) and compnum from this
for concount,i in enumerate(self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._NUMOFCNCTRS]):
if i == connum:
dummy,compnum = self.d["mesa%d_currentfirmwaredata"% (boardnum)][_PD._STARTOFDATA+pinnum+(concount*24)]
break
for key,value in type_name.iteritems():
if key == ptype: comptype = value
if value == "Error":
print "**** ERROR PNCCONF: pintype error in make_pinname: (mesa) ptype = ",ptype
return None
# if gpionumber flag is true - convert to gpio pin name
if gpionumber or ptype in(_PD.GPIOI,_PD.GPIOO,_PD.GPIOD,_PD.SSR0):
print '->',ptype,dummy,compnum,pin
if ptype == _PD.SSR0:
compnum -= 100
return "%s."% (make_name(boardname,halboardnum)) + "ssr.00.out-%02d"% (compnum)
else:
compnum = int(pinnum)+(concount* num_of_pins )
return "%s."% (make_name(boardname,halboardnum)) + "gpio.%03d"% (compnum)
elif ptype in (_PD.ENCA,_PD.ENCB,_PD.ENCI,_PD.ENCM,_PD.PWMP,_PD.PWMD,_PD.PWME,_PD.PDMP,_PD.PDMD,_PD.PDME,_PD.UDMU,_PD.UDMD,_PD.UDME,
_PD.STEPA,_PD.STEPB,_PD.STEPC,_PD.STEPD,_PD.STEPE,_PD.STEPF,
_PD.TPPWMA,_PD.TPPWMB,_PD.TPPWMC,_PD.TPPWMAN,_PD.TPPWMBN,_PD.TPPWMCN,_PD.TPPWME,_PD.TPPWMF):
return "%s."% (make_name(boardname,halboardnum)) + comptype+".%02d"% (compnum)
elif ptype in (_PD.RES0,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5):
temp = (_PD.RES0,_PD.RES1,_PD.RES2,_PD.RES3,_PD.RES4,_PD.RES5)
for num,dummy in enumerate(temp):
if ptype == dummy:break
return "%s."% (make_name(boardname,halboardnum)) + comptype+".%02d"% (compnum*6+num)
elif ptype in (_PD.MXE0,_PD.MXE1):
num = 0
if ptype == _PD.MXE1: num = 1
return "%s."% (make_name(boardname,halboardnum)) + comptype+".%02d"% ((compnum * 2 + num))
elif 'pp' in test:
print test
ending = "-out"
test = str(pin)
print self.d[pin]
pintype = str(test[4:5])
print pintype
pinnum = int(test[8:])
print pinnum
connum = int(test[2:3])-1
print connum
if pintype == 'I': ending = "-in"
return "parport."+str(connum)+".pin-%02d"%(pinnum)+ending
else:
print "pintype error in make_pinname: pinname = ",test
return None
    # Boilerplate code
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, item, value):
return setattr(self, item, value)
# starting with 'pncconf -d' gives debug messages
if __name__ == "__main__":
usage = "usage: pncconf -h for options"
parser = OptionParser(usage=usage)
parser.add_option("-d", action="store", metavar='all', dest="debug",
help="Print debug info and ignore realtime/kernel tests.\nUse 'alldev' to show all the page tabs. 'step' to stop at each debug print,'excl','5i25','rawfirm','curfirm'")
(options, args) = parser.parse_args()
if options.debug:
app = App(dbgstate=options.debug)
else:
app = App('')
gtk.main()
| lgpl-2.1 | -1,847,330,764,788,389,000 | 54.005267 | 192 | 0.527267 | false |
HaebinShin/tensorflow | tensorflow/python/ops/nn_ops.py | 1 | 56530 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
output[b, i, j, k] = sum_{di, dj, q} filters[di, dj, q, k] *
value[b, i + rate * di, j + rate * dj, q]
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: [Semantic Image Segmentation with Deep
Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
The same operation is investigated further in [Multi-Scale Context Aggregation
by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works
that effectively use atrous convolution in different ways are, among others,
[OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image
Scanning with Deep Max-Pooling Convolutional Neural Networks]
(http://arxiv.org/abs/1302.1700). Atrous convolution is also closely related
to the so-called noble identities in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
atrous_conv2d(value, filters, rate, padding=padding)
to the following three operations:
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
  with odd heights/widths:
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
can be equivalently performed cheaper in terms of computation and memory as:
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.op_scope([value, filters], name, "atrous_conv2d") as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape()[3].is_compatible_with(filters.get_shape()[2]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3], filters.get_shape()[2]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
value = gen_nn_ops.conv2d(input=value,
filter=filters,
strides=[1, 1, 1, 1],
padding=padding)
return value
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("Invalid padding")
# Handle input whose shape is unknown during graph creation.
if value.get_shape().is_fully_defined():
value_shape = value.get_shape().as_list()
else:
value_shape = array_ops.shape(value)
in_height = value_shape[1] + pad_top + pad_bottom
in_width = value_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch includes both padding components.
space_to_batch_pad = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
value = array_ops.space_to_batch(input=value,
paddings=space_to_batch_pad,
block_size=rate)
value = gen_nn_ops.conv2d(input=value,
filter=filters,
strides=[1, 1, 1, 1],
padding="VALID",
name=name)
# The crops argument to batch_to_space is just the extra padding component.
batch_to_space_crop = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.batch_to_space(input=value,
crops=batch_to_space_crop,
block_size=rate)
return value
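

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of atrous_conv2d, assuming the TF 0.x-style API
# defined above; the `_atrous_conv2d_example` name and the shapes are made up
# purely for illustration.
def _atrous_conv2d_example():
  # One 8x8 image with 3 channels and a 3x3 filter producing 16 channels.
  value = random_ops.random_uniform([1, 8, 8, 3])
  filters = random_ops.random_uniform([3, 3, 3, 16])
  # With rate=2 the effective filter size is 5x5; "SAME" padding keeps the
  # 8x8 spatial dimensions, so the result has shape [1, 8, 8, 16].
  return atrous_conv2d(value, filters, rate=2, padding="SAME")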
def conv2d_transpose(value,
filter,
output_shape,
strides,
padding="SAME",
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]`.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.op_scope([value, filter, output_shape], name,
"conv2d_transpose") as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter")
if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[3], filter.get_shape(
)[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}"
.format(output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3], filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
name=name)
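

# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of conv2d_transpose as the transpose (gradient) of conv2d:
# upsampling a [1, 4, 4, 8] tensor to [1, 8, 8, 3] with stride 2. The helper
# name `_conv2d_transpose_example` and all shapes are illustrative only.
def _conv2d_transpose_example():
  value = random_ops.random_uniform([1, 4, 4, 8])
  # Note the filter layout: [height, width, output_channels, in_channels].
  filt = random_ops.random_uniform([3, 3, 3, 8])
  return conv2d_transpose(value, filt, output_shape=[1, 8, 8, 3],
                          strides=[1, 2, 2, 1], padding="SAME")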
def conv3d_transpose(value,
filter,
output_shape,
strides,
padding="SAME",
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv3d` rather than an actual
deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.op_scope([value, filter, output_shape], name,
"conv3d_transpose") as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter")
if not value.get_shape()[4].is_compatible_with(filter.get_shape()[4]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[4], filter.get_shape(
)[4]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
raise ValueError("output_shape must have shape (5,), got {}"
.format(output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [5] if reached this point.
if not filter.get_shape()[3].is_compatible_with(output_shape[4]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[4], filter.get_shape()[3]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv3d_backprop_input_v2(input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
name=name)
# pylint: disable=protected-access
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.op_scope([value, bias], name, "BiasAdd") as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name)
ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape)
ops.RegisterShape("BiasAddGrad")(common_shapes.bias_add_grad_shape)
# pylint: disable=protected-access
def bias_add_v1(value, bias, name=None):
"""Adds `bias` to `value`.
  This is a deprecated version of bias_add that will soon be removed.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.op_scope([value, bias], name, "BiasAddV1") as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops._bias_add_v1(value, bias, name=name)
ops.RegisterShape("BiasAddV1")(common_shapes.bias_add_shape)
ops.RegisterShape("BiasAddGradV1")(common_shapes.bias_add_grad_shape)
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.op_scope([features], name, "Relu6") as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops._relu6(features, name=name)
def softmax_cross_entropy_with_logits(logits, labels, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
`logits` and `labels` must have the same shape `[batch_size, num_classes]`
and the same dtype (either `float32` or `float64`).
Args:
logits: Unscaled log probabilities.
labels: Each row `labels[i]` must be a valid probability distribution.
name: A name for the operation (optional).
Returns:
A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
softmax cross entropy loss.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
logits, labels, name=name)
return cost
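

# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example: dense (one-hot or soft) labels with unscaled logits. The
# values and the `_softmax_xent_example` name are made up for illustration.
def _softmax_xent_example():
  logits = ops.convert_to_tensor([[2.0, 1.0, 0.1],
                                  [0.5, 2.5, 0.3]])
  labels = ops.convert_to_tensor([[1.0, 0.0, 0.0],
                                  [0.0, 1.0, 0.0]])
  # Returns a length-2 vector of per-example losses; do NOT apply softmax to
  # the logits beforehand, the op does that internally.
  return softmax_cross_entropy_with_logits(logits, labels)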
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a softmax
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape `[batch_size, num_classes]` and
labels of shape `[batch_size]`. But higher dimensions are supported.
Args:
logits: Unscaled log probabilities of rank `r` and shape
`[d_0, d_1, ..., d_{r-2}, num_classes]` and dtype `float32` or `float64`.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-2}]` and dtype `int32` or
`int64`. Each entry in `labels` must be an index in `[0, num_classes)`.
Other values will result in a loss of 0, but incorrect gradient
computations.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
"""
# TODO(pcmurray) Raise an error when the label is not an index in
# [0, num_classes). Note: This could break users who call this with bad
# labels, but disregard the bad results.
# Reshape logits and labels to rank 2.
with ops.op_scope([labels, logits], name,
"SparseSoftmaxCrossEntropyWithLogits"):
labels = ops.convert_to_tensor(labels)
logits = ops.convert_to_tensor(logits)
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = array_ops.shape(labels)
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError("Logits cannot be scalars - received shape %s.",
logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Labels rank (received %s) should equal "
"logits rank (received %s) - 1.",
labels_static_shape.ndims, logits.get_shape().ndims)
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
logits, labels, name=name)
return cost
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = array_ops.gather(array_ops.shape(logits),
array_ops.rank(logits) - 1)
logits = array_ops.reshape(logits, [-1, num_classes])
labels = array_ops.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
logits, labels, name=name)
cost = array_ops.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
return cost
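

# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example: integer class indices instead of dense label rows. The
# values and the `_sparse_softmax_xent_example` name are illustrative only.
def _sparse_softmax_xent_example():
  logits = ops.convert_to_tensor([[2.0, 1.0, 0.1],
                                  [0.5, 2.5, 0.3]])
  # One index in [0, num_classes) per row of logits; labels rank is
  # logits rank - 1, as required above.
  labels = ops.convert_to_tensor([0, 1])
  return sparse_softmax_cross_entropy_with_logits(logits, labels)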
@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
"""Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
logits_shape = op.inputs[0].get_shape()
input_shape = logits_shape.with_rank(2)
batch_size = input_shape[0]
# labels_shape
op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
return [tensor_shape.vector(batch_size.value), input_shape]
@ops.RegisterShape("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsShape(op):
"""Shape function for SoftmaxCrossEntropyWithLogits op."""
logits_shape = op.inputs[0].get_shape()
labels_shape = op.inputs[1].get_shape()
input_shape = logits_shape.merge_with(labels_shape).with_rank(2)
batch_size = input_shape[0]
return [tensor_shape.vector(batch_size.value), input_shape]
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: A list of ints that has length >= 4.
The size of the window for each dimension of the input tensor.
strides: A list of ints that has length >= 4.
The stride of the sliding window for each dimension of the
input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.op_scope([value], name, "AvgPool") as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops._avg_pool(value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and
type `tf.float32`.
ksize: A list of ints that has length >= 4. The size of the window for
each dimension of the input tensor.
strides: A list of ints that has length >= 4. The stride of the sliding
window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution)
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with type `tf.float32`. The max pooled output tensor.
"""
with ops.op_scope([value], name, "MaxPool") as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops._max_pool(value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
ops.RegisterShape("Relu")(common_shapes.unchanged_shape)
ops.RegisterShape("Relu6")(common_shapes.unchanged_shape)
ops.RegisterShape("Elu")(common_shapes.unchanged_shape)
ops.RegisterShape("Softplus")(common_shapes.unchanged_shape)
ops.RegisterShape("Softsign")(common_shapes.unchanged_shape)
@ops.RegisterShape("ReluGrad")
@ops.RegisterShape("Relu6Grad")
@ops.RegisterShape("EluGrad")
@ops.RegisterShape("SoftplusGrad")
@ops.RegisterShape("SoftsignGrad")
def _BinaryElementwiseShape(op):
"""Returns same shape as both inputs to op.
Args:
op: Input operation.
Returns:
Shape of both inputs to `op`.
"""
return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]
ops.RegisterShape("L2Loss")(common_shapes.scalar_shape)
ops.RegisterShape("LRN")(common_shapes.unchanged_shape_with_rank(4))
@ops.RegisterShape("LRNGrad")
def _LRNGradShape(op):
"""Shape function for LRNGrad op."""
in_grads_shape = op.inputs[0].get_shape().with_rank(4)
in_image_shape = op.inputs[1].get_shape().with_rank(4)
out_image_shape = op.inputs[2].get_shape().with_rank(4)
return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)]
ops.RegisterShape("Softmax")(common_shapes.unchanged_shape_with_rank(2))
ops.RegisterShape("LogSoftmax")(common_shapes.unchanged_shape_with_rank(2))
@ops.RegisterShape("InTopK")
def _InTopKShape(op):
"""Shape function for InTopK op."""
predictions_shape = op.inputs[0].get_shape().with_rank(2)
targets_shape = op.inputs[1].get_shape().with_rank(1)
batch_size = predictions_shape[0].merge_with(targets_shape[0])
return [tensor_shape.vector(batch_size.value)]
@ops.RegisterShape("TopK")
@ops.RegisterShape("TopKV2")
def _TopKShape(op):
"""Shape function for TopK and TopKV2 ops."""
input_shape = op.inputs[0].get_shape().with_rank_at_least(1)
if len(op.inputs) >= 2:
k = tensor_util.constant_value(op.inputs[1])
else:
k = op.get_attr("k")
last = input_shape[-1].value
if last is not None and k is not None and last < k:
raise ValueError("input.shape %s must have last dimension >= k = %d" %
(input_shape, k))
output_shape = input_shape[:-1].concatenate([k])
return [output_shape, output_shape]
@ops.RegisterShape("BatchNormWithGlobalNormalization")
def _BatchNormShape(op):
"""Shape function for BatchNormWithGlobalNormalization op."""
input_shape = op.inputs[0].get_shape().with_rank(4)
mean_shape = op.inputs[1].get_shape().with_rank(1)
var_shape = op.inputs[2].get_shape().with_rank(1)
beta_shape = op.inputs[3].get_shape().with_rank(1)
gamma_shape = op.inputs[4].get_shape().with_rank(1)
mean_shape[0].merge_with(input_shape[3])
var_shape[0].merge_with(input_shape[3])
beta_shape[0].merge_with(input_shape[3])
gamma_shape[0].merge_with(input_shape[3])
return [input_shape]
@ops.RegisterShape("BatchNormWithGlobalNormalizationGrad")
def _BatchNormGradShape(op):
"""Shape function for BatchNormWithGlobalNormalizationGrad op."""
input_shape = op.inputs[0].get_shape().with_rank(4)
mean_shape = op.inputs[1].get_shape().with_rank(1)
var_shape = op.inputs[2].get_shape().with_rank(1)
beta_shape = op.inputs[3].get_shape().with_rank(1)
out_backprop_shape = op.inputs[4].get_shape().with_rank(4)
input_shape = input_shape.merge_with(out_backprop_shape)
vector_dim = input_shape[3]
vector_dim = vector_dim.merge_with(mean_shape[0])
vector_dim = vector_dim.merge_with(var_shape[0])
vector_dim = vector_dim.merge_with(beta_shape[0])
return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)
ops.RegisterShape("Conv2D")(common_shapes.conv2d_shape)
ops.RegisterShape("DepthwiseConv2dNative")(
common_shapes.depthwise_conv2d_native_shape)
ops.RegisterShape("AvgPool")(common_shapes.avg_pool_shape)
ops.RegisterShape("MaxPool")(common_shapes.max_pool_shape)
@ops.RegisterShape("MaxPoolWithArgmax")
def _MaxPoolWithArgMaxShape(op):
"""Shape function for MaxPoolWithArgmax op."""
return common_shapes.max_pool_shape(op) * 2
@ops.RegisterShape("AvgPoolGrad")
def _AvgPoolGradShape(op):
"""Shape function for the AvgPoolGrad op."""
orig_input_shape = tensor_util.constant_value(op.inputs[0])
if orig_input_shape is not None:
return [tensor_shape.TensorShape(orig_input_shape.tolist())]
else:
# NOTE(mrry): We could in principle work out the shape from the
# gradients and the attrs, but if we do not know orig_input_shape
# statically, then we are unlikely to know the shape of the
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("Conv2DBackpropFilter")
def _Conv2DBackpropFilterShape(op):
"""Shape function for the Conv2DBackpropFilter op."""
filter_shape = tensor_util.constant_value(op.inputs[1])
if filter_shape is not None:
return [tensor_shape.TensorShape(filter_shape.tolist())]
else:
# NOTE(mrry): We could in principle work out the shape from the
# gradients and the attrs, but if we do not know filter_shape
# statically, then we are unlikely to know the shape of the
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("Conv2DBackpropInput")
def _Conv2DBackpropInputShape(op):
"""Shape function for the Conv2DBackpropInput op."""
input_shape = tensor_util.constant_value(op.inputs[0])
if input_shape is not None:
return [tensor_shape.TensorShape(input_shape.tolist())]
else:
# NOTE(mrry): We could in principle work out the shape from the
# gradients and the attrs, but if we do not know input_shape
# statically, then we are unlikely to know the shape of the
# gradients either.
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterShape(op):
"""Shape function for the DepthwiseConv2dNativeBackpropFilter op."""
filter_shape = tensor_util.constant_value(op.inputs[1])
if filter_shape is not None:
return [tensor_shape.TensorShape(filter_shape.tolist())]
else:
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("DepthwiseConv2dNativeBackpropInput")
def _DepthwiseConv2dNativeBackpropInputShape(op):
"""Shape function for the DepthwiseConv2dNativeBackpropInput op."""
input_shape = tensor_util.constant_value(op.inputs[0])
if input_shape is not None:
return [tensor_shape.TensorShape(input_shape.tolist())]
else:
return [tensor_shape.unknown_shape(ndims=4)]
@ops.RegisterShape("MaxPoolGrad")
@ops.RegisterShape("MaxPoolGradWithArgmax")
def _MaxPoolGradShape(op):
"""Shape function for the MaxPoolGrad op."""
orig_input_shape = op.inputs[0].get_shape().with_rank(4)
return [orig_input_shape]
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (output_count * filter_in_depth * filter_height *
filter_width * 2))
@ops.RegisterStatistics("Conv2D", "weight_parameters")
def _calc_conv_weight_params(graph, node):
"""Calculates the on-disk size of the weights for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
filter_out_depth = int(filter_shape[3])
return ops.OpStats("weight_parameters", (filter_height * filter_width *
filter_in_depth * filter_out_depth))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "weight_parameters")
def _calc_depthwise_conv_weight_params(graph, node):
"""Calculates the on-disk size of the weights for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
filter_channel_multiplier = int(filter_shape[3])
return ops.OpStats("weight_parameters", (filter_height * filter_width *
filter_in_depth *
filter_channel_multiplier))
@ops.RegisterShape("Conv3D")
def _Conv3DShape(op):
"""Shape function for Conv3D."""
input_shape = op.inputs[0].get_shape().with_rank(5)
filter_shape = op.inputs[1].get_shape().with_rank(5)
batch_size = input_shape[0]
out_channels = filter_shape[4]
# Check that the input number of channels is compatible between
# input data and filter size.
input_shape[4].assert_is_compatible_with(filter_shape[3])
stride_b, stride_p, stride_r, stride_c, stride_d = op.get_attr("strides")
assert stride_b == 1
assert stride_d == 1
padding_type = op.get_attr("padding")
out_planes, out_rows, out_cols = common_shapes.get_conv_output_size(
input_shape[1:4], filter_shape[0:3], (stride_p, stride_r, stride_c),
padding_type)
return [tensor_shape.TensorShape([batch_size, out_planes, out_rows, out_cols,
out_channels])]
@ops.RegisterShape("MaxPool3D")
@ops.RegisterShape("AvgPool3D")
def _Pool3DShape(op):
"""Shape function for Max/AvgPool3D."""
input_shape = op.inputs[0].get_shape().with_rank(5)
ksize_b, ksize_p, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
assert ksize_b == 1
assert ksize_d == 1
stride_b, stride_p, stride_r, stride_c, stride_d = op.get_attr("strides")
assert stride_b == 1
assert stride_d == 1
batch_size = input_shape[0]
channels = input_shape[4]
padding = op.get_attr("padding")
out_planes, out_rows, out_cols = common_shapes.get_conv_output_size(
input_shape[1:4], (ksize_p, ksize_r, ksize_c),
(stride_p, stride_r, stride_c), padding)
return [tensor_shape.TensorShape([batch_size, out_planes, out_rows, out_cols,
channels])]
@ops.RegisterShape("Conv3DBackpropFilter")
def _Conv3DBackpropFilterShape(op):
"""Shape function for the Conv3DBackpropFilter op."""
filter_shape = op.inputs[1].get_shape()
return [filter_shape.with_rank(5)]
@ops.RegisterShape("Conv3DBackpropInput")
def _Conv3DBackpropInputShape(op):
"""Shape function for the Conv3DBackpropInput op."""
input_shape = op.inputs[0].get_shape()
return [input_shape.with_rank(5)]
@ops.RegisterShape("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterShapeV2(op):
"""Shape function for the Conv3DBackpropFilterV2 op."""
filter_shape = tensor_util.constant_value(op.inputs[1])
return [tensor_shape.TensorShape(filter_shape).with_rank(5)]
@ops.RegisterShape("Conv3DBackpropInputV2")
def _Conv3DBackpropInputShapeV2(op):
"""Shape function for the Conv3DBackpropInputV2 op."""
input_shape = tensor_util.constant_value(op.inputs[0])
return [tensor_shape.TensorShape(input_shape).with_rank(5)]
@ops.RegisterShape("AvgPool3DGrad")
def _AvgPool3DGradShape(op):
"""Shape function for the AvgPool3DGrad op."""
orig_input_shape = tensor_util.constant_value(op.inputs[0])
return [tensor_shape.TensorShape(orig_input_shape).with_rank(5)]
@ops.RegisterShape("MaxPool3DGrad")
def _MaxPool3DGradShape(op):
"""Shape function for the MaxPoolGrad op."""
orig_input_shape = op.inputs[0].get_shape().with_rank(5)
return [orig_input_shape]
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@ops.RegisterStatistics("BiasAdd", "weight_parameters")
def _calc_bias_add_weight_params(graph, node):
"""Calculates the on-disk weight parameters for BiasAdd."""
bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
bias_shape.assert_is_fully_defined()
bias_count = np.prod(bias_shape.as_list())
return ops.OpStats("weight_parameters", bias_count)
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "xw_plus_b") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
  This is a deprecated version of xw_plus_b that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "xw_plus_b_v1") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
# pylint: disable=invalid-name
def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
"""Computes dropout.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A tensor.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `keep_prob` is not in `(0, 1]`.
"""
with ops.op_scope([x], name, "dropout") as name:
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(keep_prob,
dtype=x.dtype,
name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(noise_shape,
seed=seed,
dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = x * math_ops.inv(keep_prob) * binary_tensor
ret.set_shape(x.get_shape())
return ret
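

# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of `noise_shape`: dropping whole feature columns at once so
# every row keeps or drops the same features. The helper name and shapes are
# made up for illustration.
def _dropout_example():
  x = random_ops.random_uniform([4, 6])
  # noise_shape [1, 6] broadcasts the keep/drop decision across dimension 0,
  # so a dropped column is dropped for all 4 rows; kept values are scaled by
  # 1 / keep_prob to preserve the expected sum.
  return dropout(x, keep_prob=0.5, noise_shape=[1, 6])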
def top_k(input, k=1, sorted=True, name=None):
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank-1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)
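

# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example: top_k on a small matrix returns both values and indices
# along the last dimension. The values and helper name are illustrative only.
def _top_k_example():
  scores = ops.convert_to_tensor([[0.1, 0.9, 0.4],
                                  [0.8, 0.2, 0.7]])
  # values and indices both have shape [2, 2]; e.g. the first row yields
  # values [0.9, 0.4] with indices [1, 2].
  values, indices = top_k(scores, k=2)
  return values, indices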
def conv1d(value, filters, stride, padding,
use_cudnn_on_gpu=None, data_format=None,
name=None):
"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape [batch, in_width, in_channels]
and a filter / kernel tensor of shape
[filter_width, in_channels, out_channels], this op reshapes
the arguments to pass them to conv2d to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes
`tf.nn.conv2d`. A tensor of shape [batch, in_width, in_channels]
is reshaped to [batch, 1, in_width, in_channels], and the filter
is reshaped to [1, filter_width, in_channels, out_channels].
The result is then reshaped back to [batch, out_width, out_channels]
(where out_width is a function of the stride and padding as in
conv2d) and returned to the caller.
Args:
value: A 3D `Tensor`. Must be of type `float32` or `float64`.
filters: A 3D `Tensor`. Must have the same type as `input`.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: 'SAME' or 'VALID'
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from `"NHWC", "NCHW"`. Defaults
to `"NHWC"`, the data is stored in the order of
[batch, in_width, in_channels]. The `"NCHW"` format stores
data as [batch, in_channels, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
"""
with ops.op_scope([value, filters], name, "conv1d") as name:
# Reshape the input tensor to [batch, 1, in_width, in_channels]
value = array_ops.expand_dims(value, 1)
# And reshape the filter to [1, filter_width, in_channels, out_channels]
filters = array_ops.expand_dims(filters, 0)
result = gen_nn_ops.conv2d(value, filters, [1, 1, stride, 1], padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
return array_ops.squeeze(result, [1])
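

# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example: a 1-D convolution over a length-10 sequence with 4 input
# channels and 8 filters of width 3. Shapes and the `_conv1d_example` name
# are illustrative only.
def _conv1d_example():
  value = random_ops.random_uniform([2, 10, 4])   # [batch, in_width, in_channels]
  filters = random_ops.random_uniform([3, 4, 8])  # [filter_width, in, out]
  # Internally reshaped to 4-D and passed to conv2d; with stride 2 and
  # "SAME" padding the output shape is [2, 5, 8].
  return conv1d(value, filters, stride=2, padding="SAME")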
@ops.RegisterShape("Dilation2D")
def _Dilation2DShape(op):
"""Shape function for Dilation2D op."""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(3)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
rate_b, rate_r, rate_c, rate_d = op.get_attr("rates")
if rate_b != 1 or rate_d != 1:
raise ValueError("Current implementation does not yet support "
"rates in the batch and depth dimensions.")
filter_rows_eff = filter_rows + (filter_rows - 1) * (rate_r - 1)
filter_cols_eff = filter_cols + (filter_cols - 1) * (rate_c - 1)
padding = op.get_attr("padding")
out_rows, out_cols = common_shapes.get2d_conv_output_size(in_rows, in_cols,
filter_rows_eff,
filter_cols_eff,
stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
return [tensor_shape.TensorShape(output_shape)]
@ops.RegisterShape("Dilation2DBackpropInput")
def _Dilation2DBackpropInputShape(op):
"""Shape function for Dilation2DBackpropInput op."""
return [op.inputs[0].get_shape()]
@ops.RegisterShape("Dilation2DBackpropFilter")
def _Dilation2DBackpropFilterShape(op):
"""Shape function for Dilation2DBackpropFilter op."""
return [op.inputs[1].get_shape()]
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
"""Calculates the compute resources needed for Dilation2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("Dilation2D", "weight_parameters")
def _calc_dilation2d_weight_params(graph, node):
"""Calculates the on-disk size of the weights for Dilation2D."""
filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
filter_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_depth = int(filter_shape[2])
return ops.OpStats("weight_parameters",
(filter_height * filter_width * filter_depth))
def erosion2d(value, kernel, strides, rates, padding, name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
kernel[dy, dx, c]
Duality: The erosion of `value` by the `kernel` is equal to the negation of
the dilation of `-value` by the reflected `kernel`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
    ValueError: If the `value` depth does not match `kernel`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.op_scope([value, kernel], name, "erosion2d") as name:
# Reduce erosion to dilation by duality.
return math_ops.neg(gen_nn_ops.dilation2d(input=math_ops.neg(value),
filter=array_ops.reverse(
kernel, [True, True, False]),
strides=strides,
rates=rates,
padding=padding,
name=name))
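

# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of grayscale erosion, which the function above implements
# via the erosion/dilation duality. The helper name and shapes are made up
# for illustration.
def _erosion2d_example():
  value = random_ops.random_uniform([1, 8, 8, 3])
  kernel = random_ops.random_uniform([3, 3, 3])
  # Unit strides and rates; with "SAME" padding the output keeps the input's
  # spatial shape: [1, 8, 8, 3].
  return erosion2d(value, kernel, strides=[1, 1, 1, 1],
                   rates=[1, 1, 1, 1], padding="SAME")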
# pylint: enable=invalid-name
| apache-2.0 | 5,910,611,416,458,937,000 | 41.535741 | 92 | 0.660375 | false |
jxwufan/AssociativeRetrieval | FW_model.py | 1 | 3847 | import tensorflow as tf
from FastWeightsRNN import LayerNormFastWeightsBasicRNNCell
from tensorflow.python.ops import seq2seq
import utils
import numpy as np
class FW_model(object):
def __init__(self, config=None, mode=None):
self.config = config
self.mode = mode
self.build_graph()
self.load_validation()
def load_validation(self):
data_reader = utils.DataReader(data_filename="input_seqs_validation", batch_size=16)
inputs_seqs_batch, outputs_batch = data_reader.read(False, 1)
init_op = tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables())
sess = tf.Session()
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
self.validation_inputs = []
self.validation_targets = []
try:
while not coord.should_stop():
input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
self.validation_inputs.append(input_data)
self.validation_targets.append(targets)
except tf.errors.OutOfRangeError:
pass
finally:
coord.request_stop()
coord.join(threads)
sess.close()
self.validation_inputs = np.array(self.validation_inputs).reshape([-1, self.config.input_length])
self.validation_targets = np.array(self.validation_targets).reshape([-1, 1])
def build_graph(self):
config = self.config
self.reader = utils.DataReader(seq_len=config.seq_length, batch_size=config.batch_size, data_filename=config.data_filename)
self.cell = LayerNormFastWeightsBasicRNNCell(num_units=config.rnn_size)
self.input_data = tf.placeholder(tf.int32, [None, config.input_length])
self.targets = tf.placeholder(tf.int32, [None, 1])
self.initial_state = self.cell.zero_state(tf.shape(self.targets)[0], tf.float32)
self.initial_fast_weights = self.cell.zero_fast_weights(tf.shape(self.targets)[0], tf.float32)
with tf.variable_scope("input_embedding"):
embedding = tf.get_variable("embedding", [config.vocab_size, config.embedding_size])
inputs = tf.split(1, config.input_length, tf.nn.embedding_lookup(embedding, self.input_data))
inputs = [tf.squeeze(input, [1]) for input in inputs]
with tf.variable_scope("send_to_rnn"):
state = (self.initial_state, self.initial_fast_weights)
output = None
for i, input in enumerate(inputs):
if i > 0:
tf.get_variable_scope().reuse_variables()
output, state = self.cell(input, state)
with tf.variable_scope("softmax"):
softmax_w = tf.get_variable("softmax_w", [config.rnn_size, config.vocab_size])
softmax_b = tf.get_variable("softmax_b", [config.vocab_size])
self.logits = tf.matmul(output, softmax_w) + softmax_b
self.probs = tf.nn.softmax(self.logits)
self.output = tf.cast(tf.reshape(tf.arg_max(self.probs, 1), [-1, 1]), tf.int32)
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.output, self.targets), tf.float32))
loss = seq2seq.sequence_loss_by_example([self.logits],
[tf.reshape(self.targets, [-1])],
[tf.ones([config.batch_size])],
config.vocab_size)
self.cost = tf.reduce_mean(loss)
self.final_state = state
# self.lr = tf.Variable(0.001, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
config.grad_clip)
optimizer = tf.train.AdamOptimizer() # self.lr)
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
self.summary_accuracy = tf.scalar_summary('accuracy', self.accuracy)
tf.scalar_summary('cost', self.cost)
self.summary_all = tf.merge_all_summaries()
| apache-2.0 | -3,620,637,527,844,677,000 | 40.815217 | 127 | 0.653236 | false |
joseguerrero/sembrando | src/presentacion/paginas/pantalla9.py | 1 | 30062 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
from librerias import pantalla
from librerias.boton import boton
from librerias.texto import texto
from librerias.popups import PopUp
from librerias.imagen import imagen
from librerias.contenido import cont
from librerias.imgfondo import fondo
from librerias.pixelperfect import *
from librerias.textopopups import p9
from librerias.objmask import object_mask
from paginas import menucfg
from paginas import pantalla2
from paginas import pantalla8
from paginas import pantalla10
class estado(pantalla.Pantalla):
def __init__(self, parent):
"""
        Class initializer method.
        @param parent: Instance of the screen manager.
        @type parent: Manejador
"""
self.parent = parent
self.previa = True
self.deteccion_movimiento = False
self.fondo_texto = False
self.background = pygame.image.load(self.fondos + "fondo-mapa2.png")
self.banner_siembra = imagen(self.banners + "banner-siembra.png", 0, 0)
self.banner_inf = imagen(self.banners + "banner-inf.png", 0, 432)
self.mouse = object_mask("Cursor", 850, 512, self.varios + "puntero.png")
        # To keep the map pieces correctly positioned, the x and y values of the regions must not be modified, only those of zulia.
self.zulia = object_mask(u"región zuliana", 13, 140, self.varios + "zulia-des.png", self.varios + "zulia-act.png")
self.occ = object_mask(u"región occidental", self.zulia.rect.left + 55, self.zulia.rect.top - 6, self.varios + "occ-des.png", self.varios + "occ-act.png")
self.central = object_mask(u"región central", self.zulia.rect.left + 115, self.zulia.rect.top + 37, self.varios + "central-des.png", self.varios + "central-act.png")
self.capital = object_mask(u"región capital", self.zulia.rect.left + 152, self.zulia.rect.top + 32, self.varios + "capital-des.png", self.varios + "capital-act.png")
self.ori = object_mask(u"región nor oriental", self.zulia.rect.left +195, self.zulia.rect.top + 29, self.varios + "ori-des.png", self.varios + "ori-act.png")
self.andes = object_mask(u"región los andes", self.zulia.rect.left + 23, self.zulia.rect.top + 48, self.varios + "andes-des.png", self.varios + "andes-act.png")
self.llanos = object_mask(u"región los llanos", self.zulia.rect.left + 26, self.zulia.rect.top + 47, self.varios + "llanos-des.png", self.varios + "llanos-act.png")
self.guayana = object_mask(u"región guayana", self.zulia.rect.left + 140, self.zulia.rect.top + 48, self.varios + "guayana-des.png", self.varios + "guayana-act.png")
self.insu = object_mask(u"región insular", self.zulia.rect.left + 149, self.zulia.rect.top - 6, self.varios + "insular-des.png", self.varios + "insular-act.png")
self.limites1 = pygame.image.load(self.varios + "limitemar.png").convert_alpha()
self.limites2 = pygame.image.load(self.varios + "limitemar2.png").convert_alpha()
self.zona_r = pygame.image.load(self.varios + "zona-recla.png").convert_alpha()
self.n_estados = pygame.image.load(self.varios + "nombre-estados.png").convert_alpha()
self.cargar_botones()
self.cargar_textos()
self.resume()
self.bg = fondo(573, 377)
def cargar_textos(self):
"""
        Loads the texts used on this screen.
"""
self.texto9_2_1 = texto(490, 60, cont["texto9_2_1"] , self.parent.config.t_fuente, "normal", 1000)
self.texto9_2_2 = texto(490, self.texto9_2_1.y + self.texto9_2_1.ancho_final + 10, cont["texto9_2_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_2_3 = texto(490, self.texto9_2_2.y + self.texto9_2_2.ancho_final + 10, cont["texto9_2_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_2_4 = texto(490, self.texto9_2_3.y + self.texto9_2_3.ancho_final + 10, cont["texto9_2_4"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_3_1 = texto(490, 60, cont["texto9_3_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_3_2 = texto(490, self.texto9_3_1.y + self.texto9_3_1.ancho_final + 10, cont["texto9_3_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_3_3 = texto(490, self.texto9_3_2.y + self.texto9_3_2.ancho_final + 10, cont["texto9_3_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_4_1 = texto(490, 60, cont["texto9_4_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_4_2 = texto(490, self.texto9_4_1.y + self.texto9_4_1.ancho_final + 10, cont["texto9_4_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_4_3 = texto(490, self.texto9_4_2.y + self.texto9_4_2.ancho_final + 10, cont["texto9_4_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_5_1 = texto(490, 60, cont["texto9_5_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_5_2 = texto(490, self.texto9_5_1.y + self.texto9_5_1.ancho_final + 10, cont["texto9_5_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_5_3 = texto(490, self.texto9_5_2.y + self.texto9_5_2.ancho_final + 10, cont["texto9_5_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_6_1 = texto(490, 60, cont["texto9_6_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_6_2 = texto(490, self.texto9_6_1.y + self.texto9_6_1.ancho_final + 10, cont["texto9_6_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_6_3 = texto(490, self.texto9_6_2.y + self.texto9_6_2.ancho_final + 10, cont["texto9_6_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_7_1 = texto(490, 60, cont["texto9_7_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_7_2 = texto(490, self.texto9_7_1.y + self.texto9_7_1.ancho_final + 10, cont["texto9_7_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_7_3 = texto(490, self.texto9_7_2.y + self.texto9_7_2.ancho_final + 10, cont["texto9_7_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_8_1 = texto(490, 60, cont["texto9_8_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_8_2 = texto(490, self.texto9_8_1.y + self.texto9_8_1.ancho_final + 10, cont["texto9_8_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_8_3 = texto(490, self.texto9_8_2.y + self.texto9_8_2.ancho_final + 10, cont["texto9_8_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_9_1 = texto(490, 60, cont["texto9_9_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_9_2 = texto(490, self.texto9_9_1.y + self.texto9_9_1.ancho_final + 10, cont["texto9_9_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_9_3 = texto(490, self.texto9_9_2.y + self.texto9_9_2.ancho_final + 10, cont["texto9_9_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_10_1 = texto(490, 60, cont["texto9_10_1"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_10_2 = texto(490, self.texto9_10_1.y + self.texto9_10_1.ancho_final + 10, cont["texto9_10_2"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_10_3 = texto(490, self.texto9_10_2.y + self.texto9_10_2.ancho_final + 10, cont["texto9_10_3"], self.parent.config.t_fuente, "normal", 1000)
self.texto9_10_4 = texto(490, self.texto9_10_3.y + self.texto9_10_3.ancho_final + 10, cont["texto9_10_4"], self.parent.config.t_fuente, "normal", 1000)
self.popup_ins1 = PopUp(self.parent, (p9["texto1"] , ), "", None , self.grupo_popup, 1, 750, 400, -100)
self.popup_ins1.agregar_grupo()
def cargar_botones(self):
"""
        Loads the buttons used on this screen.
"""
self.home = boton("home", "Menú", self.botones + "boton-menu.png", 3, 889, 440, None, False, 1)
self.volver = boton("volver", "Regresar", self.botones + "boton-regresar.png", 3, 320, 445, None, False, 1)
self.config = boton("config", "Accesibilidad", self.botones + "boton-acc.png", 3 ,60, 445, None, False, 1)
def start(self):
pass
def cleanUp(self):
pass
def pause(self):
pass
def resume(self):
"""
        Checks whether the configuration was changed. Loads the initial values of this screen.
"""
if self.parent.config.texto_cambio == True:
self.cargar_botones()
self.cargar_textos()
self.parent.config.texto_cambio = False
self.popup_ins1.agregar_grupo()
self.capital.apagar()
self.ori.apagar()
self.zulia.apagar()
self.occ.apagar()
self.andes.apagar()
self.llanos.apagar()
self.central.apagar()
self.guayana.apagar()
self.grupo_banner.add(self.banner_siembra, self.banner_inf)
self.grupo_botones.add(self.config, self.volver, self.home)
self.grupo_mapa.add(self.zulia, self.occ, self.central, self.insu, self.capital, self.ori, self.andes, self.llanos, self.guayana)
self.spserver.processtext(u"Pantalla: La Agricultura en Venezuela: ", self.parent.config.activar_lector)
self.spserver.processtext(p9["lector1"], self.parent.config.activar_lector)
def handleEvents(self, events):
"""
        Evaluates the events generated on this screen.
        @param events: List of the events.
@type events: list
"""
for event in events:
if event.type == pygame.QUIT:
self.parent.quit()
if event.type == pygame.KEYDOWN:
self.chequeo_mascaras(self.grupo_mapa)
self.chequeo_botones(self.grupo_botones)
self.lista_final = self.lista_palabra + self.lista_mascaras + self.lista_botones
self.numero_elementos = len(self.lista_final)
if event.key == pygame.K_RIGHT:
self.fondo_texto = False
self.grupo_palabras.empty()
self.deteccion_movimiento = True
self.controlador_lector_evento_K_RIGHT()
elif event.key == pygame.K_LEFT:
self.fondo_texto = False
self.grupo_palabras.empty()
self.controlador_lector_evento_K_LEFT()
if self.deteccion_movimiento:
if event.key == pygame.K_RETURN:
if self.x.tipo_objeto == "mapa":
self.fondo_texto = True
if self.x.id == u"región capital":
self.grupo_palabras.empty()
self.central.apagar()
self.llanos.apagar()
self.zulia.apagar()
self.ori.apagar()
self.occ.apagar()
self.andes.apagar()
self.llanos.apagar()
self.capital.iluminar()
self.insu.apagar()
self.grupo_palabras.add(self.texto9_2_1.img_palabras, self.texto9_2_2.img_palabras, self.texto9_2_3.img_palabras, self.texto9_2_4.img_palabras)
self.spserver.processtext(cont["texto9_2_1l"] + self.texto9_2_2.texto + self.texto9_2_3.texto + self.texto9_2_4.texto, self.parent.config.activar_lector)
elif self.x.id == u"región central":
self.grupo_palabras.empty()
self.capital.apagar()
self.llanos.apagar()
self.zulia.apagar()
self.ori.apagar()
self.occ.apagar()
self.andes.apagar()
self.llanos.apagar()
self.central.iluminar()
self.insu.apagar()
self.grupo_palabras.add(self.texto9_3_1.img_palabras, self.texto9_3_2.img_palabras, self.texto9_3_3.img_palabras)
self.spserver.processtext(cont["texto9_3_1l"] + self.texto9_3_2.texto + self.texto9_3_3.texto, self.parent.config.activar_lector)
if self.x.id == u"región los llanos":
self.grupo_palabras.empty()
self.capital.apagar()
self.central.apagar()
self.ori.apagar()
self.zulia.apagar()
self.occ.apagar()
self.andes.apagar()
self.llanos.iluminar()
self.insu.apagar()
self.grupo_palabras.add(self.texto9_4_1.img_palabras, self.texto9_4_2.img_palabras, self.texto9_4_3.img_palabras)
self.spserver.processtext(cont["texto9_4_1l"] + self.texto9_4_2.texto + self.texto9_4_3.texto, self.parent.config.activar_lector)
if self.x.id == u"región occidental":
self.grupo_palabras.empty()
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.apagar()
self.zulia.apagar()
self.andes.apagar()
self.occ.iluminar()
self.llanos.apagar()
self.guayana.apagar()
self.insu.apagar()
self.grupo_palabras.add(self.texto9_5_1.img_palabras, self.texto9_5_2.img_palabras, self.texto9_5_3.img_palabras)
self.spserver.processtext(cont["texto9_5_1l"] + self.texto9_5_2.texto + self.texto9_5_3.texto, self.parent.config.activar_lector)
if self.x.id == u"región zuliana":
self.grupo_palabras.empty()
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.apagar()
self.zulia.iluminar()
self.occ.apagar()
self.andes.apagar()
self.llanos.apagar()
self.guayana.apagar()
self.insu.apagar()
self.grupo_palabras.add(self.texto9_6_1.img_palabras, self.texto9_6_2.img_palabras, self.texto9_6_3.img_palabras)
self.spserver.processtext(cont["texto9_6_1l"] + self.texto9_6_2.texto + self.texto9_6_3.texto, self.parent.config.activar_lector)
if self.x.id == u"región los andes":
self.grupo_palabras.empty()
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.zulia.apagar()
self.ori.apagar()
self.occ.apagar()
self.andes.iluminar()
self.llanos.apagar()
self.guayana.apagar()
self.insu.apagar()
self.grupo_palabras.add(self.texto9_7_1.img_palabras, self.texto9_7_2.img_palabras, self.texto9_7_3.img_palabras)
self.spserver.processtext(cont["texto9_7_1l"] + self.texto9_7_2.texto + self.texto9_7_3.texto, self.parent.config.activar_lector)
if self.x.id == u"región nor oriental":
self.grupo_palabras.empty()
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.iluminar()
self.zulia.apagar()
self.occ.apagar()
self.andes.apagar()
self.guayana.apagar()
self.insu.apagar()
self.grupo_palabras.add(self.texto9_8_1.img_palabras, self.texto9_8_2.img_palabras, self.texto9_8_3.img_palabras)
self.spserver.processtext(cont["texto9_8_1l"] + self.texto9_8_2.texto + self.texto9_8_3.texto, self.parent.config.activar_lector)
if self.x.id == u"región guayana":
self.grupo_palabras.empty()
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.apagar()
self.occ.apagar()
self.zulia.apagar()
self.andes.apagar()
self.llanos.apagar()
self.insu.apagar()
self.guayana.iluminar()
self.grupo_palabras.add(self.texto9_9_1.img_palabras, self.texto9_9_2.img_palabras, self.texto9_9_3.img_palabras)
self.spserver.processtext(cont["texto9_9_1l"] + self.texto9_9_2.texto + self.texto9_9_3.texto, self.parent.config.activar_lector)
if self.x.id == u"región insular":
self.grupo_palabras.empty()
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.apagar()
self.occ.apagar()
self.zulia.apagar()
self.andes.apagar()
self.llanos.apagar()
self.guayana.apagar()
self.insu.iluminar()
self.grupo_palabras.add(self.texto9_10_1.img_palabras, self.texto9_10_2.img_palabras, self.texto9_10_3.img_palabras, self.texto9_10_4.img_palabras )
self.spserver.processtext(cont["texto9_10_1l"] + self.texto9_10_2.texto + self.texto9_10_3.texto + self.texto9_10_4.texto, self.parent.config.activar_lector)
elif self.x.tipo_objeto == "boton":
if self.x.id == "volver":
self.limpiar_grupos()
self.parent.animacion = 3
self.parent.changeState(pantalla8.estado(self.parent, 3))
elif self.x.id == "config":
self.limpiar_grupos()
self.parent.pushState(menucfg.estado(self.parent, self.previa))
elif self.x.id == "home":
self.limpiar_grupos()
self.parent.changeState(pantalla2.estado(self.parent))
lista = spritecollide_pp(self.mouse, self.grupo_mapa)
if not lista == []:
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
self.deteccion_movimiento = False
self.fondo_texto = True
if lista[0].id == u"región capital":
self.central.apagar()
self.llanos.apagar()
self.ori.apagar()
self.occ.apagar()
self.zulia.apagar()
self.andes.apagar()
self.llanos.apagar()
self.capital.iluminar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_2_1.img_palabras, self.texto9_2_2.img_palabras, self.texto9_2_3.img_palabras , self.texto9_2_4.img_palabras)
if lista[0].id == u"región central":
self.capital.apagar()
self.llanos.apagar()
self.ori.apagar()
self.occ.apagar()
self.zulia.apagar()
self.andes.apagar()
self.llanos.apagar()
self.central.iluminar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_3_1.img_palabras, self.texto9_3_2.img_palabras, self.texto9_3_3.img_palabras)
if lista[0].id == u"región los llanos":
self.capital.apagar()
self.central.apagar()
self.llanos.iluminar()
self.zulia.apagar()
self.ori.apagar()
self.occ.apagar()
self.andes.apagar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_4_1.img_palabras, self.texto9_4_2.img_palabras, self.texto9_4_3.img_palabras)
if lista[0].id == u"región occidental":
self.capital.apagar()
self.llanos.apagar()
self.ori.apagar()
self.central.apagar()
self.zulia.apagar()
self.occ.iluminar()
self.llanos.apagar()
self.guayana.apagar()
self.andes.apagar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_5_1.img_palabras, self.texto9_5_2.img_palabras, self.texto9_5_3.img_palabras)
if lista[0].id == u"región zuliana":
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.apagar()
self.zulia.iluminar()
self.occ.apagar()
self.andes.apagar()
self.llanos.apagar()
self.guayana.apagar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_6_1.img_palabras, self.texto9_6_2.img_palabras, self.texto9_6_3.img_palabras)
if lista[0].id == u"región los andes":
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.guayana.apagar()
self.zulia.apagar()
self.ori.apagar()
self.occ.apagar()
self.andes.iluminar()
self.llanos.apagar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_7_1.img_palabras, self.texto9_7_2.img_palabras, self.texto9_7_3.img_palabras)
if lista[0].id == u"región nor oriental":
self.capital.apagar()
self.central.apagar()
self.ori.iluminar()
self.llanos.apagar()
self.guayana.apagar()
self.zulia.apagar()
self.occ.apagar()
self.andes.apagar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_8_1.img_palabras, self.texto9_8_2.img_palabras, self.texto9_8_3.img_palabras)
if lista[0].id == u"región guayana":
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.apagar()
self.zulia.apagar()
self.occ.apagar()
self.andes.apagar()
self.guayana.iluminar()
self.insu.apagar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_9_1.img_palabras, self.texto9_9_2.img_palabras, self.texto9_9_3.img_palabras)
if lista[0].id == u"región insular":
self.capital.apagar()
self.llanos.apagar()
self.central.apagar()
self.ori.apagar()
self.zulia.apagar()
self.occ.apagar()
self.andes.apagar()
self.guayana.apagar()
self.insu.iluminar()
self.grupo_palabras.empty()
self.grupo_palabras.add(self.texto9_10_1.img_palabras, self.texto9_10_2.img_palabras, self.texto9_10_3.img_palabras, self.texto9_10_4.img_palabras)
elif not self.deteccion_movimiento:
self.fondo_texto = False
self.capital.apagar()
self.central.apagar()
self.guayana.apagar()
self.andes.apagar()
self.zulia.apagar()
self.occ.apagar()
self.ori.apagar()
self.llanos.apagar()
self.grupo_palabras.empty()
self.grupo_fondotexto.empty()
if pygame.sprite.spritecollideany(self.raton, self.grupo_botones):
sprite = pygame.sprite.spritecollide(self.raton, self.grupo_botones, False)
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if sprite[0].id == "volver":
self.limpiar_grupos()
self.parent.animacion = 3
self.parent.changeState(pantalla8.estado(self.parent, 3))
elif sprite[0].id == "config":
self.limpiar_grupos()
self.parent.pushState(menucfg.estado(self.parent, self.previa))
elif sprite[0].id == "home":
self.limpiar_grupos()
self.parent.changeState(pantalla2.estado(self.parent))
self.minimag(events)
def update(self):
"""
        Updates the cursor position, the screen magnifier when it is enabled, the button
        tooltips and the corresponding animations or texts.
"""
self.raton.update()
self.obj_magno.magnificar(self.parent.screen)
self.grupo_botones.update(self.grupo_tooltip)
self.mouse.rect.center = pygame.mouse.get_pos()
def draw(self):
"""
        Draws the background and the elements belonging to the sprite groups onto the surface
        of the screen manager.
"""
self.parent.screen.blit(self.background, (0, 0))
self.grupo_banner.draw(self.parent.screen)
self.parent.screen.blit(self.zona_r, (320, 233))
self.parent.screen.blit(self.limites1, (50, 60))
self.parent.screen.blit(self.limites2, (305, 145))
self.grupo_mapa.draw(self.parent.screen)
self.grupo_popup.draw(self.parent.screen)
if self.fondo_texto:
self.parent.screen.blit(self.bg.img, (451, 55))
self.grupo_botones.draw(self.parent.screen)
self.grupo_fondotexto.draw(self.parent.screen)
self.grupo_palabras.draw(self.parent.screen)
self.grupo_tooltip.draw(self.parent.screen)
self.parent.screen.blit(self.n_estados, (40, 95))
if self.parent.habilitar:
self.grupo_magnificador.draw(self.parent.screen, self.enable)
if self.deteccion_movimiento:
self.dibujar_rect()
def ir_glosario(self):
self.parent.pushState(pantalla10.estado(self.parent))
| gpl-3.0 | 7,081,254,828,094,702,000 | 58.114173 | 189 | 0.497036 | false |
barct/odoo-coop | infocoop_epec_consumos/tab_fact.py | 1 | 5194 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
from openerp.osv import osv
from collections import OrderedDict
class infocoop_tab_fact(models.Model):
_inherit = "infocoop_tab_fact"
class Values():
code = "(desconocido)"
conexiones = 1
consumo = 0
cargo_fijo = 0
monto_ee = 0
monto_ts = 0
consumo_ts = 0
monto_pe = 0
consumo_pe = 0
monto_impuestos = 0
monto_otros = 0
def __iadd__(self, vals):
self.conexiones += vals.conexiones
self.consumo += vals.consumo
self.cargo_fijo += vals.cargo_fijo
self.monto_ee += vals.monto_ee
self.monto_ts += vals.monto_ts
self.consumo_ts += vals.consumo_ts
self.monto_pe += vals.monto_pe
self.consumo_pe += vals.consumo_pe
self.monto_impuestos += vals.monto_impuestos
self.monto_otros += vals.monto_otros
return self
def __unicode__(self):
txt = """code %s
conexiones %s
consumo: %s
cargo_fijo: %s
monto_ee: %s
monto_ts: %s
consumo_ts: %s
monto_pe: %s
consumo_pe: %s
monto_impuestos: %s
monto_otros: %s """
return txt % (self.code,
self.conexiones,
self.consumo,
self.cargo_fijo,
self.monto_ee,
self.monto_ts,
self.consumo_ts,
self.monto_pe,
self.consumo_pe,
self.monto_impuestos,
self.monto_otros, )
class ParticularReport(models.AbstractModel):
_name = 'report.infocoop_epec_consumos.report_example_report_view'
def get_epec_data(self, docs):
data = list()
for r in docs:
values = dict()
liq_ids = self.env["infocoop_liquidac"].search([
("servicios", "=", "/E"),
("periodo", "=", r.periodo), ])
for l in liq_ids:
if l.service_category_id.group_id:
group, level = l.service_category_id.\
group_id.define_segment(l.cons_ee)
else:
group = l.service_category_id.group_id.code
level = None
v = Values()
v.consumo = float(l.cons_ee)
v.cargo_fijo = float(l.cargo_fijo)
v.monto_ee = float(l.imp_ee)
v.monto_impuestos = float(l.neto_imp)
v.consumo_ts = float(l.ts_kwh)
v.monto_ts = float(l.ts_amount)
v.consumo_pe = float(l.pe_kwh)
v.monto_pe = float(l.pe_amount)
v.monto_otros = l.neto_serv - \
(v.monto_ee + v.cargo_fijo + v.monto_ts + v.monto_pe)
code = None
if l.service_category_id.group_id.code == "UR":
if l.pe_level == 2:
code = "5010"
elif l.pe_level == 3:
code = "5020"
elif l.ts_level == 2:
if l.cons_ee <= 150:
code = "5500"
else:
code = "5510"
elif l.ts_level == 1:
if l.cons_ee <= 150:
code = "5500"
elif l.cons_ee <= 450:
code = "5530"
else:
code = "5540"
else:
code = "5000"
v.code = group + str(level) + "-" + code
else:
if group == "NR" and level == 3:
v.code = group + str(level) + \
"-" + l.service_category_id.code
else:
v.code = group + str(level)
if v.code in values:
values[v.code] += v
else:
values[v.code] = v
data.append(
{"doc": r,
"values": OrderedDict(sorted(values.items(),
key=lambda t: t[0])), })
return data
@api.multi
def render_html(self, data=None):
report_obj = self.env['report']
report = report_obj._get_report_from_name(
'infocoop_epec_consumos.report_example_report_view')
docs = self.env['infocoop_tab_fact'].browse(self._ids)
data = self.get_epec_data(docs)
docargs = {
'doc_ids': self._ids,
'doc_model': report.model,
'docs': docs,
'data': data,
}
return report_obj.render(
'infocoop_epec_consumos.report_example_report_view', docargs)
| gpl-3.0 | -7,498,251,093,839,075,000 | 33.85906 | 78 | 0.426646 | false |
frzdian/jaksafe-engine | jaksafe/jaksafe/jakservice/jaksafe.py | 1 | 3403 | import ConfigParser
import os
import sys
#### import postgresql and mysql package
import MySQLdb
import psycopg2
from sqlalchemy import create_engine
global_conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'global_conf.cfg')
def get_database_connection(ip_address,user,paswd,database):
con = MySQLdb.connect(ip_address,user,paswd,database)
return con
#def get_pgsql_database_connection(user,paswd,database,port_number):
# psql_con = None
# try:
# psql_con = psycopg2.connect(database = database,\
# user = user,\
# password = paswd,\
# port = port_number)
#
# except psycopg2.DatabaseError, e:
# print 'Error %s' % e
# print 'Program is terminating ....'
# sys.exit(1)
# return psql_con
def get_pgsql_database_connection(user,paswd,database,port_number,host):
psql_con = None
try:
psql_con = psycopg2.connect(database = database,\
user = user,\
password = paswd,\
port = port_number,\
host = host)
except psycopg2.DatabaseError, e:
print 'Error %s' % e
print 'Program is terminating ....'
sys.exit(1)
return psql_con
def get_pgsql_database_engine(ip_address,user,paswd,database,port_number):
## example of psql url => 'postgresql://jaksafe:password@localhost:5432/test_db')
psql_url = 'postgresql://' + user + ':' + paswd + '@' + ip_address + ':' + str(port_number) + '/' + database
print 'creating postgresql engine at => %s'%(psql_url)
pg_engine = create_engine(psql_url)
return pg_engine
## Defining the parser
global_conf_parser = ConfigParser.SafeConfigParser()
global_conf_parser.read(global_conf_file)
## Dims configuration
dims_url_base = global_conf_parser.get('dims_conf','url_dims')
## MySQL Database configuration
database_url_address = global_conf_parser.get('database_configuration','url_address')
user = global_conf_parser.get('database_configuration','user')
paswd = global_conf_parser.get('database_configuration','paswd')
database_name = global_conf_parser.get('database_configuration','database_name')
## PostgreSQL Database configuration
pgsql_address = global_conf_parser.get('psql_database_configuration','ip_address')
pgsql_user = global_conf_parser.get('psql_database_configuration','user')
pgsql_paswd = global_conf_parser.get('psql_database_configuration','passwd')
pgsql_database_name = global_conf_parser.get('psql_database_configuration','database_name')
pgsql_database_port = global_conf_parser.get('psql_database_configuration','port')
## Initialize QGIS installation path
qgis_install_path = global_conf_parser.get('qgis_conf','qgis_install_path')
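## The sections and keys read above imply a global_conf.cfg of roughly the
## following shape (values are placeholders for illustration, not taken from
## the project):
##
##   [dims_conf]
##   url_dims = http://example.org/dims
##
##   [database_configuration]
##   url_address = 127.0.0.1
##   user = jakuser
##   paswd = secret
##   database_name = jakdb
##
##   [psql_database_configuration]
##   ip_address = 127.0.0.1
##   user = jakuser
##   passwd = secret
##   database_name = jakdb
##   port = 5432
##
##   [qgis_conf]
##   qgis_install_path = /usr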
## Initialize Open MySQL database connection
db_con = get_database_connection(database_url_address,user,paswd,database_name)
## Initialize Open Postgresql database connection with psyopg2
psql_db_con = get_pgsql_database_connection(pgsql_user,pgsql_paswd,pgsql_database_name,pgsql_database_port,pgsql_address)
## Initialize Create postgresql engine with SQLAlchemy
psql_engine = get_pgsql_database_engine(pgsql_address,pgsql_user,pgsql_paswd,pgsql_database_name,pgsql_database_port)
| gpl-2.0 | 3,715,504,238,457,003,000 | 38.114943 | 121 | 0.667058 | false |
ufal/ker | server.py | 1 | 9594 | #!/usr/bin/env python
import flask
from flask import Flask
from flask import request
from werkzeug import secure_filename
import os, random, datetime, codecs
import sys, json, magic
import cPickle as pickle
import regex as re
import keywords
import argparse
import xml.etree.ElementTree
import zipfile
app = Flask(__name__)
upload_dir = "uploads"
cs_tagger = None
cs_idf_doc_count = None
cs_idf_table = None
en_tagger = None
en_idf_doc_count = None
en_idf_table = None
@app.route('/')
def index():
return "{}\n"
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
def get_file(file_name):
try:
src = os.path.join(root_dir(), file_name)
return open(src).read()
except IOError as exc:
return str(exc)
@app.route('/web', methods=['GET'])
def show_web():
content = get_file("web.html")
print content
return flask.Response(content, mimetype="text/html")
@app.route('/demo', methods=['GET'])
def show_simple_demo():
content = get_file("web.html")
content = re.sub(r"\$\(\'#header", "//", content)
content = re.sub(r"\$\(\'#footer", "//", content)
return flask.Response(content, mimetype="text/html")
@app.route('/', methods=['POST'])
def post_request():
start_time = datetime.datetime.now()
if 'file' in request.files:
file = request.files['file']
else:
class _file_wrapper(object):
def __init__(self, data):
self._data = data
import uuid
self.filename = str(uuid.uuid4())
def save(self, path):
with codecs.open(path, mode="w+", encoding="utf-8") as fout:
fout.write(self._data)
file = _file_wrapper(request.form["data"])
tagger = cs_tagger
idf_doc_count = cs_idf_doc_count
idf_table = cs_idf_table
json_response = None
try:
post_id = datetime.datetime.now().strftime("%Y-%m-%d/%H/%M-%S-")+\
str(random.randint(10000, 99999))
post_dir = os.path.join(upload_dir, post_id)
os.makedirs(post_dir)
if request.args.get('language') == 'en':
tagger = en_tagger
idf_doc_count = en_idf_doc_count
idf_table = en_idf_table
elif request.args.get('language') == 'cs':
pass
elif request.args.get('language'):
raise Exception('Unsupported language {}'.format(request.args.get('language')))
if request.args.get('threshold'):
try:
threshold = float(request.args.get('threshold'))
except:
raise Exception("Threshold \"{}\" is not valid float.".format(request.args.get("threshold")))
else:
threshold = 0.2
if request.args.get("maximum-words"):
try:
maximum_words = int(request.args.get('maximum-words'))
except:
raise Exception("Maximum number of words \"{}\" is not an integer.".format(request.args.get("maximum-words")))
else:
maximum_words = 15
file_name = secure_filename(file.filename)
file_path = os.path.join(post_dir, file_name)
file.save(os.path.join(file_path))
data, code = \
process_file(file_path, tagger, idf_doc_count, idf_table, threshold, maximum_words)
except Exception as e:
code = 400
data = {"error": e.message}
finally:
json_response = json.dumps(data)
print json_response.encode('unicode-escape')
log = {}
log['remote_addr'] = request.remote_addr
log['response_json'] = data
log['response_code'] = code
log['time'] = start_time.strftime("%Y-%m-%d %H:%M:%S")
log['duration'] = (datetime.datetime.now() - start_time).total_seconds()
f_log = open(os.path.join(post_dir, "log.json"), 'w')
json.dump(log, f_log)
f_log.close()
response = flask.Response(json_response,
content_type='application/json; charset=utf-8')
response.headers.add('content-length', len(json_response.encode('utf-8')))
response.status_code = code
return response
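# A minimal, hypothetical client call for the POST handler above (not part of the
# original service); it assumes the default host/port from the __main__ block
# (127.0.0.1:5000) and the query parameters parsed in post_request():
#
#   curl -X POST -F "file=@article.txt" \
#        "http://127.0.0.1:5000/?language=en&threshold=0.2&maximum-words=15"
#
# On success the response is the JSON returned by process_file(); otherwise an
# {"error": ...} object is returned with HTTP status 400.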
def process_file(file_path, tagger, idf_doc_count, idf_table, threshold, maximum_words):
"""
    Takes the uploaded file, detects its type (plain text, ALTO XML, zip)
and calls a parsing function accordingly. If everything succeeds it
returns keywords and 200 code, returns an error otherwise.
"""
file_info = magic.from_file(file_path)
lines = []
if re.match("^UTF-8 Unicode (with BOM) text", file_info):
lines = lines_from_txt_file(file_path, encoding='utf-8-sig')
elif re.match("^UTF-8 Unicode", file_info):
lines = lines_from_txt_file(file_path, encoding='utf-8')
elif re.match("^ASCII text", file_info):
lines = lines_from_txt_file(file_path, encoding='utf-8')
elif re.match('^XML 1.0 document', file_info) and \
(file_path.endswith('.alto') or file_path.endswith('.xml')):
lines = lines_from_alto_file(file_path)
elif re.match('^Zip archive data', file_info):
lines = lines_from_zip_file(file_path)
else:
return {"eror": "Unsupported file type: {}".format(file_info)}, 400
if not lines:
return {"error": "Empty file"}, 400
return keywords.get_keywords(lines, tagger, idf_doc_count, idf_table, threshold, maximum_words), 200
def lines_from_txt_file(file_path, encoding='utf-8'):
"""
Loads lines of text from a plain text file.
:param file_path: Path to the alto file or a file-like object.
"""
if type(file_path) is str:
f = codecs.open(file_path, 'r', encoding)
else:
f = file_path
content = [l.strip() for l in f]
f.close()
return content
def lines_from_alto_file(file_path):
"""
Loads lines of text from a provided alto file.
:param file_path: Path to the alto file or a file-like object.
"""
e = xml.etree.ElementTree.parse(file_path).getroot()
layout = None
for c in e.getchildren():
if c.tag.endswith('Layout'):
layout = c
break
if layout is None:
raise Exception("XML is not ALTO file (does not contain layout object).")
for page in layout.getchildren():
if not page.tag.endswith("Page"):
continue
text_lines = layout.findall(".//{http://www.loc.gov/standards/alto/ns-v2#}TextLine")
for text_line in text_lines:
line_words = []
for string in text_line.getchildren():
if not string.tag.endswith('String'):
continue
line_words.append(string.attrib['CONTENT'])
yield " ".join(line_words)
def lines_from_zip_file(file_path):
"""
    Loads lines of text from a provided zip file. If it contains ALTO files, it
    uses them, otherwise it looks for txt files. Files can be nested at an arbitrary depth.
:param file_path: Path to the uploaded zip file.
:type file_path: str
"""
archive = zipfile.ZipFile(file_path)
alto_files = [n for n in archive.namelist() if n.endswith(".alto") or n.endswith(".xml")]
if alto_files:
for f_name in alto_files:
for line in lines_from_alto_file(archive.open(f_name)):
yield line
else:
txt_files = [n for n in archive.namelist() if n.endswith(".txt")]
if not txt_files:
raise Exception("Archive contains neither alto files nor text files.")
for f_name in txt_files:
for line in lines_from_txt_file(archive.open(f_name)):
yield line
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs the KER server.')
parser.add_argument("--cs-morphodita", help="Path to a Czech tagger model for Morphodita.", required=True)
parser.add_argument("--cs-idf", help="Czech idf model.", required=True)
parser.add_argument("--en-morphodita", help="Path to a English tagger model for Morphodita.", required=True)
parser.add_argument("--en-idf", help="English idf model.", required=True)
parser.add_argument("--port", help="Port the server runs on", type=int, default=5000)
parser.add_argument("--host", help="IP address the server will run at", type=str, default="127.0.0.1")
args = parser.parse_args()
if os.path.exists(args.cs_morphodita):
cs_tagger = keywords.Morphodita(args.cs_morphodita)
else:
print >> sys.stderr, "File with Czech Morphodita model does not exist: {}".format(args.cs_morphodita)
exit(1)
if os.path.exists(args.cs_idf):
f_idf = open(args.cs_idf, 'rb')
cs_idf_doc_count = float(pickle.load(f_idf))
cs_idf_table = pickle.load(f_idf)
f_idf.close()
else:
print >> sys.stderr, "File with Czech IDF model does not exist: {}".format(args.cs_idf)
exit(1)
if os.path.exists(args.en_morphodita):
en_tagger = keywords.Morphodita(args.en_morphodita)
else:
print >> sys.stderr, "File with English Morphodita model does not exist: {}".format(args.en_morphodita)
exit(1)
if os.path.exists(args.en_idf):
f_idf = open(args.en_idf, 'rb')
en_idf_doc_count = float(pickle.load(f_idf))
en_idf_table = pickle.load(f_idf)
f_idf.close()
else:
print >> sys.stderr, "File with English IDF model does not exist: {}".format(args.en_idf)
exit(1)
app.run(debug=True, host=args.host, port=args.port)
| lgpl-3.0 | 3,862,969,899,623,233,500 | 34.142857 | 126 | 0.603294 | false |
Juanlu001/CBC.Solve | cbc/swing/fsinewton/solver/boundary_conditions.py | 1 | 4016 | """Module containing implementation of monolithic FSI boundary conditions"""
__author__ = "Gabriel Balaban"
__copyright__ = "Copyright (C) 2010 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from dolfin import *
class FSIBC(object):
"""
Boundary Conditions class for Monolithic FSI
Arguments
problem
object of type pfsi.FsiNewtonTest
spaces
object of type FSISpaces
"""
def __init__(self,problem,spaces):
self.problem = problem
self.spaces = spaces
#Time dependant BC are set on the intial guess and at each time step.
self.bcallU1_ini = self.create_all_dirichlet_conditions("Initial guess")
#Newton Increment BC are homogeneous
self.bcallI = self.create_all_dirichlet_conditions("Newton Step")
[bc.homogenize() for bc in self.bcallI]
def create_all_dirichlet_conditions(self, bcsetname = ""):
info_blue("\nCreating Dirichlet Boundary Conditions " + bcsetname)
return self.create_fluid_bc() + self.create_structure_bc() + \
self.create_mesh_bc()
def create_bc(self,space,boundaries,values,bcname):
#If Boundaries specified without values assume homogeneous
if boundaries is not None and (values == [] or values is None):
dim = space.num_sub_spaces()
#A Function Space returns dim 0 but really has dim 1.
if dim == 0:
dim = 1
zeros = tuple(["0.0" for i in range(dim)])
values = [zeros for i in range(len(boundaries))]
#Try to generate the BC
bcs = []
## try:
for boundary,value in zip(boundaries,values):
if boundary == 'GammaFSI':
fsibounds = self.problem.interiorboundarynums["FSI_bound"]
interiormeshfunc = self.problem.meshfunctions["interiorfacet"]
for fsibound in fsibounds:
print fsibound
bcs += [DirichletBC(space, value,interiormeshfunc,fsibound)]
else:
bcs += [DirichletBC(space, value, boundary)]
info("Created bc %s"%bcname)
## except:
## info("No Dirichlet bc created for %s"%bcname)
return bcs
def create_fluid_bc(self):
bcv = self.create_bc(self.spaces.V_F,self.problem.fluid_velocity_dirichlet_boundaries(),\
self.problem.fluid_velocity_dirichlet_values(),"Fluid Velocity")
bcp = self.create_fluid_pressure_bc()
return bcv + bcp
def create_fluid_pressure_bc(self):
return self.create_bc(self.spaces.Q_F,self.problem.fluid_pressure_dirichlet_boundaries(),\
self.problem.fluid_pressure_dirichlet_values(),"Fluid Pressure")
def create_structure_bc(self):
bcU = self.create_bc(self.spaces.C_S,self.problem.structure_dirichlet_boundaries(),\
self.problem.structure_dirichlet_values(),"Structure Displacement")
bcP = self.create_bc(self.spaces.V_S,self.problem.structure_velocity_dirichlet_boundaries(),\
self.problem.structure_velocity_dirichlet_values(),"Structure Velocity")
return bcU + bcP
def create_mesh_bc(self):
        #If no Mesh BC specified, assume domain boundary and fixed
if self.problem.mesh_dirichlet_boundaries() is None:
#The value will be set to zero in self.create_bc
            return self.create_bc(self.spaces.C_F,["on_boundary"],None,"Mesh Displacement")
#Allow the user to explicitly create no mesh bc whatsoever.
elif self.problem.mesh_dirichlet_boundaries() == "NoBC":
return []
else:
return self.create_bc(self.spaces.C_F,self.problem.mesh_dirichlet_boundaries(),\
self.problem.mesh_dirichlet_values(),"Mesh Displacement")
| gpl-3.0 | -6,300,717,971,602,277,000 | 43.131868 | 101 | 0.608317 | false |
timopulkkinen/BubbleFish | tools/telemetry/telemetry/core/chrome/win_platform_backend.py | 1 | 3723 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes
import subprocess
try:
import win32api # pylint: disable=F0401
import win32con # pylint: disable=F0401
import win32process # pylint: disable=F0401
except ImportError:
win32api = None
win32con = None
win32process = None
from telemetry.core.chrome import platform_backend
class WinPlatformBackend(platform_backend.PlatformBackend):
def _GetProcessHandle(self, pid):
mask = (win32con.PROCESS_QUERY_INFORMATION |
win32con.PROCESS_VM_READ)
return win32api.OpenProcess(mask, False, pid)
# pylint: disable=W0613
def StartRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def StopRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def GetRawDisplayFrameRateMeasurements(self):
raise NotImplementedError()
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
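    # Returns the system-wide commit charge in kilobytes
    # (CommitTotal pages multiplied by PageSize, divided by 1024).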
class PerformanceInfo(ctypes.Structure):
"""Struct for GetPerformanceInfo() call
http://msdn.microsoft.com/en-us/library/ms683210
"""
_fields_ = [('size', ctypes.c_ulong),
('CommitTotal', ctypes.c_size_t),
('CommitLimit', ctypes.c_size_t),
('CommitPeak', ctypes.c_size_t),
('PhysicalTotal', ctypes.c_size_t),
('PhysicalAvailable', ctypes.c_size_t),
('SystemCache', ctypes.c_size_t),
('KernelTotal', ctypes.c_size_t),
('KernelPaged', ctypes.c_size_t),
('KernelNonpaged', ctypes.c_size_t),
('PageSize', ctypes.c_size_t),
('HandleCount', ctypes.c_ulong),
('ProcessCount', ctypes.c_ulong),
('ThreadCount', ctypes.c_ulong)]
def __init__(self):
self.size = ctypes.sizeof(self)
super(PerformanceInfo, self).__init__()
performance_info = PerformanceInfo()
ctypes.windll.psapi.GetPerformanceInfo(
ctypes.byref(performance_info), performance_info.size)
return performance_info.CommitTotal * performance_info.PageSize / 1024
def GetMemoryStats(self, pid):
memory_info = win32process.GetProcessMemoryInfo(
self._GetProcessHandle(pid))
return {'VM': memory_info['PagefileUsage'],
'VMPeak': memory_info['PeakPagefileUsage'],
'WorkingSetSize': memory_info['WorkingSetSize'],
'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
def GetIOStats(self, pid):
io_stats = win32process.GetProcessIoCounters(
self._GetProcessHandle(pid))
return {'ReadOperationCount': io_stats['ReadOperationCount'],
'WriteOperationCount': io_stats['WriteOperationCount'],
'ReadTransferCount': io_stats['ReadTransferCount'],
'WriteTransferCount': io_stats['WriteTransferCount']}
def GetChildPids(self, pid):
"""Retunds a list of child pids of |pid|."""
child_pids = []
pid_ppid_list = subprocess.Popen(['wmic', 'process', 'get',
'ParentProcessId,ProcessId'],
stdout=subprocess.PIPE).communicate()[0]
for pid_ppid in pid_ppid_list.splitlines()[1:]: #skip header
if not pid_ppid:
continue
curr_ppid, curr_pid = pid_ppid.split()
if int(curr_ppid) == pid:
child_pids.append(int(curr_pid))
child_pids.extend(self.GetChildPids(int(curr_pid)))
return child_pids
| bsd-3-clause | 2,244,057,969,717,524,000 | 36.606061 | 77 | 0.640612 | false |
Jajcus/pyxmpp2 | pyxmpp2/mainloop/wait.py | 1 | 2125 | #
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# pylint: disable-msg=W0201
"""Utility functions to wait until a socket (or object implementing .fileno()
in POSIX) is ready for input or output."""
from __future__ import absolute_import, division
__docformat__ = "restructuredtext en"
import select
if hasattr(select, "poll"):
def wait_for_read(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for reading.
"""
if timeout is not None:
timeout *= 1000
poll = select.poll()
poll.register(socket, select.POLLIN)
events = poll.poll(timeout)
return bool(events)
def wait_for_write(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for writing.
"""
if timeout is not None:
timeout *= 1000
poll = select.poll()
poll.register(socket, select.POLLOUT)
events = poll.poll(timeout)
return bool(events)
else:
def wait_for_read(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for reading.
"""
readable = select.select([socket], [], [], timeout)[0]
return bool(readable)
def wait_for_write(socket, timeout = None):
"""Wait up to `timeout` seconds until `socket` is ready for writing.
"""
writable = select.select([], [socket], [], timeout)[1]
return bool(writable)
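# Illustrative usage (not part of the original module), assuming `sock` is a
# connected socket object; both helpers return True once the socket is ready
# and False when the timeout expires:
#
#     if wait_for_read(sock, timeout = 5):
#         data = sock.recv(4096)
#     if wait_for_write(sock, timeout = 5):
#         sock.send(data)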
| lgpl-2.1 | -7,421,785,645,248,673,000 | 35.637931 | 77 | 0.660235 | false |
kamitchell/py2app | py2app/filters.py | 1 | 1048 | from pkg_resources import require
require("macholib")
import os
import sys
from macholib.util import has_filename_filter, in_system_path
def not_stdlib_filter(module, prefix=None):
"""
Return False if the module is located in the standard library
"""
if prefix is None:
prefix = sys.prefix
prefix = os.path.join(os.path.realpath(prefix), '')
rp = os.path.realpath(module.filename)
if rp.startswith(prefix):
rest = rp[len(prefix):]
if '/site-python/' in rest:
return True
elif '/site-packages/' in rest:
return True
else:
return False
return True
def not_system_filter(module):
"""
Return False if the module is located in a system directory
"""
return not in_system_path(module.filename)
def bundle_or_dylib_filter(module):
"""
Return False if the module does not have a filetype attribute
corresponding to a Mach-O bundle or dylib
"""
return getattr(module, 'filetype', None) in ('bundle', 'dylib')
| mit | 183,278,461,600,789,730 | 27.324324 | 67 | 0.645992 | false |
schieb/angr | tests/test_baseptr_save_simplifier.py | 1 | 2862 |
import os.path
import nose.tools
import angr
import ailment
from angr.analyses.decompiler.optimization_passes.base_ptr_save_simplifier import BasePointerSaveSimplifier
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def _get_block(clinic, addr):
for (block_addr, _), block in clinic._blocks.items():
if block_addr == addr:
return block
return None
def test_baseptr_save_simplifier_amd64():
# decompile all:main and make sure the first and the last blocks do not save or restore to rbp
bin_path = os.path.join(test_location, "x86_64", "all")
proj = angr.Project(bin_path, auto_load_libs=False, load_debug_info=True)
cfg = proj.analyses.CFG(data_references=True, normalize=True)
optimization_passes = [ BasePointerSaveSimplifier ]
main_func = cfg.functions['main']
dec = proj.analyses.Decompiler(main_func, cfg=cfg, optimization_passes=optimization_passes)
entry_block = _get_block(dec.clinic, main_func.addr)
endpoint_block = _get_block(dec.clinic, next(iter(main_func.endpoints)).addr)
nose.tools.assert_is_not_none(entry_block)
nose.tools.assert_is_not_none(endpoint_block)
for stmt in entry_block.statements:
if isinstance(stmt, ailment.Stmt.Store) \
and isinstance(stmt.data, ailment.Expr.StackBaseOffset):
assert False, "Found a base-pointer saving statement in the first block."
for stmt in endpoint_block.statements:
if isinstance(stmt, ailment.Stmt.Assignment) \
and isinstance(stmt.dst, ailment.Expr.Register) \
and stmt.dst.reg_offset == proj.arch.bp_offset:
assert False, "Found a base-pointer restoring statement in the last block."
def check_bp_save_fauxware(arch):
p = angr.Project(os.path.join(test_location, arch, 'fauxware'), auto_load_libs=False)
cfg = p.analyses.CFGFast(normalize=True)
main = p.kb.functions['main']
optimization_passes = [ BasePointerSaveSimplifier ]
dra = p.analyses.Decompiler(main, cfg=cfg, optimization_passes=optimization_passes)
first_block_stmts = dra.codegen._sequence.nodes[0].nodes[0].statements
for stmt in first_block_stmts:
if isinstance(stmt, ailment.Stmt.Store):
nose.tools.assert_false(
(isinstance(stmt.data, ailment.Expr.Register)
and stmt.data.reg_offset == p.arch.bp_offset)
or (isinstance(stmt.data, ailment.Expr.StackBaseOffset)
and stmt.data.offset == 0))
def test_bp_save_amd64_fauxware():
check_bp_save_fauxware('x86_64')
def test_bp_save_armel_fauxware():
check_bp_save_fauxware('armel')
if __name__ == "__main__":
test_baseptr_save_simplifier_amd64()
test_bp_save_amd64_fauxware()
test_bp_save_armel_fauxware()
| bsd-2-clause | -5,038,531,602,819,677,000 | 35.692308 | 107 | 0.676799 | false |
tuukka/sonata-svn-test | sonata/plugins/test.py | 1 | 2113 |
# this is the magic interpreted by Sonata, referring to on_enable etc. below:
### BEGIN PLUGIN INFO
# [plugin]
# plugin_format: 0, 0
# name: Test plugin
# version: 0, 0, 1
# description: A simple test plugin.
# author: Tuukka Hastrup
# author_email: [email protected]
# url: http://sonata.berlios.de
# license: GPL v3 or later
# [capabilities]
# enablables: on_enable
# tabs: construct_tab
# playing_song_observers: on_song_change
# lyrics_fetching: on_lyrics_fetch
### END PLUGIN INFO
# nothing magical from here on
import gobject, gtk, pango
from sonata.misc import escape_html
songlabel = None
lyricslabel = None
# this gets called when the plugin is loaded, enabled, or disabled:
def on_enable(state):
global songlabel, lyricslabel
if state:
songlabel = gtk.Label("No song info received yet.")
songlabel.props.ellipsize = pango.ELLIPSIZE_END
lyricslabel = gtk.Label("No lyrics requests yet.")
lyricslabel.props.ellipsize = pango.ELLIPSIZE_END
else:
songlabel = None
lyricslabel = None
# this constructs the parts of the tab when called:
def construct_tab():
vbox = gtk.VBox()
vbox.pack_start(gtk.Label("Hello world!"))
vbox.pack_start(songlabel)
vbox.pack_start(lyricslabel)
vbox.pack_start(gtk.Label("(You can modify me at %s)" %
__file__.rstrip("c")))
vbox.show_all()
# the return value goes off to Base.new_tab(page, stock, text, focus):
# (tab content, icon name, tab name, the widget to focus on tab switch)
return (vbox, None, "Test plugin", None)
# this gets called when a new song is playing:
def on_song_change(songinfo):
if songinfo:
songlabel.set_markup("<b>Info for currently playing song:</b>"+
"\n%s" % escape_html(repr(songinfo)))
else:
songlabel.set_text("Currently not playing any song.")
songlabel.show()
# this gets requests for lyrics:
def on_lyrics_fetch(callback, artist, title):
lyricslabel.set_markup(
"Got request for lyrics for artist %r title %r." %
(artist, title))
# callback(lyrics, error)
gobject.timeout_add(0, callback, None,
"%s doesn't have lyrics for %r." %
(__name__, (artist, title)))
| gpl-3.0 | 4,461,166,768,552,095,000 | 27.554054 | 77 | 0.705632 | false |
datacommonsorg/data | scripts/us_bjs/nps/preprocess_data.py | 1 | 19122 | import pandas as pd
from absl import flags
from absl import app
FLAGS = flags.FLAGS
flags.DEFINE_string('preprocess_file',
'NPS_1978-2018_Data.tsv',
'file path to tsv file with data to proess',
short_name='p')
def convert_nan_for_calculation(value):
if pd.isna(value):
return 0
else:
return value
def total_jurisdiction_columns_helper(df):
"""calculation to include private facility numbers"""
df["PVINF_Temp"] = df["PVINF"].apply(convert_nan_for_calculation)
df["PVOTHF_Temp"] = df["PVOTHF"].apply(convert_nan_for_calculation)
df["PVINM_Temp"] = df["PVINM"].apply(convert_nan_for_calculation)
df["PVOTHM_Temp"] = df["PVOTHM"].apply(convert_nan_for_calculation)
df["Female_Total_Temp"] = df[["JURTOTF", "PVINF_Temp", "PVOTHF_Temp"
]].sum(axis=1).where(df["PVINCLF"] == 2,
df["JURTOTF"])
df["Male_Total_Temp"] = df[["JURTOTM", "PVINM_Temp", "PVOTHM_Temp"
]].sum(axis=1).where(df["PVINCLM"] == 2,
df["JURTOTM"])
"""calculation to include local facility numbers"""
df["LFF_Temp"] = df["LFF"].apply(convert_nan_for_calculation)
df["LFM_Temp"] = df["LFM"].apply(convert_nan_for_calculation)
df["Female_Total_Temp"] = df[["Female_Total_Temp", "LFF_Temp"
]].sum(axis=1).where(df["LFINCLF"] == 2,
df["Female_Total_Temp"])
df["Male_Total_Temp"] = df[["Male_Total_Temp", "LFM_Temp"
]].sum(axis=1).where(df["LFINCLM"] == 2,
df["Male_Total_Temp"])
"""calculation to include numbers from local facilities solely to ease crowding"""
df["LFCRSTF_Temp"] = df["LFCRSTF"].apply(convert_nan_for_calculation)
df["LFCRSTM_Temp"] = df["LFCRSTM"].apply(convert_nan_for_calculation)
df["Female_Total_Temp"] = df[["Female_Total_Temp", "LFCRSTF_Temp"
]].sum(axis=1).where(df["LFCRINCF"] == 2,
df["Female_Total_Temp"])
df["Male_Total_Temp"] = df[["Male_Total_Temp", "LFCRSTM_Temp"
]].sum(axis=1).where(df["LFCRINCM"] == 2,
df["Male_Total_Temp"])
"""calculation to include federal and other state facility numbers"""
df["FEDF_Temp"] = df["FEDF"].apply(convert_nan_for_calculation)
df["OTHSTF_Temp"] = df["OTHSTF"].apply(convert_nan_for_calculation)
df["FEDM_Temp"] = df["FEDM"].apply(convert_nan_for_calculation)
df["OTHSTM_Temp"] = df["OTHSTM"].apply(convert_nan_for_calculation)
df["Female_Total_Temp"] = df[[
"Female_Total_Temp", "FEDF_Temp", "OTHSTF_Temp"
]].sum(axis=1).where(df["FACINCLF"] == 2, df["Female_Total_Temp"])
df["Male_Total_Temp"] = df[["Male_Total_Temp", "FEDM_Temp", "OTHSTM_Temp"
]].sum(axis=1).where(df["FACINCLM"] == 2,
df["Male_Total_Temp"])
def get_columns(df):
df_out = {}
total_jurisdiction_columns_helper(df)
df_out["GeoId"] = df["GeoId"]
df_out["YEAR"] = df["YEAR"]
df_out["Count_Person_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"Female_Total_Temp"]
df_out[
"Count_Person_Female_Incarcerated_WhiteAlone_MeasuredBasedOnJurisdiction"] = df[
"WHITEF"]
df_out[
"Count_Person_BlackOrAfricanAmericanAlone_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"BLACKF"]
df_out[
"Count_Person_Female_HispanicOrLatino_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"HISPF"]
df_out[
"Count_Person_AmericanIndianOrAlaskaNativeAlone_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"AIANF"]
df_out[
"Count_Person_AsianAlone_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"ASIANF"]
df_out[
"Count_Person_Female_Incarcerated_NativeHawaiianOrOtherPacificIslanderAlone_MeasuredBasedOnJurisdiction"] = df[
"NHPIF"]
df_out[
"Count_Person_Female_Incarcerated_TwoOrMoreRaces_MeasuredBasedOnJurisdiction"] = df[
"TWORACEF"]
df_out[
"Count_MortalityEvent_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHTOTF"]
df_out[
"Count_MortalityEvent_Female_Incarcerated_JudicialExecution_MeasuredBasedOnJurisdiction"] = df[
"DTHEXECF"]
df_out[
"Count_MortalityEvent_Female_IllnessOrNaturalCause_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHILLNF"]
df_out[
"Count_MortalityEvent_AIDS_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHAIDSF"]
df_out[
"Count_MortalityEvent_Female_Incarcerated_IntentionalSelf-Harm(Suicide)_MeasuredBasedOnJurisdiction"] = df[
"DTHSUICF"]
df_out[
"Count_MortalityEvent_Accidents(UnintentionalInjuries)_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHACCF"]
df_out[
"Count_MortalityEvent_DeathDueToAnotherPerson_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHPERSF"]
df_out[
"Count_MortalityEvent_Assault(Homicide)_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHHOMIF"]
df_out[
"Count_MortalityEvent_Female_Incarcerated_NPSOtherCauseOfDeath_MeasuredBasedOnJurisdiction"] = df[
"DTHOTHF"]
df_out[
"Count_IncarcerationEvent_AdmittedToPrison_Female_Incarcerated_MaxSentenceGreaterThan1Year_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"ADTOTF"]
df_out[
"Count_IncarcerationEvent_Female_Incarcerated_MaxSentenceGreaterThan1Year_ReleasedFromPrison_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"RLTOTF"]
df_out[
"Count_Person_Female_Incarcerated_MaxSentenceGreaterThan1Year_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"JURGT1F"]
df_out[
"Count_Person_Female_Incarcerated_MaxSentence1YearOrLess_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"JURLT1F"]
df_out[
"Count_Person_Female_Incarcerated_Unsentenced_MeasuredBasedOnJurisdiction"] = df[
"JURUNSF"]
df_out[
"Count_Person_Female_Incarcerated_InState_PrivatelyOperated_MeasuredBasedOnJurisdiction"] = df[
"PVINF"]
df_out[
"Count_Person_Female_Incarcerated_OutOfState_PrivatelyOperated_MeasuredBasedOnJurisdiction"] = df[
"PVOTHF"]
df_out[
"Count_Person_Female_Incarcerated_Local_LocallyOperated_MeasuredBasedOnJurisdiction"] = df[
"LFF"]
df_out[
"Count_Person_FederallyOperated_Female_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"FEDF"]
df_out[
"Count_Person_Female_Incarcerated_OutOfState_StateOperated_MeasuredBasedOnJurisdiction"] = df[
"OTHSTF"]
df_out[
"Count_Person_Female_Incarcerated_NotAUSCitizen_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZTOTF"]
df_out[
"Count_Person_Female_Incarcerated_MaxSentenceGreaterThan1Year_NotAUSCitizen_Sentenced_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZGT1F"]
df_out[
"Count_Person_Female_Incarcerated_MaxSentence1YearOrLess_NotAUSCitizen_Sentenced_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZLE1F"]
df_out[
"Count_Person_Female_Incarcerated_NotAUSCitizen_StateOperated&FederallyOperated&PrivatelyOperated_Unsentenced_MeasuredBasedOnCustody"] = df[
"NCITZUNSF"]
df_out[
"Count_Person_Female_Incarcerated_Under18_MeasuredBasedOnCustody"] = df[
"CUSLT18F"]
df_out["Count_Person_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"Male_Total_Temp"]
df_out[
"Count_Person_Incarcerated_Male_WhiteAlone_MeasuredBasedOnJurisdiction"] = df[
"WHITEM"]
df_out[
"Count_Person_BlackOrAfricanAmericanAlone_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"BLACKM"]
df_out[
"Count_Person_HispanicOrLatino_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"HISPM"]
df_out[
"Count_Person_AmericanIndianOrAlaskaNativeAlone_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"AIANM"]
df_out[
"Count_Person_AsianAlone_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"ASIANM"]
df_out[
"Count_Person_Incarcerated_Male_NativeHawaiianOrOtherPacificIslanderAlone_MeasuredBasedOnJurisdiction"] = df[
"NHPIM"]
df_out[
"Count_Person_Incarcerated_Male_TwoOrMoreRaces_MeasuredBasedOnJurisdiction"] = df[
"TWORACEM"]
df_out[
"Count_MortalityEvent_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHTOTM"]
df_out[
"Count_MortalityEvent_Incarcerated_JudicialExecution_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHEXECM"]
df_out[
"Count_MortalityEvent_IllnessOrNaturalCause_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHILLNM"]
df_out[
"Count_MortalityEvent_AIDS_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHAIDSM"]
df_out[
"Count_MortalityEvent_Incarcerated_IntentionalSelf-Harm(Suicide)_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHSUICM"]
df_out[
"Count_MortalityEvent_Accidents(UnintentionalInjuries)_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHACCM"]
df_out[
"Count_MortalityEvent_DeathDueToAnotherPerson_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHPERSM"]
df_out[
"Count_MortalityEvent_Assault(Homicide)_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"DTHHOMIM"]
df_out[
"Count_MortalityEvent_Incarcerated_Male_NPSOtherCauseOfDeath_MeasuredBasedOnJurisdiction"] = df[
"DTHOTHM"]
df_out[
"Count_IncarcerationEvent_AdmittedToPrison_Incarcerated_Male_MaxSentenceGreaterThan1Year_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"ADTOTM"]
df_out[
"Count_IncarcerationEvent_Incarcerated_Male_MaxSentenceGreaterThan1Year_ReleasedFromPrison_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"RLTOTM"]
df_out[
"Count_Person_Incarcerated_Male_MaxSentenceGreaterThan1Year_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"JURGT1M"]
df_out[
"Count_Person_Incarcerated_Male_MaxSentence1YearOrLess_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"JURLT1M"]
df_out[
"Count_Person_Incarcerated_Male_Unsentenced_MeasuredBasedOnJurisdiction"] = df[
"JURUNSM"]
df_out[
"Count_Person_Incarcerated_InState_Male_PrivatelyOperated_MeasuredBasedOnJurisdiction"] = df[
"PVINM"]
df_out[
"Count_Person_Incarcerated_Male_OutOfState_PrivatelyOperated_MeasuredBasedOnJurisdiction"] = df[
"PVOTHM"]
df_out[
"Count_Person_Incarcerated_Local_LocallyOperated_Male_MeasuredBasedOnJurisdiction"] = df[
"LFM"]
df_out[
"Count_Person_FederallyOperated_Incarcerated_Male_MeasuredBasedOnJurisdiction"] = df[
"FEDM"]
df_out[
"Count_Person_Incarcerated_Male_OutOfState_StateOperated_MeasuredBasedOnJurisdiction"] = df[
"OTHSTM"]
df_out[
"Count_Person_Incarcerated_Male_NotAUSCitizen_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZTOTM"]
df_out[
"Count_Person_Incarcerated_Male_MaxSentenceGreaterThan1Year_NotAUSCitizen_Sentenced_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZGT1M"]
df_out[
"Count_Person_Incarcerated_Male_MaxSentence1YearOrLess_NotAUSCitizen_Sentenced_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZLE1M"]
df_out[
"Count_Person_Incarcerated_Male_NotAUSCitizen_StateOperated&FederallyOperated&PrivatelyOperated_Unsentenced_MeasuredBasedOnCustody"] = df[
"NCITZUNSM"]
df_out[
"Count_Person_Incarcerated_Male_Under18_MeasuredBasedOnCustody"] = df[
"CUSLT18M"]
df_out["Count_Person_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"Female_Total_Temp"] + df["Male_Total_Temp"]
df_out[
"Count_Person_Incarcerated_WhiteAlone_MeasuredBasedOnJurisdiction"] = df[
"WHITEF"] + df["WHITEM"]
df_out[
"Count_Person_BlackOrAfricanAmericanAlone_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"BLACKF"] + df["BLACKM"]
df_out[
"Count_Person_HispanicOrLatino_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"HISPF"] + df["HISPM"]
df_out[
"Count_Person_AmericanIndianOrAlaskaNativeAlone_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"AIANF"] + df["AIANM"]
df_out[
"Count_Person_AsianAlone_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"ASIANF"] + df["ASIANM"]
df_out[
"Count_Person_Incarcerated_NativeHawaiianOrOtherPacificIslanderAlone_MeasuredBasedOnJurisdiction"] = df[
"NHPIF"] + df["NHPIM"]
df_out[
"Count_Person_Incarcerated_TwoOrMoreRaces_MeasuredBasedOnJurisdiction"] = df[
"TWORACEF"] + df["TWORACEM"]
df_out[
"Count_MortalityEvent_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHTOTF"] + df["DTHTOTM"]
df_out[
"Count_MortalityEvent_Incarcerated_JudicialExecution_MeasuredBasedOnJurisdiction"] = df[
"DTHEXECF"] + df["DTHEXECM"]
df_out[
"Count_MortalityEvent_IllnessOrNaturalCause_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHILLNF"] + df["DTHILLNM"]
df_out[
"Count_MortalityEvent_AIDS_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHAIDSF"] + df["DTHAIDSM"]
df_out[
"Count_MortalityEvent_Incarcerated_IntentionalSelf-Harm(Suicide)_MeasuredBasedOnJurisdiction"] = df[
"DTHSUICF"] + df["DTHSUICM"]
df_out[
"Count_MortalityEvent_Accidents(UnintentionalInjuries)_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHACCF"] + df["DTHACCM"]
df_out[
"Count_MortalityEvent_DeathDueToAnotherPerson_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHPERSF"] + df["DTHPERSM"]
df_out[
"Count_MortalityEvent_Assault(Homicide)_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"DTHHOMIF"] + df["DTHHOMIM"]
df_out[
"Count_MortalityEvent_Incarcerated_NPSOtherCauseOfDeath_MeasuredBasedOnJurisdiction"] = df[
"DTHOTHF"] + df["DTHOTHM"]
df_out[
"Count_IncarcerationEvent_AdmittedToPrison_Incarcerated_MaxSentenceGreaterThan1Year_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"ADTOTF"] + df["ADTOTM"]
df_out[
"Count_IncarcerationEvent_Incarcerated_MaxSentenceGreaterThan1Year_ReleasedFromPrison_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"RLTOTF"] + df["RLTOTM"]
df_out[
"Count_Person_Incarcerated_MaxSentenceGreaterThan1Year_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"JURGT1F"] + df["JURGT1M"]
df_out[
"Count_Person_Incarcerated_MaxSentence1YearOrLess_Sentenced_MeasuredBasedOnJurisdiction"] = df[
"JURLT1F"] + df["JURLT1M"]
df_out[
"Count_Person_Incarcerated_Unsentenced_MeasuredBasedOnJurisdiction"] = df[
"JURUNSF"] + df["JURUNSM"]
df_out[
"Count_Person_Incarcerated_InState_PrivatelyOperated_MeasuredBasedOnJurisdiction"] = df[
"PVINF"] + df["PVINM"]
df_out[
"Count_Person_Incarcerated_OutOfState_PrivatelyOperated_MeasuredBasedOnJurisdiction"] = df[
"PVOTHF"] + df["PVOTHM"]
df_out[
"Count_Person_Incarcerated_Local_LocallyOperated_MeasuredBasedOnJurisdiction"] = df[
"LFF"] + df["LFM"]
df_out[
"Count_Person_FederallyOperated_Incarcerated_MeasuredBasedOnJurisdiction"] = df[
"FEDF"] + df["FEDM"]
df_out[
"Count_Person_Incarcerated_OutOfState_StateOperated_MeasuredBasedOnJurisdiction"] = df[
"OTHSTF"] + df["OTHSTM"]
df_out[
"Count_Person_Incarcerated_NotAUSCitizen_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZTOTF"] + df["NCITZTOTM"]
df_out[
"Count_Person_Incarcerated_MaxSentenceGreaterThan1Year_NotAUSCitizen_Sentenced_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZGT1F"] + df["NCITZGT1M"]
df_out[
"Count_Person_Incarcerated_MaxSentence1YearOrLess_NotAUSCitizen_Sentenced_StateOperated&FederallyOperated&PrivatelyOperated_MeasuredBasedOnCustody"] = df[
"NCITZLE1F"] + df["NCITZLE1M"]
df_out[
"Count_Person_Incarcerated_NotAUSCitizen_StateOperated&FederallyOperated&PrivatelyOperated_Unsentenced_MeasuredBasedOnCustody"] = df[
"NCITZUNSF"] + df["NCITZUNSM"]
df_out["Count_Person_Incarcerated_Under18_MeasuredBasedOnCustody"] = df[
"CUSLT18F"] + df["CUSLT18M"]
return df_out
def convert_geoId(fips_code):
"""Creates geoId column"""
return 'geoId/' + str(fips_code).zfill(2)
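# Illustrative example (not part of the original script): state FIPS codes are
# zero-padded to two digits, e.g. convert_geoId(6) returns 'geoId/06'.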
def convert_missing_value_to_nan(value):
"""codes for missing values are always negative and actual data is always >= 0"""
if isinstance(value, int) and value < 0:
return float("nan")
else:
return value
def convert_nan_to_empty_cell(value):
if pd.isna(value):
return ''
else:
return value
def preprocess_df(raw_df):
"""cleans raw_df
Args:
raw_data: raw data frame to be used as starting point for cleaning
"""
df = raw_df.copy()
df['GeoId'] = df['STATEID'].apply(convert_geoId)
# convert missing values to NaN for aggregation
for column_name in list(df.columns):
df[column_name] = df[column_name].apply(convert_missing_value_to_nan)
#get columns matching stat var names and add aggregate columns
df_out = pd.DataFrame(get_columns(df))
#convert NaN to empty cell
for column_name in list(df_out.columns):
df_out[column_name] = df_out[column_name].apply(
convert_nan_to_empty_cell)
return df_out
def main(args):
filename = FLAGS.preprocess_file
print('Processing {0}'.format(filename))
df = pd.read_csv(filename, delimiter='\t')
processed_df = preprocess_df(df)
processed_df.to_csv(filename.replace('.tsv', '_processed.csv'), index=False)
print('Done processing {0}'.format(filename))
if __name__ == '__main__':
app.run(main)
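# Usage sketch (the script name below is hypothetical; the --preprocess_file
# flag is defined elsewhere in this module as FLAGS.preprocess_file):
#
#   python3 preprocess_ncrp.py --preprocess_file=prisoners.tsv
#
# which writes the cleaned output next to the input as prisoners_processed.csv.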
| apache-2.0 | 8,942,910,421,515,007,000 | 45.867647 | 174 | 0.653488 | false |
seecr/meresco-distributed | meresco/distributed/failover/_conditionmet.py | 1 | 2572 | ## begin license ##
#
# "Meresco Distributed" has components for group management based on "Meresco Components."
#
# Copyright (C) 2018, 2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2021 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2021 SURF https://www.surf.nl
# Copyright (C) 2021 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Distributed"
#
# "Meresco Distributed" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Distributed" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Distributed"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from weightless.core import NoneOfTheObserversRespond, DeclineMessage
from meresco.core import Observable
class ConditionMet(Observable):
def __init__(self, condition, **kwargs):
Observable.__init__(self, **kwargs)
self._conditionMet = False
self._condition_test = condition
def updateConfig(self, **kwargs):
yield self.all.updateConfig(**kwargs)
self._conditionMet = self._condition_test(**kwargs)
def all_unknown(self, message, *args, **kwargs):
if self._conditionMet:
yield self.all.unknown(message, *args, **kwargs)
def any_unknown(self, message, *args, **kwargs):
if self._conditionMet:
try:
response = yield self.any.unknown(message, *args, **kwargs)
return response
except NoneOfTheObserversRespond:
pass
raise DeclineMessage
def do_unknown(self, message, *args, **kwargs):
if self._conditionMet:
self.do.unknown(message, *args, **kwargs)
def call_unknown(self, message, *args, **kwargs):
if self._conditionMet:
try:
return self.call.unknown(message, *args, **kwargs)
except NoneOfTheObserversRespond:
pass
raise DeclineMessage
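# Minimal usage sketch (all component names below are hypothetical, for
# illustration only): the condition callable receives the same keyword
# arguments as updateConfig and returns a bool; messages are only forwarded
# to observers while the condition held at the most recent config update.
#
#   from weightless.core import consume
#
#   def storageEnabled(config=None, **kwargs):
#       return bool((config or {}).get('storageEnabled'))
#
#   conditionMet = ConditionMet(condition=storageEnabled)
#   conditionMet.addObserver(SomeStorageComponent())
#   consume(conditionMet.updateConfig(config={'storageEnabled': True}))
#   # from here on, calls such as conditionMet.do.someMessage(...) reach the observer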
| gpl-2.0 | 5,258,369,946,063,491,000 | 37.969697 | 92 | 0.680016 | false |
scotwk/cloud-custodian | tools/zerodark/zerodark/ipdb.py | 1 | 24394 | # Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Kapil Thangavelu <[email protected]>"
import boto3
import click
from c7n.credentials import SessionFactory
from c7n.sqsexec import MessageIterator
from collections import Counter
from concurrent.futures import ProcessPoolExecutor, as_completed
from datetime import timedelta
from dateutil.parser import parse as date_parse
import gzip
import json
import logging
import multiprocessing
import os
import sqlite3
import time
import yaml
from .constants import RESOURCE_KEY, REGION_KEY
from .metrics import Resource
from .utils import human_size, unwrap, get_dates
log = logging.getLogger('zerodark.ipdb')
APP_TAG = os.environ.get('APP_TAG', 'app')
ENV_TAG = os.environ.get('ENV_TAG', 'env')
CONTACT_TAG = os.environ.get('CONTACT_TAG', 'contact')
def download_config(
client, bucket, prefix, account_id, region, day, store, rtypes=()):
config_prefix = "%sAWSLogs/%s/Config/%s/%s/ConfigHistory/" % (
prefix,
account_id,
region,
day.strftime('%Y/%-m/%-d'))
results = client.list_objects_v2(
Bucket=bucket,
Prefix=config_prefix)
if not os.path.exists(store):
os.makedirs(store)
files = []
downloads = Counter()
for k in results.get('Contents', ()):
found = False
for rt in rtypes:
if rt in k['Key']:
found = True
if not found:
continue
fname = k['Key'].rsplit('/', 1)[-1]
fpath = os.path.join(store, fname)
files.append(fpath)
if os.path.exists(fpath):
downloads['Cached'] += 1
downloads['CacheSize'] += k['Size']
continue
downloads['Downloads'] += 1
downloads['DownloadSize'] += k['Size']
client.download_file(bucket, k['Key'], fpath)
log.debug(
"Downloaded:%d Size:%d Cached:%d Size:%s Prefix:%s",
downloads['Downloads'],
downloads['DownloadSize'],
downloads['Cached'],
downloads['CacheSize'],
config_prefix)
return files, downloads
def process_account_resources(
account_id, bucket, prefix, region,
store, start, end, resource='NetworkInterface'):
client = boto3.client('s3')
files = []
t = time.time()
period_stats = Counter()
period = (end - start).days
resource = RESOURCE_MAPPING[resource]
for i in range(period):
day = start + timedelta(i)
d_files, stats = download_config(
client, bucket, prefix, account_id, region, day, store,
rtypes=(resource,))
files.extend(d_files)
period_stats.update(stats)
period_stats['FetchTime'] = int(time.time() - t)
return files, period_stats
def resource_info(eni_cfg):
desc = eni_cfg.get('description')
instance_id = eni_cfg['attachment'].get('instanceId', '')
if instance_id:
rtype = RESOURCE_KEY['ec2']
rid = instance_id
elif desc.startswith('ELB app/'):
rtype = RESOURCE_KEY["alb"]
rid = desc.split('/')[1]
elif desc.startswith('ELB net/'):
rtype = RESOURCE_KEY["nlb"]
rid = desc.split('/')[1]
elif desc.startswith('ELB '):
rtype = RESOURCE_KEY['elb']
rid = desc.split(' ', 1)[1]
elif desc.startswith('AWS ElasticMapReduce'):
rtype = RESOURCE_KEY['emr']
rid = desc.rsplit(' ', 1)[1]
elif desc.startswith('AWS created network interface for directory'):
rtype = RESOURCE_KEY['dir']
rid = desc.rsplit(' ', 1)[1]
elif desc.startswith('AWS Lambda VPC ENI:'):
rtype = RESOURCE_KEY['lambda']
rid = eni_cfg['requesterId'].split(':', 1)[1]
elif desc == 'RDSNetworkInterface':
rtype = RESOURCE_KEY['rds']
rid = ''
elif desc == 'RedshiftNetworkInterface':
rtype = RESOURCE_KEY['redshift']
rid = ''
elif desc.startswith('ElastiCache '):
rtype = RESOURCE_KEY['elasticache']
rid = desc.split(' ', 1)[1]
elif desc.startswith('ElastiCache+'):
rtype = RESOURCE_KEY['elasticache']
rid = desc.split('+', 1)[1]
elif desc.startswith('Interface for NAT Gateway '):
rtype = RESOURCE_KEY['nat']
rid = desc.rsplit(' ', 1)[1]
elif desc.startswith('EFS mount target'):
rtype = RESOURCE_KEY['efs-mount']
fsid, fsmd = desc.rsplit(' ', 2)[1:]
rid = "%s:%s" % (fsid, fsmd[1:-1])
elif desc.startswith('CloudHSM Managed Interface'):
rtype = RESOURCE_KEY['hsm']
rid = ''
elif desc.startswith('CloudHsm ENI '):
rtype = RESOURCE_KEY['hsmv2']
rid = desc.rsplit(' ', 1)[1]
elif desc == 'DMSNetworkInterface':
rtype = RESOURCE_KEY['dms']
rid = ''
elif desc.startswith('DAX '):
rtype = RESOURCE_KEY['dax']
rid = desc.rsplit(' ', 1)[1]
elif desc.startswith('arn:aws:ecs:'):
# a running task with attached net
# 'arn:aws:ecs:us-east-1:0111111111110:attachment/37a927f2-a8d1-46d7-8f96-d6aef13cc5b0'
# also has public ip.
rtype = RESOURCE_KEY['ecs']
rid = desc.rsplit('/', 1)[1]
elif desc.startswith('VPC Endpoint Interface'):
# instanceOwnerId: amazon-aws
# interfaceType: 'vpc_endpoint'
rtype = RESOURCE_KEY['vpce']
rid = desc.rsplit(' ', 1)[1]
elif eni_cfg['attachment']['instanceOwnerId'] == 'aws-lambda':
rtype = RESOURCE_KEY['lambda']
rid = eni_cfg['requesterId'].split(':', 1)[1]
else:
rtype = RESOURCE_KEY['unknown']
rid = json.dumps(eni_cfg)
return rtype, rid
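# Illustrative examples of the description -> (type, id) mapping above
# (identifiers are hypothetical):
#   attachment with 'instanceId': 'i-0abc123'               -> (RESOURCE_KEY['ec2'], 'i-0abc123')
#   description 'ELB app/my-alb/50dc6c495c0c9188'           -> (RESOURCE_KEY['alb'], 'my-alb')
#   description 'Interface for NAT Gateway nat-0123456789'  -> (RESOURCE_KEY['nat'], 'nat-0123456789')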
def resource_config_iter(files, batch_size=10000):
for f in files:
with gzip.open(f) as fh:
data = json.load(fh)
for config_set in chunks(data['configurationItems'], batch_size):
yield config_set
def record_stream_filter(record_stream, record_filter, batch_size=5000):
batch = []
for record_set in record_stream:
for r in record_set:
if record_filter(r):
batch.append(r)
if len(batch) % batch_size == 0:
yield batch
batch = []
if batch:
yield batch
EBS_SCHEMA = """
create table if not exists ebs (
volume_id text primary key,
instance_id text,
account_id text,
region text,
app text,
env text,
contact text,
start text,
end text
)
"""
def index_ebs_files(db, record_stream):
stats = Counter()
t = time.time()
with sqlite3.connect(db) as conn:
cursor = conn.cursor()
cursor.execute(EBS_SCHEMA)
rows = []
deletes = {}
skipped = 0
for record_set in record_stream:
for cfg in record_set:
stats['Records'] += 1
stats['Record%s' % cfg['configurationItemStatus']] += 1
if cfg['configurationItemStatus'] in ('ResourceDeleted',):
deletes[cfg['resourceId']] = cfg['configurationItemCaptureTime']
continue
if not cfg['configuration'].get('attachments'):
skipped += 1
continue
rows.append((
cfg['resourceId'],
cfg['configuration']['attachments'][0]['instanceId'],
cfg['awsAccountId'],
cfg['awsRegion'],
cfg['tags'].get(APP_TAG),
cfg['tags'].get(ENV_TAG),
cfg['tags'].get(CONTACT_TAG),
cfg['resourceCreationTime'],
None))
if rows:
for idx, r in enumerate(rows):
if r[0] in deletes:
rows[idx] = list(r)
rows[idx][-1] = deletes[r[0]]
cursor.executemany(
'''insert or replace into ebs values (?, ?, ?, ?, ?, ?, ?, ?, ?)''', rows)
stats['RowCount'] += len(rows)
log.debug("ebs stored:%d", len(rows))
stats['IndexTime'] = int(time.time() - t)
return stats
EC2_SCHEMA = """
create table if not exists ec2 (
instance_id text primary key,
account_id text,
region text,
ip_address text,
app text,
env text,
contact text,
asg text,
start datetime,
end datetime
"""
def index_ec2_files(db, record_stream):
stats = Counter()
t = time.time()
with sqlite3.connect(db) as conn:
cursor = conn.cursor()
cursor.execute(EC2_SCHEMA)
rows = []
deletes = []
for record_set in record_stream:
for cfg in record_set:
stats['Records'] += 1
stats['Record%s' % cfg['configurationItemStatus']] += 1
if cfg['configurationItemStatus'] in ('ResourceDeleted',):
deletes.append(((
cfg['configurationItemCaptureTime'], cfg['resourceId'])))
continue
if not cfg.get('tags'):
continue
rows.append((
cfg['resourceId'],
cfg['awsAccountId'],
cfg['awsRegion'],
cfg['configuration'].get('privateIpAddress', ''),
cfg['tags'].get(APP_TAG),
cfg['tags'].get(ENV_TAG),
cfg['tags'].get(CONTACT_TAG),
cfg['tags'].get('aws:autoscaling:groupName', ''),
cfg['resourceCreationTime'],
None))
if len(rows) % 1000 == 0:
stats['RowCount'] += len(rows)
cursor.executemany(
'''insert or replace into ec2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
rows)
rows = []
if deletes:
log.info("Delete count %d", len(deletes))
stmt = 'update ec2 set end = ? where instance_id = ?'
for p in deletes:
cursor.execute(stmt, p)
if rows:
cursor.executemany(
'''insert or replace into ec2 values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', rows)
log.debug("ec2s stored:%d", len(rows))
stats['RowCount'] += len(rows)
stats['IndexTime'] = int(time.time() - t)
return stats
S3_SCHEMA = """
create table if not exists buckets (
name text,
account_id text,
region text,
app text,
env text,
contact text,
start datetime,
end datetime,
resource text
)"""
def index_s3_files(db, record_stream):
stats = Counter()
t = time.time()
with sqlite3.connect(db) as conn:
cursor = conn.cursor()
cursor.execute(S3_SCHEMA)
deletes = {}
rows = []
for record_set in record_stream:
for cfg in record_set:
stats['Records'] += 1
stats['Record%s' % cfg['configurationItemStatus']] += 1
if cfg['configurationItemStatus'] == 'ResourceNotRecorded':
continue
                if cfg['configurationItemStatus'] in ('ResourceDeleted',):
deletes[cfg['resourceId']] = cfg['configurationItemCaptureTime']
rows.append((
cfg['resourceId'], None, None, None, None, None, None,
cfg['configurationItemCaptureTime'], None))
continue
rows.append((
cfg['resourceId'],
cfg['awsAccountId'],
cfg['awsRegion'],
cfg['tags'].get(APP_TAG),
cfg['tags'].get(ENV_TAG),
cfg['tags'].get(CONTACT_TAG),
cfg['resourceCreationTime'],
None,
json.dumps(cfg)))
if len(rows) % 10000:
cursor.executemany(
'''insert or replace into buckets values (?, ?, ?, ?, ?, ?, ?, ?, ?)''', rows)
stats['RowCount'] += len(rows)
if rows:
cursor.executemany(
'''insert or replace into buckets values (?, ?, ?, ?, ?, ?, ?, ?, ?)''', rows)
stats['RowCount'] += len(rows)
stats['IndexTime'] = int(time.time() - t)
return stats
ELB_SCHEMA = """
create table if not exists elbs (
name text primary key,
account_id text,
region text,
app text,
env text,
contact text,
start datetime,
end datetime
)"""
def index_elb_files(db, record_stream):
stats = Counter()
t = time.time()
with sqlite3.connect(db) as conn:
cursor = conn.cursor()
cursor.execute(ELB_SCHEMA)
rows = []
deletes = {}
for record_set in record_stream:
for cfg in record_set:
stats['Records'] += 1
stats['Record%s' % cfg['configurationItemStatus']] += 1
if cfg['configurationItemStatus'] in ('ResourceDeleted',):
deletes[cfg['resourceId']] = cfg['configurationItemCaptureTime']
continue
rows.append((
cfg['resourceName'],
cfg['awsAccountId'],
cfg['awsRegion'],
cfg['tags'].get(APP_TAG),
cfg['tags'].get(ENV_TAG),
cfg['tags'].get(CONTACT_TAG),
cfg['resourceCreationTime'],
None))
if rows:
for idx, r in enumerate(rows):
if r[0] in deletes:
rows[idx] = list(r)
rows[idx][-1] = deletes[r[0]]
cursor.executemany(
'''insert or replace into elbs values (?, ?, ?, ?, ?, ?, ?, ?)''', rows)
stats['RowCount'] += len(rows)
log.debug("elbs stored:%d", len(rows))
stats['IndexTime'] = int(time.time() - t)
return stats
ENI_SCHEMA = """
create table if not exists enis (
eni_id text primary key,
ip_address text,
account_id text,
resource_id text,
resource_type integer,
subnet_id text,
region integer,
start datetime,
end datetime
)"""
def index_eni_files(db, record_stream):
stats = Counter()
t = time.time()
with sqlite3.connect(db) as conn:
cursor = conn.cursor()
cursor.execute(ENI_SCHEMA)
cursor.execute('create index if not exists eni_idx on enis(ip_address)')
rows = []
skipped = 0
deletes = {}
rids = set()
for record_set in record_stream:
for cfg in record_set:
stats['Records'] += 1
stats['Record%s' % cfg['configurationItemStatus']] += 1
if cfg['configurationItemStatus'] not in (
'ResourceDeleted', 'ResourceDiscovered', 'OK'):
raise ValueError(cfg)
if cfg['configurationItemStatus'] in ('ResourceDeleted',):
deletes[cfg['resourceId']] = cfg['configurationItemCaptureTime']
continue
eni = cfg['configuration']
if 'attachment' not in eni or cfg['resourceId'] in rids:
skipped += 1
continue
rids.add(cfg['resourceId'])
rtype, rid = resource_info(eni)
rows.append((
eni['networkInterfaceId'],
eni['privateIpAddress'],
cfg['awsAccountId'],
rid,
rtype,
eni['subnetId'],
REGION_KEY[cfg['awsRegion']],
eni['attachment'].get('attachTime') or cfg['configurationItemCaptureTime'],
None))
log.debug(
"Records:%d Insert:%d Deletes:%d Skipped:%d Discovered:%d Deleted:%d Ok:%d",
stats['Records'], len(rows), len(deletes), skipped,
stats['RecordResourceDiscovered'], stats['RecordResourceDeleted'],
stats['RecordOK'])
if rows:
for idx, r in enumerate(rows):
if r[0] in deletes:
rows[idx] = list(r)
rows[idx][-1] = deletes[r[0]]
del deletes[r[0]]
try:
cursor.executemany(
'''insert into enis values (?, ?, ?, ?, ?, ?, ?, ?, ?)''', rows)
except Exception:
log.error("Error inserting enis account:%s rows:%d",
cfg['awsAccountId'], len(rows))
stats['RowCount'] += len(rows)
# result = cursor.execute('select count(distinct ip_address) from enis').fetchone()
stats['SkipCount'] = skipped
stats['IndexTime'] = int(time.time() - t)
return stats
def chunks(iterable, size=50):
"""Break an iterable into lists of size"""
batch = []
for n in iterable:
batch.append(n)
if len(batch) % size == 0:
yield batch
batch = []
if batch:
yield batch
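# e.g. list(chunks(range(5), size=2)) == [[0, 1], [2, 3], [4]]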
RESOURCE_MAPPING = {
'Instance': 'AWS::EC2::Instance',
'LoadBalancer': 'AWS::ElasticLoadBalancing',
'NetworkInterface': 'AWS::EC2::NetworkInterface',
'Volume': 'AWS::EC2::Volume',
'Bucket': 'AWS::S3::Bucket'
}
RESOURCE_FILE_INDEXERS = {
'Instance': index_ec2_files,
'NetworkInterface': index_eni_files,
'LoadBalancer': index_elb_files,
'Volume': index_ebs_files,
'Bucket': index_s3_files
}
@click.group()
def cli():
"""AWS Network Resource Database"""
@cli.command('worker')
@click.option('--queue')
@click.option('--s3-key')
@click.option('--period', default=60, type=click.INT)
@click.option('--verbose', default=False, is_flag=True)
def worker_config(queue, s3_key, period, verbose):
"""daemon queue worker for config notifications"""
logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('s3transfer').setLevel(logging.WARNING)
queue, region = get_queue(queue)
factory = SessionFactory(region)
session = factory()
client = session.client('sqs')
messages = MessageIterator(client, queue, timeout=20)
for m in messages:
msg = unwrap(m)
if 'configurationItemSummary' in msg:
rtype = msg['configurationItemSummary']['resourceType']
else:
rtype = msg['configurationItem']['resourceType']
if rtype not in RESOURCE_MAPPING.values():
log.info("skipping %s" % rtype)
messages.ack(m)
log.info("message received %s", m)
def get_queue(queue):
if queue.startswith('https://queue.amazonaws.com'):
region = 'us-east-1'
queue_url = queue
elif queue.startswith('https://sqs.'):
region = queue.split('.', 2)[1]
queue_url = queue
    elif queue.startswith('arn:aws:sqs'):
queue_arn_split = queue.split(':', 5)
region = queue_arn_split[3]
owner_id = queue_arn_split[4]
queue_name = queue_arn_split[5]
queue_url = "https://sqs.%s.amazonaws.com/%s/%s" % (
region, owner_id, queue_name)
return queue_url, region
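# Examples (hypothetical identifiers):
#   get_queue('https://sqs.us-west-2.amazonaws.com/123456789012/config-q')
#       -> ('https://sqs.us-west-2.amazonaws.com/123456789012/config-q', 'us-west-2')
#   get_queue('arn:aws:sqs:us-east-1:123456789012:config-q')
#       -> ('https://sqs.us-east-1.amazonaws.com/123456789012/config-q', 'us-east-1')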
@cli.command('list-app-resources')
@click.option('--app')
@click.option('--env')
@click.option('--cmdb')
@click.option('--start')
@click.option('--end')
@click.option('--tz')
@click.option(
'-r', '--resources', multiple=True,
type=click.Choice(['Instance', 'LoadBalancer', 'Volume']))
def list_app_resources(
app, env, resources, cmdb, start, end, tz):
"""Analyze flow log records for application and generate metrics per period"""
logging.basicConfig(level=logging.INFO)
start, end = get_dates(start, end, tz)
all_resources = []
for rtype_name in resources:
rtype = Resource.get_type(rtype_name)
resources = rtype.get_resources(cmdb, start, end, app, env)
all_resources.extend(resources)
print(json.dumps(all_resources, indent=2))
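# Example invocation (values are hypothetical; the executable name depends on
# how this module's cli group is installed):
#
#   zerodark list-app-resources --app shop --env prod --cmdb resources.db \
#       --start 2018-03-01 --end 2018-03-08 -r Instance -r LoadBalancer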
@cli.command('load-resources')
@click.option('--bucket', required=True, help="Config Bucket")
@click.option('--prefix', required=True, help="Config Bucket Prefix")
@click.option('--region', required=True, help="Load Config for Region")
@click.option('--account-config', type=click.File('rb'), required=True)
@click.option('-a', '--accounts', multiple=True)
@click.option('--assume', help="Assume role")
@click.option('--start')
@click.option('--end')
@click.option('-r', '--resources', multiple=True,
type=click.Choice(list(RESOURCE_FILE_INDEXERS.keys())))
@click.option('--store', type=click.Path())
@click.option('-f', '--db')
@click.option('-v', '--verbose', is_flag=True)
@click.option('--debug', is_flag=True)
def load_resources(bucket, prefix, region, account_config, accounts,
assume, start, end, resources, store, db, verbose, debug):
"""load resources into resource database."""
logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('s3transfer').setLevel(logging.WARNING)
start = date_parse(start)
end = date_parse(end)
if not resources:
resources = ['NetworkInterface', 'Instance', 'LoadBalancer']
account_map = {}
data = yaml.safe_load(account_config.read())
for a in data.get('accounts', ()):
if accounts and (a['name'] in accounts or a['account_id'] in accounts):
account_map[a['account_id']] = a
elif not accounts:
account_map[a['account_id']] = a
account_ids = list(account_map)
executor = ProcessPoolExecutor
if debug:
from c7n.executor import MainThreadExecutor
MainThreadExecutor.async = False
executor = MainThreadExecutor
stats = Counter()
t = time.time()
with executor(max_workers=multiprocessing.cpu_count()) as w:
futures = {}
for a in account_ids:
for r in resources:
futures[w.submit(
process_account_resources, a, bucket, prefix,
region, store, start, end, r)] = (a, r)
indexer = RESOURCE_FILE_INDEXERS[r]
for f in as_completed(futures):
a, r = futures[f]
if f.exception():
log.error("account:%s error:%s", a, f.exception())
continue
files, dl_stats = f.result()
idx_stats = indexer(db, resource_config_iter(files))
log.info(
"loaded account:%s files:%d bytes:%s events:%d resources:%d idx-time:%d dl-time:%d",
account_map[a]['name'], len(files),
human_size(dl_stats['DownloadSize'] + dl_stats['CacheSize']),
idx_stats['Records'],
idx_stats['RowCount'],
idx_stats['IndexTime'],
dl_stats['FetchTime'])
stats.update(dl_stats)
stats.update(idx_stats)
log.info("Loaded %d resources across %d accounts in %0.2f",
stats['RowCount'], len(account_ids), time.time() - t)
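# Example invocation (bucket, paths and dates are hypothetical):
#
#   python ipdb.py load-resources --bucket my-config-bucket --prefix config/ \
#       --region us-east-1 --account-config accounts.yml \
#       --start 2018-03-01 --end 2018-03-08 -r NetworkInterface \
#       --store ./config-cache -f resources.db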
if __name__ == '__main__':
try:
cli()
except Exception:
import pdb, traceback, sys
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
| apache-2.0 | -6,660,086,870,212,036,000 | 33.309423 | 100 | 0.540051 | false |
szlin/gitsome | tests/data/thread.py | 1 | 1319 | # -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
formatted_threads = u'\x1b[35m 1. \x1b[0mtitle1 \x1b[0m\x1b[35m(foo/bar/issues/1)\x1b[0m\n\x1b[32m Seen: False \x1b[0m\x1b[36mType: type1 \x1b[0m\x1b[33mUpdated: just now \x1b[0m\n\x1b[35m 2. \x1b[0mtitle2 \x1b[0m\x1b[35m(foo/bar/issues/1)\x1b[0m\n\x1b[32m Seen: True \x1b[0m\x1b[36mType: type2 \x1b[0m\x1b[33mUpdated: just now \x1b[0m\n\x1b[35m 3. \x1b[0mtitle3 \x1b[0m\x1b[35m(foo/bar/issues/1)\x1b[0m\n\x1b[32m Seen: False \x1b[0m\x1b[36mType: type3 \x1b[0m\x1b[33mUpdated: just now \x1b[0m\n View the page for \x1b[0m\x1b[35m1 through \x1b[0m\x1b[35m3\x1b[0m with the following command:\n\x1b[0m\x1b[35m gh view [#] \x1b[0moptional: [-b/--browser] [--help]\n\x1b[0m\x1b[0m'
| gpl-3.0 | -6,820,701,067,351,513,000 | 81.4375 | 739 | 0.692191 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/codehosting/tests/test_safe_open.py | 1 | 16078 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for the safe branch open code."""
__metaclass__ = type
from bzrlib.branch import (
Branch,
BranchReferenceFormat,
BzrBranchFormat7,
)
from bzrlib.bzrdir import (
BzrDir,
BzrDirMetaFormat1,
BzrProber,
)
from bzrlib.controldir import ControlDirFormat
from bzrlib.errors import NotBranchError
from bzrlib.repofmt.knitpack_repo import RepositoryFormatKnitPack1
from bzrlib.tests import TestCaseWithTransport
from bzrlib.transport import chroot
from lazr.uri import URI
from lp.codehosting.safe_open import (
BadUrl,
BlacklistPolicy,
BranchLoopError,
BranchReferenceForbidden,
safe_open,
SafeBranchOpener,
WhitelistPolicy,
)
from lp.codehosting.tests.helpers import force_stacked_on_url
from lp.testing import TestCase
class TestSafeBranchOpenerCheckAndFollowBranchReference(TestCase):
"""Unit tests for `SafeBranchOpener.checkAndFollowBranchReference`."""
def setUp(self):
super(TestSafeBranchOpenerCheckAndFollowBranchReference, self).setUp()
SafeBranchOpener.install_hook()
class StubbedSafeBranchOpener(SafeBranchOpener):
"""SafeBranchOpener that provides canned answers.
        We implement the methods we need to be able to control all the
inputs to the `BranchMirrorer.checkSource` method, which is what is
being tested in this class.
"""
def __init__(self, references, policy):
parent_cls = TestSafeBranchOpenerCheckAndFollowBranchReference
super(parent_cls.StubbedSafeBranchOpener, self).__init__(policy)
self._reference_values = {}
for i in range(len(references) - 1):
self._reference_values[references[i]] = references[i + 1]
self.follow_reference_calls = []
def followReference(self, url):
self.follow_reference_calls.append(url)
return self._reference_values[url]
def makeBranchOpener(self, should_follow_references, references,
unsafe_urls=None):
policy = BlacklistPolicy(should_follow_references, unsafe_urls)
opener = self.StubbedSafeBranchOpener(references, policy)
return opener
def testCheckInitialURL(self):
# checkSource rejects all URLs that are not allowed.
opener = self.makeBranchOpener(None, [], set(['a']))
self.assertRaises(
BadUrl, opener.checkAndFollowBranchReference, 'a')
def testNotReference(self):
# When branch references are forbidden, checkAndFollowBranchReference
# does not raise on non-references.
opener = self.makeBranchOpener(False, ['a', None])
self.assertEquals(
'a', opener.checkAndFollowBranchReference('a'))
self.assertEquals(['a'], opener.follow_reference_calls)
def testBranchReferenceForbidden(self):
# checkAndFollowBranchReference raises BranchReferenceForbidden if
# branch references are forbidden and the source URL points to a
# branch reference.
opener = self.makeBranchOpener(False, ['a', 'b'])
self.assertRaises(
BranchReferenceForbidden,
opener.checkAndFollowBranchReference, 'a')
self.assertEquals(['a'], opener.follow_reference_calls)
def testAllowedReference(self):
# checkAndFollowBranchReference does not raise if following references
# is allowed and the source URL points to a branch reference to a
# permitted location.
opener = self.makeBranchOpener(True, ['a', 'b', None])
self.assertEquals(
'b', opener.checkAndFollowBranchReference('a'))
self.assertEquals(['a', 'b'], opener.follow_reference_calls)
def testCheckReferencedURLs(self):
# checkAndFollowBranchReference checks if the URL a reference points
# to is safe.
opener = self.makeBranchOpener(
True, ['a', 'b', None], unsafe_urls=set('b'))
self.assertRaises(
BadUrl, opener.checkAndFollowBranchReference, 'a')
self.assertEquals(['a'], opener.follow_reference_calls)
def testSelfReferencingBranch(self):
# checkAndFollowBranchReference raises BranchReferenceLoopError if
# following references is allowed and the source url points to a
# self-referencing branch reference.
opener = self.makeBranchOpener(True, ['a', 'a'])
self.assertRaises(
BranchLoopError, opener.checkAndFollowBranchReference, 'a')
self.assertEquals(['a'], opener.follow_reference_calls)
def testBranchReferenceLoop(self):
# checkAndFollowBranchReference raises BranchReferenceLoopError if
# following references is allowed and the source url points to a loop
# of branch references.
references = ['a', 'b', 'a']
opener = self.makeBranchOpener(True, references)
self.assertRaises(
BranchLoopError, opener.checkAndFollowBranchReference, 'a')
self.assertEquals(['a', 'b'], opener.follow_reference_calls)
class TrackingProber(BzrProber):
"""Subclass of BzrProber which tracks URLs it has been asked to open."""
seen_urls = []
@classmethod
def probe_transport(klass, transport):
klass.seen_urls.append(transport.base)
return BzrProber.probe_transport(transport)
class TestSafeBranchOpenerStacking(TestCaseWithTransport):
def setUp(self):
super(TestSafeBranchOpenerStacking, self).setUp()
SafeBranchOpener.install_hook()
def makeBranchOpener(self, allowed_urls, probers=None):
policy = WhitelistPolicy(True, allowed_urls, True)
return SafeBranchOpener(policy, probers)
def makeBranch(self, path, branch_format, repository_format):
"""Make a Bazaar branch at 'path' with the given formats."""
bzrdir_format = BzrDirMetaFormat1()
bzrdir_format.set_branch_format(branch_format)
bzrdir = self.make_bzrdir(path, format=bzrdir_format)
repository_format.initialize(bzrdir)
return bzrdir.create_branch()
def testProbers(self):
# Only the specified probers should be used
b = self.make_branch('branch')
opener = self.makeBranchOpener([b.base], probers=[])
self.assertRaises(NotBranchError, opener.open, b.base)
opener = self.makeBranchOpener([b.base], probers=[BzrProber])
self.assertEquals(b.base, opener.open(b.base).base)
def testDefaultProbers(self):
# If no probers are specified to the constructor
# of SafeBranchOpener, then a safe set will be used,
# rather than all probers registered in bzr.
self.addCleanup(ControlDirFormat.unregister_prober, TrackingProber)
ControlDirFormat.register_prober(TrackingProber)
# Open a location without any branches, so that all probers are
# tried.
# First, check that the TrackingProber tracks correctly.
TrackingProber.seen_urls = []
opener = self.makeBranchOpener(["."], probers=[TrackingProber])
self.assertRaises(NotBranchError, opener.open, ".")
self.assertEquals(1, len(TrackingProber.seen_urls))
TrackingProber.seen_urls = []
# And make sure it's registered in such a way that BzrDir.open would
# use it.
self.assertRaises(NotBranchError, BzrDir.open, ".")
self.assertEquals(1, len(TrackingProber.seen_urls))
TrackingProber.seen_urls = []
# Make sure that SafeBranchOpener doesn't use it if no
# probers were specified
opener = self.makeBranchOpener(["."])
self.assertRaises(NotBranchError, opener.open, ".")
self.assertEquals(0, len(TrackingProber.seen_urls))
def testAllowedURL(self):
# checkSource does not raise an exception for branches stacked on
# branches with allowed URLs.
stacked_on_branch = self.make_branch('base-branch')
stacked_branch = self.make_branch('stacked-branch')
stacked_branch.set_stacked_on_url(stacked_on_branch.base)
opener = self.makeBranchOpener(
[stacked_branch.base, stacked_on_branch.base])
# This doesn't raise an exception.
opener.open(stacked_branch.base)
def testUnstackableRepository(self):
# checkSource treats branches with UnstackableRepositoryFormats as
# being not stacked.
branch = self.makeBranch(
'unstacked', BzrBranchFormat7(), RepositoryFormatKnitPack1())
opener = self.makeBranchOpener([branch.base])
# This doesn't raise an exception.
opener.open(branch.base)
def testAllowedRelativeURL(self):
# checkSource passes on absolute urls to checkOneURL, even if the
# value of stacked_on_location in the config is set to a relative URL.
stacked_on_branch = self.make_branch('base-branch')
stacked_branch = self.make_branch('stacked-branch')
stacked_branch.set_stacked_on_url('../base-branch')
opener = self.makeBranchOpener(
[stacked_branch.base, stacked_on_branch.base])
# Note that stacked_on_branch.base is not '../base-branch', it's an
# absolute URL.
self.assertNotEqual('../base-branch', stacked_on_branch.base)
# This doesn't raise an exception.
opener.open(stacked_branch.base)
def testAllowedRelativeNested(self):
# Relative URLs are resolved relative to the stacked branch.
self.get_transport().mkdir('subdir')
a = self.make_branch('subdir/a')
b = self.make_branch('b')
b.set_stacked_on_url('../subdir/a')
c = self.make_branch('subdir/c')
c.set_stacked_on_url('../../b')
opener = self.makeBranchOpener([c.base, b.base, a.base])
# This doesn't raise an exception.
opener.open(c.base)
def testForbiddenURL(self):
# checkSource raises a BadUrl exception if a branch is stacked on a
# branch with a forbidden URL.
stacked_on_branch = self.make_branch('base-branch')
stacked_branch = self.make_branch('stacked-branch')
stacked_branch.set_stacked_on_url(stacked_on_branch.base)
opener = self.makeBranchOpener([stacked_branch.base])
self.assertRaises(BadUrl, opener.open, stacked_branch.base)
def testForbiddenURLNested(self):
# checkSource raises a BadUrl exception if a branch is stacked on a
# branch that is in turn stacked on a branch with a forbidden URL.
a = self.make_branch('a')
b = self.make_branch('b')
b.set_stacked_on_url(a.base)
c = self.make_branch('c')
c.set_stacked_on_url(b.base)
opener = self.makeBranchOpener([c.base, b.base])
self.assertRaises(BadUrl, opener.open, c.base)
def testSelfStackedBranch(self):
# checkSource raises StackingLoopError if a branch is stacked on
# itself. This avoids infinite recursion errors.
a = self.make_branch('a')
force_stacked_on_url(a, a.base)
opener = self.makeBranchOpener([a.base])
self.assertRaises(BranchLoopError, opener.open, a.base)
def testLoopStackedBranch(self):
# checkSource raises StackingLoopError if a branch is stacked in such
# a way so that it is ultimately stacked on itself. e.g. a stacked on
# b stacked on a.
a = self.make_branch('a')
b = self.make_branch('b')
a.set_stacked_on_url(b.base)
b.set_stacked_on_url(a.base)
opener = self.makeBranchOpener([a.base, b.base])
self.assertRaises(BranchLoopError, opener.open, a.base)
self.assertRaises(BranchLoopError, opener.open, b.base)
def testCustomOpener(self):
# A custom function for opening a control dir can be specified.
a = self.make_branch('a')
b = self.make_branch('b')
b.set_stacked_on_url(a.base)
TrackingProber.seen_urls = []
opener = self.makeBranchOpener(
[a.base, b.base], probers=[TrackingProber])
opener.open(b.base)
self.assertEquals(
set(TrackingProber.seen_urls), set([b.base, a.base]))
def testCustomOpenerWithBranchReference(self):
# A custom function for opening a control dir can be specified.
a = self.make_branch('a')
b_dir = self.make_bzrdir('b')
b = BranchReferenceFormat().initialize(b_dir, target_branch=a)
TrackingProber.seen_urls = []
opener = self.makeBranchOpener(
[a.base, b.base], probers=[TrackingProber])
opener.open(b.base)
self.assertEquals(
set(TrackingProber.seen_urls), set([b.base, a.base]))
def test_ignore_fallbacks(self):
""""Cross-format stacking doesn't error with ignore_fallbacks."""
stacked, stacked_on = make_cross_format_stacked(self)
opener = self.makeBranchOpener([stacked.base, stacked_on.base])
opener.open(stacked.base, ignore_fallbacks=True)
def make_cross_format_stacked(test_case):
test_case.get_transport().mkdir('inside')
stacked = test_case.make_branch('inside/stacked', format='1.6')
stacked_on = test_case.make_branch('inside/stacked-on', format='2a')
force_stacked_on_url(stacked, stacked_on.base)
return stacked, stacked_on
class TestSafeOpen(TestCaseWithTransport):
"""Tests for `safe_open`."""
def setUp(self):
super(TestSafeOpen, self).setUp()
SafeBranchOpener.install_hook()
def test_hook_does_not_interfere(self):
# The transform_fallback_location hook does not interfere with regular
# stacked branch access outside of safe_open.
self.make_branch('stacked')
self.make_branch('stacked-on')
Branch.open('stacked').set_stacked_on_url('../stacked-on')
Branch.open('stacked')
def get_chrooted_scheme(self, relpath):
"""Create a server that is chrooted to `relpath`.
:return: ``(scheme, get_url)`` where ``scheme`` is the scheme of the
chroot server and ``get_url`` returns URLs on said server.
"""
transport = self.get_transport(relpath)
chroot_server = chroot.ChrootServer(transport)
chroot_server.start_server()
self.addCleanup(chroot_server.stop_server)
def get_url(relpath):
return chroot_server.get_url() + relpath
return URI(chroot_server.get_url()).scheme, get_url
def test_stacked_within_scheme(self):
# A branch that is stacked on a URL of the same scheme is safe to
# open.
self.get_transport().mkdir('inside')
self.make_branch('inside/stacked')
self.make_branch('inside/stacked-on')
scheme, get_chrooted_url = self.get_chrooted_scheme('inside')
Branch.open(get_chrooted_url('stacked')).set_stacked_on_url(
get_chrooted_url('stacked-on'))
safe_open(scheme, get_chrooted_url('stacked'))
def test_stacked_outside_scheme(self):
# A branch that is stacked on a URL that is not of the same scheme is
# not safe to open.
self.get_transport().mkdir('inside')
self.get_transport().mkdir('outside')
self.make_branch('inside/stacked')
self.make_branch('outside/stacked-on')
scheme, get_chrooted_url = self.get_chrooted_scheme('inside')
Branch.open(get_chrooted_url('stacked')).set_stacked_on_url(
self.get_url('outside/stacked-on'))
self.assertRaises(
BadUrl, safe_open, scheme, get_chrooted_url('stacked'))
def test_ignore_fallbacks(self):
""""Cross-format stacking doesn't error with ignore_fallbacks."""
scheme, get_chrooted_url = self.get_chrooted_scheme('inside')
stacked, stacked_on = make_cross_format_stacked(self)
force_stacked_on_url(stacked, get_chrooted_url('stacked-on'))
safe_open(scheme, get_chrooted_url('stacked'), ignore_fallbacks=True)
| agpl-3.0 | -2,065,448,850,034,553,000 | 41.089005 | 78 | 0.662085 | false |
jjdmol/LOFAR | LTA/LTAIngest/dav/webdav/acp/Acl.py | 1 | 10978 | # pylint: disable-msg=W0622
#
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ACL object handling according to WebDAV ACP specification.
"""
from webdav.acp.Ace import ACE
from webdav import Constants
from webdav.Connection import WebdavError
from davlib import XML_DOC_HEADER
__version__ = "$LastChangedRevision$"
class ACL(object):
"""
This class provides access to Access Control List funcionality
as specified in the WebDAV ACP.
@ivar aces: ACEs in ACL
@type aces: C{list} of L{ACE} objects
@ivar withInherited: Flag indicating whether ACL contains inherited ACEs.
@type withInherited: C{bool}
"""
# restrict instance variables
__slots__ = ('aces', 'withInherited')
def __init__(self, domroot=None, aces=None):
"""
        Constructor should be called with either no parameters (create a blank ACL),
or one parameter (a DOM tree or ACE list).
@param domroot: A DOM tree (default: None).
@type domroot: L{webdav.WebdavResponse.Element} object
@param aces: ACE objects (default: None)
@type aces: C{list} of L{ACE} objects
@raise WebdavError: When non-valid parameters are passed a L{WebdavError} is raised.
"""
self.withInherited = None
self.aces = []
if domroot:
for child in domroot.children:
if child.name == Constants.TAG_ACE and child.ns == Constants.NS_DAV:
self.addAce(ACE(child))
else:
# This shouldn't happen, someone screwed up with the params ...
raise WebdavError('Non-ACE tag handed to ACL constructor: ' + child.ns + child.name)
elif isinstance(aces, list) or isinstance(aces, tuple):
self.addAces(aces)
elif domroot == None and aces == None:
# no param ==> blank object
pass
else:
# This shouldn't happen, someone screwed up with the params ...
raise WebdavError('non-valid parameters handed to ACL constructor')
def __cmp__(self, other):
if not isinstance(other, ACL):
return 1
if self.withInherited == other.withInherited:
equal = 1
for ace in self.aces:
inList = 0
for otherAce in other.aces:
if ace == otherAce:
inList = 1
if inList == 0:
equal = 0
return not equal
else:
return 1
def __repr__(self):
repr = '<class ACL: '
if self.withInherited:
repr += 'with inherited, '
first = 1
repr += 'aces: ['
for ace in self.aces:
if first:
repr += '%s' % ace
first = 0
else:
repr += ', %s' % ace
return '%s]>' % (repr)
def copy(self, other):
'''Copy an ACL object.
@param other: Another ACL to copy.
@type other: L{ACL} object
@raise WebdavError: When an object that is not an L{ACL} is passed
a L{WebdavError} is raised.
'''
if not isinstance(other, ACL):
raise WebdavError('Non-ACL object passed to copy method: %s' % other.__class__)
self.withInherited = other.withInherited
if other.aces:
self.addAces(other.aces)
def toXML(self):
"""
Returns ACL content as a string of valid XML as described in WebDAV ACP.
"""
aclTag = 'D:' + Constants.TAG_ACL
return XML_DOC_HEADER +\
'<' + aclTag + ' xmlns:D="DAV:">' + reduce(lambda xml, ace: xml + ace.toXML() + '\n', [''] + self.aces) +\
'</' + aclTag + '>'
def addAce(self, ace):
'''
Adds the passed ACE object to list if it's not in it, yet.
@param ace: An ACE.
@type ace: L{ACE} object
'''
newAce = ACE()
newAce.copy(ace)
# only add it if it's not in the list, yet ...
inList = 0
for element in self.aces:
if element == ace:
inList = 1
if not inList:
self.aces.append(newAce)
def addAces(self, aces):
'''Adds the list of passed ACE objects to list.
@param aces: ACEs
@type aces: sequence of L{ACE} objects
'''
for ace in aces:
self.addAce(ace)
def delAce(self, ace):
'''Deletes the passed ACE object from list.
@param ace: An ACE.
@type ace: L{ACE} object
@raise WebdavError: When the ACE to be deleted is not within the ACL
a L{WebdavError} is raised.
'''
# find where it is and delete it ...
count = 0
index = 0
for element in self.aces:
count += 1
if element == ace:
index = count
if index:
self.aces.pop(index - 1)
else:
raise WebdavError('ACE to be deleted not in list: %s.' % ace)
def delAces(self, aces):
'''Deletes the list of passed ACE objects from list.
@param aces: ACEs
@type aces: sequence of L{ACE} objects
'''
for ace in aces:
self.delAce(ace)
def delPrincipalsAces(self, principal):
"""
Deletes all ACEs in ACL by given principal.
@param principal: A principal.
@type principal: L{Principal} object
"""
# find where it is and delete it ...
index = 0
while index < len(self.aces):
if self.aces[index].principal.principalURL == principal.principalURL:
self.aces.pop(index)
else:
index += 1
def joinGrantDeny(self):
"""
Returns a "refined" ACL of the ACL for ease of use in the UI.
The purpose is to post the user an ACE that can contain both, granted
and denied, privileges. So possible pairs of grant and deny ACEs are joined
to return them in one ACE. This resulting ACE then of course IS NOT valid
for setting ACLs anymore. They will have to be reconverted to yield valid
ACLs for the ACL method.
@return: A (non-valid) ACL that contains both grant and deny clauses in an ACE.
@rtype: L{ACL} object
"""
joinedAces = {}
for ace in self.aces:
if not ace.principal.principalURL is None:
principalKey = ace.principal.principalURL
elif not ace.principal.property is None:
principalKey = ace.principal.property
else:
principalKey = None
if ace.inherited:
principalKey = ace.inherited + ":" + principalKey
if principalKey in joinedAces:
joinedAces[principalKey].addGrantDenies(ace.grantDenies)
else:
joinedAces[principalKey] = ACE()
joinedAces[principalKey].copy(ace)
newAcl = ACL()
newAcl.addAces(joinedAces.values())
return newAcl
def splitGrantDeny(self):
"""
Returns a "refined" ACL of the ACL for ease of use in the UI.
The purpose is to post the user an ACE that can contain both, granted
and denied, privileges. So possible joined grant and deny clauses in ACEs
splitted to return them in separate ACEs. This resulting ACE then is valid
for setting ACLs again. This method is to be seen in conjunction with the
method joinGrantDeny as it reverts its effect.
@return: A valid ACL that contains only ACEs with either grant or deny clauses.
@rtype: L{ACL} object
"""
acesGrant = {}
acesDeny = {}
for ace in self.aces:
for grantDeny in ace.grantDenies:
if grantDeny.isGrant():
if ace.principal.principalURL in acesGrant:
ace.addGrantDeny(grantDeny)
else:
acesGrant[ace.principal.principalURL] = ACE()
acesGrant[ace.principal.principalURL].copy(ace)
acesGrant[ace.principal.principalURL].grantDenies = []
acesGrant[ace.principal.principalURL].addGrantDeny(grantDeny)
else:
if ace.principal.principalURL in acesDeny:
ace.addGrantDeny(grantDeny)
else:
acesDeny[ace.principal.principalURL] = ACE()
acesDeny[ace.principal.principalURL].copy(ace)
acesDeny[ace.principal.principalURL].grantDenies = []
acesDeny[ace.principal.principalURL].addGrantDeny(grantDeny)
newAcl = ACL()
newAcl.addAces(acesGrant.values())
newAcl.addAces(acesDeny.values())
return newAcl
def isValid(self):
"""
Returns true (1) if all contained ACE objects are valid,
otherwise false (0) is returned.
@return: Validity of ACL.
@rtype: C{bool}
"""
valid = 1
if len(self.aces):
for ace in self.aces:
if not ace.isValid():
valid = 0
return valid
def stripAces(self, inherited=True, protected=True):
"""
Returns an ACL object with all ACEs stripped that are inherited
and/or protected.
@param inherited: Flag to indicate whether inherited ACEs should
be stripped (default: True).
@type inherited: C{bool}
@param protected: Flag to indicate whether protected ACEs should
be stripped (default: True).
@type protected: C{bool}
@return: An ACL without the stripped ACEs.
@rtype: L{ACL} object
"""
newAcl = ACL()
if len(self.aces):
for ace in self.aces:
keep = 1
if inherited and ace.inherited:
keep = 0
elif protected and ace.protected:
keep = 0
if keep:
newAcl.addAce(ace)
return newAcl
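# Usage sketch (hypothetical; constructing ACE/principal objects is covered in
# webdav.acp.Ace and is only hinted at here):
#
#   acl = ACL(aces=existing_aces)        # existing_aces: ACE objects built elsewhere
#   ui_acl = acl.joinGrantDeny()         # one ACE per principal, grants and denies mixed
#   valid_acl = ui_acl.splitGrantDeny()  # back to ACEs holding only grants or only denies
#   if valid_acl.isValid():
#       body = valid_acl.toXML()         # XML body for a WebDAV ACL request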
| gpl-3.0 | 5,891,104,285,362,545,000 | 34.299035 | 118 | 0.55092 | false |
thundernet8/WRGameVideos-API | venv/lib/python2.7/site-packages/flask_jsonpify.py | 1 | 2543 | from flask import current_app, json, request
def __pad(strdata):
""" Pads `strdata` with a Request's callback argument, if specified, or does
nothing.
"""
if request.args.get('callback'):
return "%s(%s);" % (request.args.get('callback'), strdata)
else:
return strdata
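# e.g. with ?callback=cb in the request args, __pad('{"a": 1}') returns 'cb({"a": 1});'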
def __mimetype():
if request.args.get('callback'):
return 'application/javascript'
else:
return 'application/json'
def __dumps(*args, **kwargs):
""" Serializes `args` and `kwargs` as JSON. Supports serializing an array
as the top-level object, if it is the only argument.
"""
indent = None
if (current_app.config.get('JSONIFY_PRETTYPRINT_REGULAR', False)
and not request.is_xhr):
indent = 2
    return json.dumps(args[0] if len(args) == 1 else dict(*args, **kwargs),
indent=indent)
def jsonpify(*args, **kwargs):
"""Creates a :class:`~flask.Response` with the JSON or JSON-P
representation of the given arguments with an `application/json`
or `application/javascript` mimetype, respectively. The arguments
to this function are the same as to the :class:`dict` constructor,
but also accept an array. If a `callback` is specified in the
request arguments, the response is JSON-Padded.
Example usage::
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
GET /_get_current_user:
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
or, if a callback is specified,
GET /_get_current_user?callback=displayUsers
Will result in a JSON response like this to the browser::
displayUsers({
"username": "admin",
"email": "admin@localhost",
"id": 42
});
This requires Python 2.6 or an installed version of simplejson. For
security reasons only objects are supported toplevel. For more
information about this, have a look at :ref:`json-security`.
.. versionadded:: 0.2
"""
return current_app.response_class(__pad(__dumps(*args, **kwargs)),
mimetype=__mimetype())
jsonify = jsonpify # allow override of Flask's jsonify.
| gpl-2.0 | -717,886,847,209,968,100 | 29.395062 | 80 | 0.583956 | false |
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/core/tests/test_records.py | 1 | 14928 | from __future__ import division, absolute_import, print_function
import collections
import pickle
import sys
from os import path
import numpy as np
from numpy.compat import asbytes
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_raises, assert_warns
)
class TestFromrecords(TestCase):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
assert_equal(r['col1'].dtype.kind, 'i')
if sys.version_info[0] >= 3:
assert_equal(r['col2'].dtype.kind, 'U')
assert_equal(r['col2'].dtype.itemsize, 12)
else:
assert_equal(r['col2'].dtype.kind, 'S')
assert_equal(r['col2'].dtype.itemsize, 3)
assert_equal(r['col3'].dtype.kind, 'f')
def test_method_array(self):
r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big')
assert_equal(r[1].item(), (25444, asbytes('efg'), 1633837924))
def test_method_array2(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1].item(), (2, 22.0, asbytes('b')))
def test_recarray_slices(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1::2][1].item(), (4, 44.0, asbytes('d')))
def test_recarray_fromarrays(self):
x1 = np.array([1, 2, 3, 4])
x2 = np.array(['a', 'dd', 'xyz', '12'])
x3 = np.array([1.1, 2, 3, 4])
r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
assert_equal(r[1].item(), (2, 'dd', 2.0))
x1[1] = 34
assert_equal(r.a, np.array([1, 2, 3, 4]))
def test_recarray_fromfile(self):
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, 'recarray_from_file.fits')
fd = open(filename, 'rb')
fd.seek(2880 * 2)
r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.close()
assert_equal(r1, r2)
def test_recarray_from_obj(self):
count = 10
a = np.zeros(count, dtype='O')
b = np.zeros(count, dtype='f8')
c = np.zeros(count, dtype='f8')
for i in range(len(a)):
a[i] = list(range(1, 10))
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
for i in range(len(a)):
assert_((mine.date[i] == list(range(1, 10))))
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
def test_recarray_from_repr(self):
a = np.array([(1, 'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
recordarr = np.rec.array(a)
recarr = a.view(np.recarray)
recordview = a.view(np.dtype((np.record, a.dtype)))
recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
assert_equal(type(recordarr_r), np.recarray)
assert_equal(recordarr_r.dtype.type, np.record)
assert_equal(recordarr, recordarr_r)
assert_equal(type(recarr_r), np.recarray)
assert_equal(recarr_r.dtype.type, np.record)
assert_equal(recarr, recarr_r)
assert_equal(type(recordview_r), np.ndarray)
assert_equal(recordview.dtype.type, np.record)
assert_equal(recordview, recordview_r)
def test_recarray_views(self):
a = np.array([(1, 'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
b = np.array([1, 2, 3, 4, 5], dtype=np.int64)
# check that np.rec.array gives right dtypes
assert_equal(np.rec.array(a).dtype.type, np.record)
assert_equal(type(np.rec.array(a)), np.recarray)
assert_equal(np.rec.array(b).dtype.type, np.int64)
assert_equal(type(np.rec.array(b)), np.recarray)
# check that viewing as recarray does the same
assert_equal(a.view(np.recarray).dtype.type, np.record)
assert_equal(type(a.view(np.recarray)), np.recarray)
assert_equal(b.view(np.recarray).dtype.type, np.int64)
assert_equal(type(b.view(np.recarray)), np.recarray)
# check that view to non-structured dtype preserves type=np.recarray
r = np.rec.array(np.ones(4, dtype="f4,i4"))
rv = r.view('f8').view('f4,i4')
assert_equal(type(rv), np.recarray)
assert_equal(rv.dtype.type, np.record)
# check that getitem also preserves np.recarray and np.record
r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
('c', 'i4,i4')]))
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
# suppress deprecation warning in 1.12 (remove in 1.13)
with assert_warns(FutureWarning):
assert_equal(r[['a', 'b']].dtype.type, np.record)
assert_equal(type(r[['a', 'b']]), np.recarray)
# and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
c = r.view(C)
assert_equal(type(c['c']), C)
# check that accessing nested structures keep record type, but
# not for subarrays, non-void structures, non-structured voids
test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4', 2)),
('d', ('i8', 'i4,i4'))]
r = np.rec.array([((1, 1), b'11111111', [1, 1], 1),
((1, 1), b'11111111', [1, 1], 1)], dtype=test_dtype)
assert_equal(r.a.dtype.type, np.record)
assert_equal(r.b.dtype.type, np.void)
assert_equal(r.c.dtype.type, np.float32)
assert_equal(r.d.dtype.type, np.int64)
# check the same, but for views
r = np.rec.array(np.ones(4, dtype='i4,i4'))
assert_equal(r.view('f4,f4').dtype.type, np.record)
assert_equal(r.view(('i4', 2)).dtype.type, np.int32)
assert_equal(r.view('V8').dtype.type, np.void)
assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)
# check that we can undo the view
arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
for arr in arrs:
rec = np.rec.array(arr)
# recommended way to view as an ndarray:
arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
def test_recarray_repr(self):
# make sure non-structured dtypes also show up as rec.array
a = np.array(np.ones(4, dtype='f8'))
assert_(repr(np.rec.array(a)).startswith('rec.array'))
# check that the 'np.record' part of the dtype isn't shown
a = np.rec.array(np.ones(3, dtype='i4,i4'))
assert_equal(repr(a).find('numpy.record'), -1)
a = np.rec.array(np.ones(3, dtype='i4'))
assert_(repr(a).find('dtype=int32') != -1)
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
pa = np.rec.fromrecords([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
assert_(ra.dtype == pa.dtype)
assert_(ra.shape == pa.shape)
for k in range(len(ra)):
assert_(ra[k].item() == pa[k].item())
def test_recarray_conflict_fields(self):
ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
(3, 'wrs', 1.3)],
names='field, shape, mean')
ra.mean = [1.1, 2.2, 3.3]
assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
assert_(type(ra.mean) is type(ra.var))
ra.shape = (1, 3)
assert_(ra.shape == (1, 3))
ra.shape = ['A', 'B', 'C']
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
assert_(isinstance(ra.field, collections.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
dtype=[('a', int), ('b', np.object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
ndtype = np.dtype([('a', int), ('b', np.object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
def test_recarray_stringtypes(self):
# Issue #3993
a = np.array([('abc ', 1), ('abc', 2)],
dtype=[('foo', 'S4'), ('bar', int)])
a = a.view(np.recarray)
assert_equal(a.foo[0] == a.foo[1], False)
def test_recarray_returntypes(self):
qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
a = np.rec.array([('abc ', (1, 1), 1, ('abcde', 'fgehi')),
('abc', (2, 3), 1, ('abcde', 'jklmn'))],
dtype=[('foo', 'S4'),
('bar', [('A', int), ('B', int)]),
('baz', int), ('qux', qux_fields)])
assert_equal(type(a.foo), np.ndarray)
assert_equal(type(a['foo']), np.ndarray)
assert_equal(type(a.bar), np.recarray)
assert_equal(type(a['bar']), np.recarray)
assert_equal(a.bar.dtype.type, np.record)
assert_equal(type(a['qux']), np.recarray)
assert_equal(a.qux.dtype.type, np.record)
assert_equal(dict(a.qux.dtype.fields), qux_fields)
assert_equal(type(a.baz), np.ndarray)
assert_equal(type(a['baz']), np.ndarray)
assert_equal(type(a[0].bar), np.record)
assert_equal(type(a[0]['bar']), np.record)
assert_equal(a[0].bar.A, 1)
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
assert_equal(a[0].qux.D, asbytes('fgehi'))
assert_equal(a[0].qux['D'], asbytes('fgehi'))
assert_equal(a[0]['qux'].D, asbytes('fgehi'))
assert_equal(a[0]['qux']['D'], asbytes('fgehi'))
def test_zero_width_strings(self):
# Test for #6430, based on the test case from #1901
cols = [['test'] * 3, [''] * 3]
rec = np.rec.fromarrays(cols)
assert_equal(rec['f0'], ['test', 'test', 'test'])
assert_equal(rec['f1'], ['', '', ''])
dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
rec = np.rec.fromarrays(cols, dtype=dt)
assert_equal(rec.itemsize, 4)
assert_equal(rec['f0'], [b'test', b'test', b'test'])
assert_equal(rec['f1'], [b'', b'', b''])
class TestRecord(TestCase):
def setUp(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
("col3", "<i4")])
def test_assignment1(self):
a = self.data
assert_equal(a.col1[0], 1)
a[0].col1 = 0
assert_equal(a.col1[0], 0)
def test_assignment2(self):
a = self.data
assert_equal(a.col1[0], 1)
a.col1[0] = 0
assert_equal(a.col1[0], 0)
def test_invalid_assignment(self):
a = self.data
def assign_invalid_column(x):
x[0].col5 = 1
self.assertRaises(AttributeError, assign_invalid_column, a)
def test_nonwriteable_setfield(self):
# gh-8171
r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')])
r.flags.writeable = False
with assert_raises(ValueError):
r.f = [2, 3]
with assert_raises(ValueError):
r.setfield([2, 3], *r.dtype.fields['f'])
def test_out_of_order_fields(self):
"""Ticket #1431."""
# this test will be invalid in 1.13
# suppress deprecation warning in 1.12 (remove in 1.13)
with assert_warns(FutureWarning):
x = self.data[['col1', 'col2']]
y = self.data[['col2', 'col1']]
assert_equal(x[0][0], y[0][1])
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_pickle_2(self):
a = self.data
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
def test_pickle_3(self):
# Issue #7140
a = self.data
pa = pickle.loads(pickle.dumps(a[0]))
assert_(pa.flags.c_contiguous)
assert_(pa.flags.f_contiguous)
assert_(pa.flags.writeable)
assert_(pa.flags.aligned)
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
r = np.zeros((1, 3), dtype=dt).view(np.recarray)
r.foo = np.array([1, 2, 3]) # TypeError?
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
with assert_warns(FutureWarning):
ra[['x', 'y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
rec = np.recarray(1, dtype=[('x', float, 5)])
rec[0].x = 1
assert_equal(rec[0].x, np.ones(5))
def test_missing_field(self):
# https://github.com/numpy/numpy/issues/4806
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(ValueError, lambda: arr[['nofield']])
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
l2 = [1, 2, 1, 4, 5, 6]
assert_(np.rec.find_duplicate(l2) == [1])
l3 = [1, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [1, 2])
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
if __name__ == "__main__":
run_module_suite()
| mit | 2,026,631,021,244,819,700 | 38.492063 | 96 | 0.518288 | false |
taoliu/taolib | Scripts/ce_histone_matrix.py | 1 | 17749 | #!/usr/bin/env python
# Time-stamp: <2010-09-08 02:38:38 Tao Liu>
"""Module Description
Copyright (c) 2008 Tao Liu <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: Tao Liu
@contact: [email protected]
"""
# ------------------------------------
# python modules
# ------------------------------------
import os
import sys
import re
import csv
import logging
from optparse import OptionParser
import reportlab
import Bio
from taolib.CoreLib.FeatIO import WigTrackI
from taolib.CoreLib.BasicStat.Func import mean,median,std
# ------------------------------------
# constants
# ------------------------------------
logging.basicConfig(level=20,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Misc functions
# ------------------------------------
def andfilter ( cvsfile, write_func, *args ):
"""
"""
argv = args[0]
if len(argv) < 2:
sys.stderr.write("Need two extra arguments for 'organize', e.g. command: <1,2,3> <4,5,6> means the first 1,2,3 will be used as dependent variables/response, and 4,5,6 will be used as independent variables/terms/predictors.\n")
sys.exit()
responses_num = map(int,argv[0].split(","))
predictors_num = map(int,argv[1].split(","))
fields = cvsfile.fieldnames
responses_label = map(lambda x:"res."+fields[x],responses_num)
predictors_label = map(lambda x:"pre."+fields[x],predictors_num)
responses_name = map(lambda x:fields[x],responses_num)
predictors_name = map(lambda x:fields[x],predictors_num)
#write_func( "#%s\t%s\n" \
# % ( ",".join(map( lambda x:str(x[0])+":"+str(x[1]) , zip(responses_num,responses_name) )),
# ",".join(map( lambda x:str(x[0])+":"+str(x[1]) , zip(predictors_num,predictors_name) ))) )
write_func( "%s\t%s\n" \
% ( ",".join(map( lambda x:str(x) , responses_name )),
",".join(map( lambda x:str(x) , predictors_name ))) )
for l in cvsfile:
# for responses
t_str_list = []
for t in responses_name:
t_str_list.append(l.setdefault(t,"NA"))
# for predictors
v_str_list = []
for v in predictors_name:
v_str_list.append(l.setdefault(v,"NA"))
write_func( "\t".join( (",".join(t_str_list),",".join(v_str_list)) ) )
write_func( "\n" )
def combcall2draw ( cvsfile, write_func, *args ):
"""User specifies several columns to consider, this tool will call
regions where either of the column is above its threshold.
"""
argv = args[0]
if len(argv) < 6:
sys.stderr.write("Need 6 extra arguments for 'combcall2draw', options <loc column> <score column1[,score column2,...]> <cutoff1[,cutoff2,cutoff3]> <min length> <max gap> <pdf filename>\ne.g. command: <0> <1,2,3> <0.5,0.6,0.7> <10000> <2000> <a.pdf>, means to use the first column as genome coordinations to call enriched regions from the combinition of #1, #2 and #3, the thresholds to call enriched region are 0.5 for column 1, 0.6 for column 2 and 0.7 for column 3, the minimum length of region is 10k, and the maximum gap to link two nearby regions is 2k. Then the figure will be saved in a.pdf.\n")
sys.exit()
cor_column = cvsfile.fieldnames[int(argv[0])]
var_columns = map(lambda x:cvsfile.fieldnames[int(x)],argv[1].split(","))
cutoffs = map(float,argv[2].split(","))
min_len = int(argv[3])
max_gap = int(argv[4])
wtrack = WigTrackI() # combined track containing 1 if either of track is above cutoff
add_func = wtrack.add_loc
for l in cvsfile:
cor = l.setdefault(cor_column,None)
if not cor or cor =="NA":
continue
for i in range(len(var_columns)):
var_column = var_columns[i]
cutoff = cutoffs[i]
var = l.setdefault(var_column,None)
if var and var != "NA" and float(var) > cutoff:
(chrom,start,end) = cor.split(".")
add_func(chrom,int(start),1.1)
break
wtrack.span = int(end)-int(start)
bpeaks = wtrack.call_peaks(cutoff=1.0,min_length=min_len,max_gap=max_gap)
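    # Bins flagged above carry the value 1.1, which exceeds the 1.0 cutoff, so call_peaks
    # merges them into regions subject to the min-length/max-gap constraints; the regions
    # are also dumped to a BED file alongside the PDF.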
#f = argv[5]
fhd = open(argv[5].replace("pdf","bed"),"w")
fhd.write(bpeaks.tobed())
from Bio.Graphics import BasicChromosome
from reportlab.lib.colors import gray, black, white
entries = [("chrI", 15072419),
("chrII", 15279316),
("chrIII", 13783681),
("chrIV", 17493784),
("chrV", 20919398),
("chrX", 17718852)]
max_length = max([x[1] for x in entries])
chr_diagram = BasicChromosome.Organism()
for name, length in entries:
cur_chromosome = BasicChromosome.Chromosome(name)
#Set the length, adding and extra 20 percent for the tolomeres:
cur_chromosome.scale_num = max_length * 1.1
# Add an opening telomere
start = BasicChromosome.TelomereSegment()
start.scale = 0.05 * max_length
start.fill_color=gray
cur_chromosome.add(start)
#Add a body - using bp as the scale length here.
try:
cpeaks = bpeaks.peaks[name]
except:
cpeaks = []
body_regions = []
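        # Tile the chromosome body with alternating segments: white for stretches
        # outside called regions, black for the enriched regions themselves.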
last_pos = 0
for p in cpeaks:
body_regions.append( (p[0]-last_pos,white) ) # outside regions
body_regions.append( (p[1]-p[0],black) ) # enriched regions
last_pos = p[1]
assert p[1] < length
body_regions.append( (length-last_pos,white) ) # last part
for b,c in body_regions:
body = BasicChromosome.ChromosomeSegment()
body.fill_color= c
body.scale = b
cur_chromosome.add(body)
#Add a closing telomere
end = BasicChromosome.TelomereSegment(inverted=True)
end.scale = 0.05 * max_length
end.fill_color=gray
cur_chromosome.add(end)
#This chromosome is done
chr_diagram.add(cur_chromosome)
chr_diagram.draw(argv[5], "Highlight regions in Caenorhabditis elegans" )
def call1draw ( cvsfile, write_func, *args ):
"""Call regions, then plot it in chromosome figure.
A combination of drawchrom and call1
"""
argv = args[0]
if len(argv) < 6:
sys.stderr.write("Need 6 extra arguments for 'call1draw', options <loc column> <score column> <cutoff> <min length> <max gap> <pdf filename>\ne.g. command: <0> <1> <0.5> <10000> <2000> <a.pdf>, means to use the first column as genome coordinations to call enriched regions from the second column, the threshold to call enriched region is 0.5, the minimum length of region is 10k, and the maximum gap to link two nearby regions is 2k. Then the figure will be saved in a.pdf.\n")
sys.exit()
cor_column = cvsfile.fieldnames[int(argv[0])]
var_column = cvsfile.fieldnames[int(argv[1])]
cutoff = float(argv[2])
min_len = int(argv[3])
max_gap = int(argv[4])
wtrack = WigTrackI()
add_func = wtrack.add_loc
for l in cvsfile:
cor = l.setdefault(cor_column,None)
var = l.setdefault(var_column,None)
if cor and var and cor != "NA" and var != "NA":
(chrom,start,end) = cor.split(".")
add_func(chrom,int(start),float(var))
wtrack.span = int(end)-int(start)
bpeaks = wtrack.call_peaks(cutoff=cutoff,min_length=min_len,max_gap=max_gap)
fhd = open(argv[5].replace("pdf","bed"),"w")
fhd.write(bpeaks.tobed())
from Bio.Graphics import BasicChromosome
from reportlab.lib.colors import gray, black, white
entries = [("chrI", 15072419),
("chrII", 15279316),
("chrIII", 13783681),
("chrIV", 17493784),
("chrV", 20919398),
("chrX", 17718852)]
max_length = max([x[1] for x in entries])
chr_diagram = BasicChromosome.Organism()
for name, length in entries:
cur_chromosome = BasicChromosome.Chromosome(name)
#Set the length, adding and extra 20 percent for the tolomeres:
cur_chromosome.scale_num = max_length * 1.1
# Add an opening telomere
start = BasicChromosome.TelomereSegment()
start.scale = 0.05 * max_length
start.fill_color=gray
cur_chromosome.add(start)
#Add a body - using bp as the scale length here.
try:
cpeaks = bpeaks.peaks[name]
except:
cpeaks = []
body_regions = []
last_pos = 0
for p in cpeaks:
body_regions.append( (p[0]-last_pos,white) ) # outside regions
body_regions.append( (p[1]-p[0],black) ) # enriched regions
last_pos = p[1]
assert p[1] < length
body_regions.append( (length-last_pos,white) ) # last part
for b,c in body_regions:
body = BasicChromosome.ChromosomeSegment()
body.fill_color= c
body.scale = b
cur_chromosome.add(body)
#Add a closing telomere
end = BasicChromosome.TelomereSegment(inverted=True)
end.scale = 0.05 * max_length
end.fill_color=gray
cur_chromosome.add(end)
#This chromosome is done
chr_diagram.add(cur_chromosome)
chr_diagram.draw(argv[5], "%s regions in Caenorhabditis elegans" % (var_column) )
def drawchrom ( cvsfile, write_func, *args ):
"""Draw CE chromosome tool.
Doesn't need any parameters.
"""
from Bio.Graphics import BasicChromosome
from reportlab.lib.colors import gray, black
entries = [("chrI", 15072419),
("chrII", 15279316),
("chrIII", 13783681),
("chrIV", 17493784),
("chrV", 20919398),
("chrX", 17718852)]
max_length = max([x[1] for x in entries])
chr_diagram = BasicChromosome.Organism()
for name, length in entries:
cur_chromosome = BasicChromosome.Chromosome(name)
#Set the length, adding and extra 20 percent for the tolomeres:
cur_chromosome.scale_num = max_length * 1.1
# Add an opening telomere
start = BasicChromosome.TelomereSegment()
start.scale = 0.05 * max_length
start.fill_color=black
cur_chromosome.add(start)
#Add a body - using bp as the scale length here.
body = BasicChromosome.ChromosomeSegment()
body.fill_color=gray
body.scale = length
cur_chromosome.add(body)
#Add a closing telomere
end = BasicChromosome.TelomereSegment(inverted=True)
end.scale = 0.05 * max_length
end.fill_color=black
cur_chromosome.add(end)
#This chromosome is done
chr_diagram.add(cur_chromosome)
chr_diagram.draw("simple_chrom.pdf", "Caenorhabditis elegans" )
def summary ( cvsfile, write_func, *args ):
"""Show the column names.
"""
fsnames = cvsfile.fieldnames
data_dict = {}
for f in fsnames:
data_dict[f]=[]
#print "\n".join(map( lambda x:":".join(map(str,x)) ,enumerate(fsnames)) )
for l in cvsfile:
for f in fsnames:
v = l.setdefault(f,None)
if v and v!="NA":
data_dict[f].append(v)
write_func( "colnum:colname\tsum,mean,median,std,cutoff\n" )
for (i,f) in enumerate(fsnames):
try:
v_array = map(float,data_dict[f])
v_sum = "%.2f" % sum(v_array)
v_mean = "%.2f" % mean(v_array)
v_median = "%.2f" % median(v_array)
v_std = "%.2f" % std(v_array, float(v_mean))
v_cutoff = "%.2f" % (float(v_mean)+float(v_std))
except ValueError:
(v_sum,v_mean,v_median,v_std,v_cutoff)=["NA"]*5
write_func( "%d:%s\t%s,%s,%s,%s,%s\n" % (i,f,v_sum,v_mean,v_median,v_std,v_cutoff ))
def organize ( cvsfile, write_func, *args ):
"""Re-organize the columns for data-mining.
"""
argv = args[0]
if len(argv) < 2:
sys.stderr.write("Need two extra arguments for 'organize', e.g. command: <1,2,3> <4,5,6> means the first 1,2,3 will be used as dependent variables/response, and 4,5,6 will be used as independent variables/terms/predictors.\n")
sys.exit()
responses_num = map(int,argv[0].split(","))
predictors_num = map(int,argv[1].split(","))
fields = cvsfile.fieldnames
responses_label = map(lambda x:"res."+fields[x],responses_num)
predictors_label = map(lambda x:"pre."+fields[x],predictors_num)
responses_name = map(lambda x:fields[x],responses_num)
predictors_name = map(lambda x:fields[x],predictors_num)
#write_func( "#%s\t%s\n" \
# % ( ",".join(map( lambda x:str(x[0])+":"+str(x[1]) , zip(responses_num,responses_name) )),
# ",".join(map( lambda x:str(x[0])+":"+str(x[1]) , zip(predictors_num,predictors_name) ))) )
write_func( "%s\t%s\n" \
% ( ",".join(map( lambda x:str(x) , responses_name )),
",".join(map( lambda x:str(x) , predictors_name ))) )
for l in cvsfile:
# for responses
t_str_list = []
for t in responses_name:
t_str_list.append(l.setdefault(t,"NA"))
# for predictors
v_str_list = []
for v in predictors_name:
v_str_list.append(l.setdefault(v,"NA"))
write_func( "\t".join( (",".join(t_str_list),",".join(v_str_list)) ) )
write_func( "\n" )
def call1 (cvsfile, write_func, *args ):
"""Call enrich regions from certain column
"""
argv = args[0]
if len(argv) < 5:
sys.stderr.write("Need 5 extra arguments for 'call', options <loc column> <score column> <cutoff> <min length> <max gap>\ne.g. command: <0> <1> <0.5> <10000> <2000>, means to use the first column as genome coordinations to call enriched regions from the second column, the threshold to call enriched region is 0.5, the minimum length of region is 10k, and the maximum gap to link two nearby regions is 2k.\n")
sys.exit()
cor_column = cvsfile.fieldnames[int(argv[0])]
var_column = cvsfile.fieldnames[int(argv[1])]
cutoff = float(argv[2])
min_len = int(argv[3])
max_gap = int(argv[4])
wtrack = WigTrackI()
add_func = wtrack.add_loc
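    # Each data row contributes one bin parsed from its "chrom.start.end" coordinate;
    # the track span is re-derived from the last row, assuming fixed-width bins.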
for l in cvsfile:
cor = l.setdefault(cor_column,None)
var = l.setdefault(var_column,None)
if cor and var and cor != "NA" and var != "NA":
(chrom,start,end) = cor.split(".")
add_func(chrom,int(start),float(var))
wtrack.span = int(end)-int(start)
write_func( "# regions called from %s:%s\n" % (argv[1],var_column) )
bpeaks = wtrack.call_peaks(cutoff=cutoff,min_length=min_len,max_gap=max_gap)
write_func( bpeaks.tobed() )
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def main():
usage = "usage: %prog [options]"
description = "Script to analyze C. elegans histone marks data matrix."
optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False)
optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
optparser.add_option("-i","--ifile",dest="ifile",type="string",
help="input file")
optparser.add_option("-o","--ofile",dest="ofile",
help="output file, default: stdout")
(options,args) = optparser.parse_args()
command_list = {"summary":summary,
"organize":organize,
"call1":call1,
"drawchrom":drawchrom,
"call1draw":call1draw,
"combcall2draw":combcall2draw,
}
command_des = {"summary":"Show the column names.",
"organize":"Re-organize the file for data-mining.",
"call1":"Call enriched regions for certain column.",
"drawchrom":"Draw ce chromosomes.",
"call1draw":"Call enriched regions and then draw chromosome figures.",
"combcall2draw":"Call enriched regions where any of the tracks is above threshold and draw them on chromosome figures.",
}
if not options.ifile or not args:
optparser.print_help()
sys.exit()
if options.ofile:
write_func = open(options.ofile,"w").write
else:
write_func = sys.stdout.write
if command_list.has_key(args[0]):
com = command_list[args[0]]
com_args = args[1:]
else:
optparser.print_help()
sys.stderr.write("Avialable Commands:\n\n")
for c in command_list.keys():
sys.stderr.write(c+": "+command_des[c]+"\n")
sys.exit()
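    # The input matrix is a tab-delimited file with a header row; each data row is
    # parsed into a dict keyed by column name and streamed to the selected command.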
cvsfilereader = csv.DictReader(open(options.ifile,"r"),delimiter="\t")
# run commands
com(cvsfilereader,write_func,com_args)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.stderr.write("User interrupt me! ;-) See you!\n")
sys.exit(0)
| bsd-3-clause | -3,209,012,394,225,596,000 | 38.267699 | 610 | 0.57423 | false |
antechrestos/cf-python-client | main/cloudfoundry_client/v2/service_brokers.py | 1 | 1120 | from cloudfoundry_client.v2.entities import EntityManager
class ServiceBrokerManager(EntityManager):
def __init__(self, target_endpoint, client):
super(ServiceBrokerManager, self).__init__(target_endpoint, client, '/v2/service_brokers')
def create(self, broker_url, broker_name, auth_username, auth_password, space_guid=None):
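        # Passing space_guid registers a space-scoped (private) broker visible only in
        # that space; leaving it None registers a regular platform-wide broker.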
request = self._request(broker_url=broker_url, name=broker_name,
auth_username=auth_username, auth_password=auth_password)
request['space_guid'] = space_guid
return super(ServiceBrokerManager, self)._create(request)
def update(self, broker_guid, broker_url=None, broker_name=None, auth_username=None, auth_password=None):
request = self._request()
request['broker_url'] = broker_url
request['name'] = broker_name
request['auth_username'] = auth_username
request['auth_password'] = auth_password
return super(ServiceBrokerManager, self)._update(broker_guid, request)
def remove(self, broker_guid):
super(ServiceBrokerManager, self)._remove(broker_guid)
| apache-2.0 | -4,444,524,997,635,367,400 | 47.695652 | 109 | 0.68125 | false |
tonybaloney/st2 | st2common/tests/unit/test_util_loader.py | 1 | 6525 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
import mock
import unittest2
from oslo_config import cfg
from st2common import config
from st2common.util import loader
CURRENT_DIR = os.path.dirname(__file__)
ST2CONTENT_DIR = os.path.join(CURRENT_DIR, '../fixtures')
MOCK_RUNNER_NAME = 'mock_runner'
MOCK_RUNNER_PATH = '{0}/{1}/{1}.py'.format(ST2CONTENT_DIR, MOCK_RUNNER_NAME)
MOCK_RUNNER_MODULE = imp.load_source(MOCK_RUNNER_NAME, MOCK_RUNNER_PATH)
MOCK_QUERIER_PATH = '{0}/{1}/query/{1}.py'.format(ST2CONTENT_DIR, MOCK_RUNNER_NAME)
MOCK_QUERIER_MODULE = imp.load_source(MOCK_RUNNER_NAME, MOCK_QUERIER_PATH)
MOCK_CALLBACK_PATH = '{0}/{1}/callback/{1}.py'.format(ST2CONTENT_DIR, MOCK_RUNNER_NAME)
MOCK_CALLBACK_MODULE = imp.load_source(MOCK_RUNNER_NAME, MOCK_CALLBACK_PATH)
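# The fixture modules above are loaded from disk once; the tests below patch
# imp.load_source to return them, so the loader's per-type module caches can be
# asserted via call counts without re-reading the files.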
class PluginLoaderTestCase(unittest2.TestCase):
@classmethod
def setUpClass(cls):
super(PluginLoaderTestCase, cls).setUpClass()
# Check to see if configs are already registered.
# The register_opts is needed when running tests individually.
if 'system' not in cfg.CONF:
config.register_opts()
def setUp(self):
super(PluginLoaderTestCase, self).setUp()
loader.RUNNER_MODULES_CACHE = {}
loader.QUERIER_MODULES_CACHE = {}
loader.CALLBACK_MODULES_CACHE = {}
@mock.patch.object(
imp,
'load_source',
mock.MagicMock(return_value=MOCK_RUNNER_MODULE)
)
def test_register_runner(self):
runner = loader.register_runner(MOCK_RUNNER_NAME)
self.assertIsNotNone(runner)
self.assertEqual(MOCK_RUNNER_NAME, runner.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.RUNNER_MODULES_CACHE)
self.assertEqual(runner, loader.RUNNER_MODULES_CACHE[MOCK_RUNNER_NAME])
@mock.patch.object(
imp,
'load_source',
mock.MagicMock(return_value=MOCK_RUNNER_MODULE)
)
def test_register_runner_again(self):
runner1 = loader.register_runner(MOCK_RUNNER_NAME)
self.assertEqual(1, imp.load_source.call_count)
self.assertIsNotNone(runner1)
self.assertEqual(MOCK_RUNNER_NAME, runner1.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.RUNNER_MODULES_CACHE)
self.assertEqual(runner1, loader.RUNNER_MODULES_CACHE[MOCK_RUNNER_NAME])
runner2 = loader.register_runner(MOCK_RUNNER_NAME)
self.assertEqual(1, imp.load_source.call_count)
self.assertEqual(runner1, runner2)
self.assertIsNotNone(runner2)
self.assertEqual(MOCK_RUNNER_NAME, runner2.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.RUNNER_MODULES_CACHE)
self.assertEqual(runner2, loader.RUNNER_MODULES_CACHE[MOCK_RUNNER_NAME])
@mock.patch.object(
imp,
'load_source',
mock.MagicMock(return_value=MOCK_QUERIER_MODULE)
)
def test_register_query_module(self):
querier = loader.register_query_module(MOCK_RUNNER_NAME)
self.assertIsNotNone(querier)
self.assertEqual(MOCK_RUNNER_NAME, querier.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.QUERIER_MODULES_CACHE)
self.assertEqual(querier, loader.QUERIER_MODULES_CACHE[MOCK_RUNNER_NAME])
@mock.patch.object(
imp,
'load_source',
mock.MagicMock(return_value=MOCK_QUERIER_MODULE)
)
def test_register_query_module_again(self):
querier1 = loader.register_query_module(MOCK_RUNNER_NAME)
self.assertEqual(1, imp.load_source.call_count)
self.assertIsNotNone(querier1)
self.assertEqual(MOCK_RUNNER_NAME, querier1.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.QUERIER_MODULES_CACHE)
self.assertEqual(querier1, loader.QUERIER_MODULES_CACHE[MOCK_RUNNER_NAME])
querier2 = loader.register_query_module(MOCK_RUNNER_NAME)
self.assertEqual(1, imp.load_source.call_count)
self.assertEqual(querier1, querier2)
self.assertIsNotNone(querier2)
self.assertEqual(MOCK_RUNNER_NAME, querier2.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.QUERIER_MODULES_CACHE)
self.assertEqual(querier2, loader.QUERIER_MODULES_CACHE[MOCK_RUNNER_NAME])
@mock.patch.object(
imp,
'load_source',
mock.MagicMock(return_value=MOCK_CALLBACK_MODULE)
)
def test_register_callback_module(self):
callback_module = loader.register_callback_module(MOCK_RUNNER_NAME)
self.assertIsNotNone(callback_module)
self.assertEqual(MOCK_RUNNER_NAME, callback_module.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.CALLBACK_MODULES_CACHE)
self.assertEqual(callback_module, loader.CALLBACK_MODULES_CACHE[MOCK_RUNNER_NAME])
@mock.patch.object(
imp,
'load_source',
mock.MagicMock(return_value=MOCK_CALLBACK_MODULE)
)
def test_register_callback_module_again(self):
callback_module1 = loader.register_callback_module(MOCK_RUNNER_NAME)
self.assertEqual(1, imp.load_source.call_count)
self.assertIsNotNone(callback_module1)
self.assertEqual(MOCK_RUNNER_NAME, callback_module1.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.CALLBACK_MODULES_CACHE)
self.assertEqual(callback_module1, loader.CALLBACK_MODULES_CACHE[MOCK_RUNNER_NAME])
callback_module2 = loader.register_callback_module(MOCK_RUNNER_NAME)
self.assertEqual(1, imp.load_source.call_count)
self.assertEqual(callback_module1, callback_module2)
self.assertIsNotNone(callback_module2)
self.assertEqual(MOCK_RUNNER_NAME, callback_module2.__name__)
self.assertIn(MOCK_RUNNER_NAME, loader.CALLBACK_MODULES_CACHE)
self.assertEqual(callback_module2, loader.CALLBACK_MODULES_CACHE[MOCK_RUNNER_NAME])
| apache-2.0 | -6,562,999,816,449,155,000 | 39.52795 | 91 | 0.69931 | false |
Comunitea/CMNT_004_15 | project-addons/custom_account/models/payment.py | 1 | 3019 | # Copyright 2019 Omar Castiñeira, Comunitea Servicios Tecnológicos S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
class PaymentOrderLine(models.Model):
_inherit = 'account.payment.line'
_order = 'partner_name'
partner_name = fields.Char(related='partner_id.name', store=True)
@api.model
def create(self, vals):
partner_bank_id = vals.get('partner_bank_id')
move_line_id = vals.get('move_line_id')
partner_id = vals.get('partner_id')
order_id = vals.get('order_id')
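        # Mandate resolution when the payment method requires one and none was given:
        # prefer the mandate on the originating customer invoice, then a valid mandate
        # attached to the supplied bank account, and finally any valid mandate of the partner.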
if order_id:
order = self.env['account.payment.order'].browse(order_id)
if order.payment_method_id.mandate_required and not \
vals.get('mandate_id'):
if move_line_id:
line = self.env['account.move.line'].browse(move_line_id)
if line.invoice_id and \
line.invoice_id.type == 'out_invoice' and \
line.invoice_id.mandate_id:
if line.invoice_id.mandate_id.state == 'valid':
vals.update({
'mandate_id': line.invoice_id.mandate_id.id,
'partner_bank_id':
line.invoice_id.mandate_id.partner_bank_id.id})
if partner_bank_id and not vals.get('mandate_id'):
mandates = self.env['account.banking.mandate'].search_read(
[('partner_bank_id', '=', partner_bank_id),
('state', '=', 'valid')], ['id'])
if mandates:
vals['mandate_id'] = mandates[0]['id']
else:
banking_mandate_valid = \
self.env['account.banking.mandate'].\
search_read([('partner_id', '=', partner_id),
('state', '=', 'valid')],
['id', 'partner_bank_id'])
if banking_mandate_valid:
vals.update({
'mandate_id': banking_mandate_valid[0]['id'],
'partner_bank_id':
banking_mandate_valid[0]['partner_bank_id'][0],
})
return super().create(vals)
class BankPaymentLine(models.Model):
_inherit = "bank.payment.line"
mandate_id = fields.Many2one("account.banking.mandate", "Mandate",
related="payment_line_ids.mandate_id",
readonly=True)
mandate_scheme = fields.Selection([('CORE', 'Basic (CORE)'),
('B2B', 'Enterprise (B2B)')],
string='Scheme', readonly=True,
related="mandate_id.scheme")
| agpl-3.0 | -3,272,857,600,221,413,000 | 44.712121 | 79 | 0.46006 | false |
project-owner/Peppy | ui/container.py | 1 | 6297 | # Copyright 2016-2021 Peppy Player [email protected]
#
# This file is part of Peppy Player.
#
# Peppy Player is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Peppy Player is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Peppy Player. If not, see <http://www.gnu.org/licenses/>.
from ui.component import Component
class Container(Component):
""" This container class keeps the list of components and executes group methods on them """
def __init__(self, util, bounding_box=None, background=None, visible=True, content=None, image_filename=None):
""" Initializer
:param util: utility object
:param bounding_box: container bounding box
:param background: container background color
:param visible: visibility flag, True - visible, False - invisible
"""
if content:
cnt = content
else:
cnt = bounding_box
Component.__init__(self, util, c=cnt, bb=bounding_box, bgr=background, v=visible)
self.components = list()
if image_filename:
self.image_filename = image_filename
self.exit_top_y = self.exit_bottom_y = self.exit_left_x = self.exit_right_x = None
def add_component(self, component):
""" Add component to the container
:param component: component to add
"""
self.components.append(component)
def set_parent_screen(self, scr):
""" Add parent screen
:param scr: parent screen
"""
if self.is_empty(): return
self.parent_screen = scr
for c in self.components:
if c:
c.parent_screen = scr
def draw(self):
""" Draw all components in container. Doesn't draw invisible container. """
if not self.visible: return
Component.draw(self)
if self.is_empty(): return
for comp in self.components:
if comp: comp.draw()
def draw_area(self, bb):
if not self.visible: return
Component.draw(self, bb)
def is_empty(self):
""" Check if container has components
:return: True - container doesn't have components, False - container has components
"""
return not hasattr(self, "components")
def clean_draw_update(self):
""" Clean, draw and update container """
self.clean()
self.draw()
self.update()
def handle_event(self, event):
""" Handle container event. Don't handle event if container is invisible.
:param event: the event to handle
"""
if not self.visible or len(self.components) == 0: return
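        # Dispatch in reverse order so the topmost (last added) component sees the event
        # first; a visible popup handles the event and stops further propagation.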
for i in range(len(self.components) - 1, -1, -1):
try:
comp = self.components[i]
if not hasattr(comp, "handle_event"):
continue
if getattr(comp, "popup", None) == True:
if comp.visible == True:
comp.handle_event(event)
break
else:
comp.handle_event(event)
except:
pass
def set_current(self, state=None):
""" Set container as current. Used by screens
:param state: button state (if any)
"""
pass
def set_visible(self, flag):
""" Set container visible/invisible. Set all components in container visible/invisible.
:param flag: True - visible, False - invisible
"""
Component.set_visible(self, flag)
if self.is_empty(): return
for comp in self.components:
if not comp: continue
if getattr(comp, "popup", None) == True:
if not comp.visible:
continue
else:
comp.set_visible(flag)
def refresh(self):
""" Refresh container. Used for periodical updates for example for animation.
This method will be called from the main event loop.
"""
if not self.visible: return
for comp in self.components:
try:
comp.refresh()
except AttributeError:
pass
def is_selected(self):
""" Check if conatiner has selected component
:return: True - container has selected component, False - doesn't have
"""
s = False
for c in self.components:
if c and getattr(c, "selected", False):
s = True
break
return s
def items_per_line(self, width):
""" Return the number of items in line for specified screen width
:param width: screen width
:return: number of items per line
"""
if width <= 102:
return 1
elif width <= 203:
return 2
elif width <= 304:
return 3
elif width <= 405:
return 4
elif width <= 506:
return 5
else:
return 6
def add_button_observers(self, button, update_observer, redraw_observer=None, press=True, release=True):
""" Add button observers
        :param button: button to observe
:param update_observer: observer for updating the button
:param redraw_observer: observer to redraw the whole screen
"""
if press and update_observer: button.add_press_listener(update_observer)
if release and update_observer: button.add_release_listener(update_observer)
        if redraw_observer:
button.add_release_listener(redraw_observer)
button.redraw_observer = redraw_observer
| gpl-3.0 | -7,975,863,792,035,716,000 | 31.626943 | 114 | 0.570907 | false |