repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (categorical, 15 values)
---|---|---|---|---|---|
opencolorado/OpenColorado-Tools-and-Utilities | Scripts/Harvest/Drcog/ckanclient/__init__.py | 4 | 21170 |
__version__ = '0.9'
__description__ = 'The CKAN client Python package.'
__long_description__ = \
'''The CKAN client software may be used to make requests on the Comprehensive
Knowledge Archive Network (CKAN) API including its REST interface to all
primary objects (packages, groups, tags) and its search interface.
Synopsis
========
The simplest way to make CKAN requests is:
import ckanclient
# Instantiate the CKAN client.
ckan = ckanclient.CkanClient(api_key=my_key)
# Get the package list.
package_list = ckan.package_register_get()
print package_list
# Get the tag list.
tag_list = ckan.tag_register_get()
print tag_list
# Collect the package metadata.
package_entity = {
'name': my_package_name,
'url': my_package_url,
'download_url': my_package_download_url,
'tags': my_package_keywords,
'notes': my_package_long_description,
}
# Register the package.
ckan.package_register_post(package_entity)
# Get the details of a package.
ckan.package_entity_get(package_name)
package_entity = ckan.last_message
print package_entity
# Update the details of a package.
ckan.package_entity_get(package_name)
package_entity = ckan.last_message
package_entity['url'] = new_package_url
package_entity['notes'] = new_package_notes
ckan.package_entity_put(package_entity)
# List groups
group_list = ckan.group_register_get()
print group_list
# Create a new group
group_entity = {
'name': my_group_name,
'title': my_group_title,
'description': my_group_description,
'packages': group_package_names,
}
ckan.group_register_post(group_entity)
# Get the details of a group.
print ckan.group_entity_get(group_name)
# Update the group details
group_entity = ckan.last_message
group_entity['title'] = new_group_title
group_entity['packages'] = new_group_packages
ckan.group_entity_put(group_entity)
Changelog
=========
v0.9 2011-08-09
---------------
* Default URL changed to thedatahub.org
* Guard against 301 redirection, which loses POST contents
v0.8 2011-07-20
---------------
* More detailed exceptions added
* Some Python 3 compatibility
v0.7 2011-01-27
---------------
* Package search returns results as a generator
(rather than a list that needs to be paged)
v0.5 2010-12-15
---------------
* Exception raised on error (more Pythonic)
v0.4 2010-10-07
---------------
* Form API added
* Package name editing
* Groups added
* Output can be verbose and use logger
* Query API version
* Sends API key via additional header
v0.3 2010-04-28
---------------
* General usability improvements especially around error messages.
* Package Relationships added
* Package deletion fixed
* Changeset entities added
* Improved httpauth (thanks to will waites)
v0.2 2009-11-05
---------------
* Search API support added
* Improved package support to include additional fields such as 'extras'
* Support tag and group entities in addition to package
* Compatibility changes: CkanClient base_location (now should point to base
api e.g. http://ckan.net/api rather than http://ckan.net/api/rest)
v0.1 2008-04
------------
* Fully functional implementation for REST interface to packages
'''
__license__ = 'MIT'
import os
import re
try:
str = unicode
from urllib2 import (urlopen, build_opener, install_opener,
HTTPBasicAuthHandler,
HTTPPasswordMgrWithDefaultRealm,
Request,
HTTPError, URLError)
from urllib import urlencode
except NameError:
# Forward compatibility with Py3k
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import (build_opener, install_opener, urlopen,
HTTPPasswordMgrWithDefaultRealm,
HTTPBasicAuthHandler,
Request)
try: # since python 2.6
import json
except ImportError:
import simplejson as json
import logging
logger = logging.getLogger('ckanclient')
PAGE_SIZE = 10
class CkanApiError(Exception): pass
class CkanApiNotFoundError(CkanApiError): pass
class CkanApiNotAuthorizedError(CkanApiError): pass
class CkanApiConflictError(CkanApiError): pass
class ApiRequest(Request):
def __init__(self, url, data=None, headers={}, method=None):
Request.__init__(self, url, data, headers)
self._method = method
def get_method(self):
if self.has_data():
if not self._method:
return 'POST'
assert self._method in ('POST', 'PUT'), 'Invalid method "%s" for request with data.' % self._method
return self._method
else:
if not self._method:
return 'GET'
assert self._method in ('GET', 'DELETE'), 'Invalid method "%s" for request without data.' % self._method
return self._method
class ApiClient(object):
def reset(self):
self.last_location = None
self.last_status = None
self.last_body = None
self.last_headers = None
self.last_message = None
self.last_http_error = None
self.last_url_error = None
def open_url(self, location, data=None, headers={}, method=None):
if self.is_verbose:
self._print("ckanclient: Opening %s" % location)
self.last_location = location
try:
if data != None:
data = urlencode({data: 1})
req = ApiRequest(location, data, headers, method=method)
self.url_response = urlopen(req)
if data and self.url_response.geturl() != location:
redirection = '%s -> %s' % (location, self.url_response.geturl())
raise URLError("Got redirected to another URL, which does not work with POSTS. Redirection: %s" % redirection)
except HTTPError, inst:
self._print("ckanclient: Received HTTP error code from CKAN resource.")
self._print("ckanclient: location: %s" % location)
self._print("ckanclient: response code: %s" % inst.fp.code)
self._print("ckanclient: request headers: %s" % headers)
self._print("ckanclient: request data: %s" % data)
self._print("ckanclient: error: %s" % inst)
self.last_http_error = inst
self.last_status = inst.code
self.last_message = inst.read()
except URLError, inst:
self._print("ckanclient: Unable to progress with URL.")
self._print("ckanclient: location: %s" % location)
self._print("ckanclient: request headers: %s" % headers)
self._print("ckanclient: request data: %s" % data)
self._print("ckanclient: error: %s" % inst)
self.last_url_error = inst
if isinstance(inst.reason, tuple):
self.last_status,self.last_message = inst.reason
else:
self.last_message = inst.reason
self.last_status = inst.errno
else:
self._print("ckanclient: OK opening CKAN resource: %s" % location)
self.last_status = self.url_response.code
self._print('ckanclient: last status %s' % self.last_status)
self.last_body = self.url_response.read()
self._print('ckanclient: last body %s' % self.last_body)
self.last_headers = self.url_response.headers
self._print('ckanclient: last headers %s' % self.last_headers)
content_type = self.last_headers['Content-Type']
self._print('ckanclient: content type: %s' % content_type)
is_json_response = False
if 'json' in content_type:
is_json_response = True
if is_json_response:
self.last_message = self._loadstr(self.last_body)
else:
self.last_message = self.last_body
self._print('ckanclient: last message %s' % self.last_message)
def get_location(self, resource_name, entity_id=None, subregister=None, entity2_id=None):
base = self.base_location
path = self.resource_paths[resource_name]
if entity_id != None:
path += '/' + entity_id
if subregister != None:
path += '/' + subregister
if entity2_id != None:
path += '/' + entity2_id
return base + path
def _dumpstr(self, data):
return json.dumps(data)
def _loadstr(self, string):
try:
if string == '':
data = None
else:
data = json.loads(string)
except ValueError, exception:
msg = "Couldn't decode data from JSON string: '%s': %s" % (string, exception)
raise ValueError, msg
return data
def _print(self, msg):
'''Print depending on self.is_verbose and log at the same time.'''
logger.debug(msg)
if self.is_verbose:
print(msg)
class CkanClient(ApiClient):
"""
Client API implementation for CKAN.
:param base_location: default *http://thedatahub.org/api*
:param api_key: default *None*
:param is_verbose: default *False*
:param http_user: default *None*
:param http_pass: default *None*
"""
base_location = 'http://thedatahub.org/api'
resource_paths = {
'Base': '',
'Changeset Register': '/rest/changeset',
'Changeset Entity': '/rest/changeset',
'Package Register': '/rest/package',
'Package Entity': '/rest/package',
'Tag Register': '/rest/tag',
'Tag Entity': '/rest/tag',
'Group Register': '/rest/group',
'Group Entity': '/rest/group',
'Package Search': '/search/package',
'Package Create Form': '/form/package/create',
'Package Edit Form': '/form/package/edit',
}
def __init__(self, base_location=None, api_key=None, is_verbose=False,
http_user=None, http_pass=None):
if base_location is not None:
self.base_location = base_location
self.api_key = api_key
self.is_verbose = is_verbose
if http_user and http_pass:
password_mgr = HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, base_location,
http_user, http_pass)
handler = HTTPBasicAuthHandler(password_mgr)
opener = build_opener(handler)
install_opener(opener)
def _auth_headers(self):
return {
'Authorization': self.api_key,
'X-CKAN-API-Key': self.api_key
}
def open_url(self, url, *args, **kwargs):
result = super(CkanClient, self).open_url(url, *args, **kwargs)
if self.last_status not in (200, 201):
if self.last_status == 404:
raise CkanApiNotFoundError(self.last_status)
elif self.last_status == 403:
raise CkanApiNotAuthorizedError(self.last_status)
elif self.last_status == 409:
raise CkanApiConflictError(self.last_status)
else:
raise CkanApiError(self.last_message)
return result
def api_version_get(self):
self.reset()
url = self.get_location('Base')
self.open_url(url)
version = self.last_message['version']
return version
#
# Model API
#
def package_register_get(self):
self.reset()
url = self.get_location('Package Register')
self.open_url(url)
return self.last_message
def package_register_post(self, package_dict):
self.reset()
url = self.get_location('Package Register')
data = self._dumpstr(package_dict)
headers = self._auth_headers()
self.open_url(url, data, headers)
return self.last_message
def package_entity_get(self, package_name):
self.reset()
url = self.get_location('Package Entity', package_name)
headers = self._auth_headers()
self.open_url(url, headers=headers)
return self.last_message
def package_entity_put(self, package_dict, package_name=None):
# You only need to specify the current package_name if you
# are giving it a new package_name in the package_dict.
self.reset()
if not package_name:
package_name = package_dict['name']
url = self.get_location('Package Entity', package_name)
data = self._dumpstr(package_dict)
headers = self._auth_headers()
self.open_url(url, data, headers, method='PUT')
return self.last_message
def package_entity_delete(self, package_name):
self.reset()
url = self.get_location('Package Register', package_name)
headers = self._auth_headers()
self.open_url(url, headers=headers, method='DELETE')
return self.last_message
def package_relationship_register_get(self, package_name,
relationship_type='relationships',
relationship_with_package_name=None):
self.reset()
url = self.get_location('Package Entity',
entity_id=package_name,
subregister=relationship_type,
entity2_id=relationship_with_package_name)
headers = self._auth_headers()
self.open_url(url, headers=headers)
return self.last_message
def package_relationship_entity_post(self, subject_package_name,
relationship_type, object_package_name, comment=u''):
self.reset()
url = self.get_location('Package Entity',
entity_id=subject_package_name,
subregister=relationship_type,
entity2_id=object_package_name)
data = self._dumpstr({'comment':comment})
headers = self._auth_headers()
self.open_url(url, data, headers, method='POST')
return self.last_message
def package_relationship_entity_put(self, subject_package_name,
relationship_type, object_package_name, comment=u''):
self.reset()
url = self.get_location('Package Entity',
entity_id=subject_package_name,
subregister=relationship_type,
entity2_id=object_package_name)
data = self._dumpstr({'comment':comment})
headers = self._auth_headers()
self.open_url(url, data, headers, method='PUT')
return self.last_message
def package_relationship_entity_delete(self, subject_package_name,
relationship_type, object_package_name):
self.reset()
url = self.get_location('Package Entity',
entity_id=subject_package_name,
subregister=relationship_type,
entity2_id=object_package_name)
headers = self._auth_headers()
self.open_url(url, headers=headers, method='DELETE')
return self.last_message
def tag_register_get(self):
self.reset()
url = self.get_location('Tag Register')
self.open_url(url)
return self.last_message
def tag_entity_get(self, tag_name):
self.reset()
url = self.get_location('Tag Entity', tag_name)
self.open_url(url)
return self.last_message
def group_register_post(self, group_dict):
self.reset()
url = self.get_location('Group Register')
data = self._dumpstr(group_dict)
headers = self._auth_headers()
self.open_url(url, data, headers)
return self.last_message
def group_register_get(self):
self.reset()
url = self.get_location('Group Register')
self.open_url(url)
return self.last_message
def group_entity_get(self, group_name):
self.reset()
url = self.get_location('Group Entity', group_name)
self.open_url(url)
return self.last_message
def group_entity_put(self, group_dict, group_name=None):
# You only need to specify the current group_name if you
# are giving it a new group_name in the group_dict.
self.reset()
if not group_name:
group_name = group_dict['name']
url = self.get_location('Group Entity', group_name)
data = self._dumpstr(group_dict)
headers = self._auth_headers()
self.open_url(url, data, headers, method='PUT')
return self.last_message
#
# Search API
#
def package_search(self, q, search_options=None):
self.reset()
search_options = search_options.copy() if search_options else {}
url = self.get_location('Package Search')
search_options['q'] = q
if not search_options.get('limit'):
search_options['limit'] = PAGE_SIZE
data = self._dumpstr(search_options)
headers = self._auth_headers()
self.open_url(url, data, headers)
result_dict = self.last_message
if not search_options.get('offset'):
result_dict['results'] = self._result_generator(result_dict['count'], result_dict['results'], self.package_search, q, search_options)
return result_dict
def _result_generator(self, count, results, func, q, search_options):
'''Returns a generator that will make the necessary calls to page
through results.'''
page = 0
num_pages = int(count / search_options['limit'] + 0.9999)
while True:
for res in results:
yield res
# go to next page?
page += 1
if page >= num_pages:
break
# retrieve next page
search_options['offset'] = page * search_options['limit']
result_dict = func(q, search_options)
results = result_dict['results']
#
# Form API
#
def package_create_form_get(self):
self.reset()
url = self.get_location('Package Create Form')
self.open_url(url)
return self.last_message
def package_create_form_post(self, form_submission):
self.reset()
url = self.get_location('Package Create Form')
data = self._dumpstr(form_submission)
headers = self._auth_headers()
self.open_url(url, data, headers)
return self.last_message
def package_edit_form_get(self, package_ref):
self.reset()
url = self.get_location('Package Edit Form', package_ref)
self.open_url(url)
return self.last_message
def package_edit_form_post(self, package_ref, form_submission):
self.reset()
url = self.get_location('Package Edit Form', package_ref)
data = self._dumpstr(form_submission)
headers = self._auth_headers()
self.open_url(url, data, headers)
return self.last_message
#
# Changeset API
#
def changeset_register_get(self):
self.reset()
url = self.get_location('Changeset Register')
self.open_url(url)
return self.last_message
def changeset_entity_get(self, changeset_name):
self.reset()
url = self.get_location('Changeset Entity', changeset_name)
self.open_url(url)
return self.last_message
#
# data API
#
def _storage_metadata_url(self, path):
url = self.base_location
if not url.endswith("/"): url += "/"
url += "storage/metadata"
if not path.startswith("/"): url += "/"
url += path
return url
def storage_metadata_get(self, path):
url = self._storage_metadata_url(path)
self.open_url(url)
return self._loadstr(self.last_message)
def storage_metadata_set(self, path, metadata):
url = self._storage_metadata_url(path)
payload = self._dumpstr(metadata)
self.open_url(url, payload, method="PUT")
return self._loadstr(self.last_message)
def storage_metadata_update(self, path, metadata):
url = self._storage_metadata_url(path)
payload = self._dumpstr(metadata)
self.open_url(url, payload, method="POST")
return self._loadstr(self.last_message)
def _storage_auth_url(self, path):
url = self.base_location
if not url.endswith("/"): url += "/"
url += "storage/auth"
if not path.startswith("/"): url += "/"
url += path
return url
def storage_auth_get(self, path, headers):
url = self._storage_auth_url(path)
payload = self._dumpstr(headers)
self.open_url(url, payload, method="POST")
return self._loadstr(self.last_message)
#
# Utils
#
def is_id(self, id_string):
'''Tells the client if the string looks like an id or not'''
return bool(re.match('^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', id_string))
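As a usage illustration for the client above, here is a minimal sketch of the paged search described in the v0.7 changelog entry. The API key and query string are placeholders, and the default endpoint (thedatahub.org) may need to be overridden via the base_location argument.

```python
import ckanclient

# Placeholder credentials and query, for illustration only.
ckan = ckanclient.CkanClient(api_key='my-api-key')
result = ckan.package_search('water', {'limit': 20})
print(result['count'])
# 'results' is a generator that transparently fetches further pages
# by re-issuing the search with an increasing 'offset'.
for name in result['results']:
    print(name)
```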
| apache-2.0 |
afrendeiro/pipelines | lib/fix_bedfile_genome_boundaries.py | 1 | 1086 |
#!/usr/bin/env python
import csv
import sys
def getChrSizes(chrmFile):
"""
Reads a tab-delimited file with two columns describing the chromosomes and their lengths.
Returns a dictionary mapping chromosome name to size.
"""
with open(chrmFile, 'r') as f:
chrmSizes = {}
for line in enumerate(f):
row = line[1].strip().split('\t')
chrmSizes[str(row[0])] = int(row[1])
return chrmSizes
chrSizes = {
"hg19": "/fhgfs/groups/lab_bock/arendeiro/share/hg19.chrom.sizes",
"mm10": "/fhgfs/groups/lab_bock/arendeiro/share/mm10.chrom.sizes",
"dr7": "/fhgfs/groups/lab_bock/arendeiro/share/danRer7.chrom.sizes"
}
genome = sys.argv[1]
chrms = getChrSizes(chrSizes[genome]) # get size of chromosomes
wr = csv.writer(sys.stdout, delimiter='\t', lineterminator='\n')
for row in csv.reader(iter(sys.stdin.readline, ''), delimiter='\t'):
chrm = row[0]
start = int(row[1])
end = int(row[2])
if chrm in chrms.keys(): # skip weird chromosomes
if start >= 1 and end <= chrms[chrm] and start < end:
wr.writerow(row)
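A minimal sketch of the boundary rule this filter applies, using a hand-built sizes dictionary in place of a chrom.sizes file (the lengths below are illustrative, not real chromosome sizes):

```python
# Toy sizes dictionary standing in for getChrSizes(<chrom.sizes file>).
chrms = {"chr1": 1000, "chr2": 800}
rows = [
    ["chr1", "10", "500"],    # kept: fully inside chr1
    ["chr1", "990", "1200"],  # dropped: end exceeds the chr1 length
    ["chrUn_x", "0", "50"],   # dropped: chromosome not in the sizes file
]
kept = [r for r in rows
        if r[0] in chrms and 1 <= int(r[1]) < int(r[2]) <= chrms[r[0]]]
print(kept)  # [['chr1', '10', '500']]
```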
| gpl-2.0 |
hgrif/incubator-airflow | airflow/www/validators.py | 28 | 1875 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from wtforms.validators import EqualTo
from wtforms.validators import ValidationError
class GreaterEqualThan(EqualTo):
"""Compares the values of two fields.
:param fieldname:
The name of the other field to compare to.
:param message:
Error message to raise in case of a validation error. Can be
interpolated with `%(other_label)s` and `%(other_name)s` to provide a
more helpful error.
"""
def __call__(self, form, field):
try:
other = form[self.fieldname]
except KeyError:
raise ValidationError(
field.gettext("Invalid field name '%s'." % self.fieldname)
)
if field.data is None or other.data is None:
return
if field.data < other.data:
d = {
'other_label': hasattr(other, 'label') and other.label.text
or self.fieldname,
'other_name': self.fieldname,
}
message = self.message
if message is None:
message = field.gettext('Field must be greater than or equal '
'to %(other_label)s.' % d)
else:
message = message % d
raise ValidationError(message)
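A minimal sketch of how this validator might be attached to a pair of WTForms fields; the form and field names are hypothetical, not part of Airflow:

```python
from wtforms import Form, DateField
from wtforms.validators import Optional

class IntervalForm(Form):  # hypothetical form, for illustration only
    start_date = DateField('Start date', validators=[Optional()])
    end_date = DateField(
        'End date',
        validators=[GreaterEqualThan(
            fieldname='start_date',
            message='End date must not be earlier than %(other_label)s.')])
```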
| apache-2.0 |
mysql/mysql-utilities | mysql/utilities/common/charsets.py | 4 | 4465 |
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module contains the charset_info class designed to read character set
and collation information from /share/charsets/index.xml.
"""
import sys
from mysql.utilities.common.format import print_list
_CHARSET_INDEXES = ID, CHARACTER_SET_NAME, COLLATION_NAME, MAXLEN, IS_DEFAULT \
= range(0, 5)
_CHARSET_QUERY = """
SELECT CL.ID,CL.CHARACTER_SET_NAME,CL.COLLATION_NAME,CS.MAXLEN, CL.IS_DEFAULT
FROM INFORMATION_SCHEMA.CHARACTER_SETS CS, INFORMATION_SCHEMA.COLLATIONS CL
WHERE CS.CHARACTER_SET_NAME=CL.CHARACTER_SET_NAME ORDER BY CHARACTER_SET_NAME
"""
class CharsetInfo(object):
"""
Read character set information for lookup. Methods include:
- get_charset_name(id) : get the name for a characterset id
- get_default_collation(name) : get default collation name
- get_name_by_collation(name) : given collation, find charset name
- print_charsets() : print the character set map
"""
def __init__(self, options=None):
"""Constructor
options[in] array of general options
"""
if options is None:
options = {}
self.verbosity = options.get("verbosity", 0)
self.format = options.get("format", "grid")
self.server = options.get("server", None)
self.charset_map = None
if self.server:
self.charset_map = self.server.exec_query(_CHARSET_QUERY)
def print_charsets(self):
"""Print the character set list
"""
print_list(sys.stdout, self.format,
["id", "character_set_name", "collation_name",
"maxlen", "is_default"],
self.charset_map)
print len(self.charset_map), "rows in set."
def get_name(self, chr_id):
"""Get the character set name for the given id
chr_id[in] id for character set (as read from .frm file)
Returns string - character set name or None if not found.
"""
for cs in self.charset_map:
if int(chr_id) == int(cs[ID]):
return cs[CHARACTER_SET_NAME]
return None
def get_collation(self, col_id):
"""Get the collation name for the given id
col_id[in] id for collation (as read from .frm file)
Returns string - collation name or None if not found.
"""
for cs in self.charset_map:
if int(col_id) == int(cs[ID]):
return cs[COLLATION_NAME]
return None
def get_name_by_collation(self, colname):
"""Get the character set name for the given collation
colname[in] collation name
Returns string - character set name or None if not found.
"""
for cs in self.charset_map:
if cs[COLLATION_NAME] == colname:
return cs[CHARACTER_SET_NAME]
return None
def get_default_collation(self, col_id):
"""Get the default collation for the character set
col_id[in] id for collation (as read from .frm file)
Returns string - default collation name or None if not found.
"""
# Exception for utf8
if col_id == 83:
return "utf8_bin"
for cs in self.charset_map:
if int(cs[ID]) == int(col_id) and cs[IS_DEFAULT].upper() == "YES":
return cs[COLLATION_NAME]
return None
def get_maxlen(self, col_id):
"""Get the maximum length for the character set
col_id[in] id for collation (as read from .frm file)
Returns int - max length or 1 if not found.
"""
for cs in self.charset_map:
if int(cs[ID]) == int(col_id):
return int(cs[MAXLEN])
return int(1)
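A minimal sketch of the lookup behaviour, assuming the CharsetInfo class above is importable and using a hand-built charset_map in place of a live server query (the rows are illustrative, not a full copy of INFORMATION_SCHEMA):

```python
info = CharsetInfo()  # no 'server' option, so charset_map starts as None
info.charset_map = [
    # (ID, CHARACTER_SET_NAME, COLLATION_NAME, MAXLEN, IS_DEFAULT)
    ('8', 'latin1', 'latin1_swedish_ci', '1', 'Yes'),
    ('33', 'utf8', 'utf8_general_ci', '3', 'Yes'),
]
print(info.get_name(33))                              # utf8
print(info.get_collation(8))                          # latin1_swedish_ci
print(info.get_name_by_collation('utf8_general_ci'))  # utf8
print(info.get_maxlen(33))                            # 3
```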
| gpl-2.0 |
tsai1993/aisixiang | 01.download_1.py | 1 | 2415 |
#!/usr/bin/python3
import os
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas
import time
# Read the catalogue information generated by 00.get_metadata.R
D0 = pandas.read_csv("all_aisixiang_2017-05-24.csv")
# If the run is interrupted unexpectedly, j can be changed here to resume
j = 0
D = D0[j:]
for i in D['ID']:
Url = "http://www.aisixiang.com/data/" + str(i) + ".html"
print(Url)
try:
html = urlopen(Url)
except:
f1 = open("broken-new.txt", 'a')
Broken = str(i) + '-' + Url + ',' + '\n'
f1.write(Broken)
f1.close()
print(Broken)
j += 1
Availability = 3
f2 = open("Av.txt", 'a')
f2.write(str(Availability) + '_' + str(i) + ',' + '\n')
f2.close()
else:
Soup = BeautifulSoup(html, "html.parser")
Article = Soup.find(id = "content2")
Article_page = ''
if type(Article) == type(None):
Availability = 0
else:
Availability = 1
Page = Soup.find(class_ = "list_page")
if type(Page) == type(None):
Article_page = Article_page + Article.get_text()
else:
Page_number = Page.find_all("a")
N = int(Page_number[-2].get_text())
for k in range(1, N+1):
Url2 = Url[:(len(Url)-5)] + '-' + str(k) + '.html'
print(Url2)
try:
html2 = urlopen(Url2)
except:
k += 1
ft2 = open("broken2.txt", 'a')
Broken2 = str(i) + '-' + Url2 + ',' + '\n'
ft2.write(Broken2)
ft2.close()
print(Broken2)
else:
Soup2 = BeautifulSoup(html2, "html.parser")
Article = Soup2.find(id = "content2")
Article_page = Article_page + Article.get_text()
time.sleep(1)
Name = str(Availability) + '-' + str(i) + '-' + D0.iloc[j,0] + '.txt'
Name = Name.replace('/','')
f = open(Name, 'w')
f.write(Article_page)
f.close()
print(Name + '\n')
j += 1
time.sleep(1)
f2 = open("Av.txt", 'a')
f2.write(str(Availability) + '_' + str(i) + ',' + '\n')
f2.close()
| mpl-2.0 |
rvalyi/l10n-brazil | l10n_br_account_service/models/res_company.py | 1 | 2477 |
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion #
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
from openerp.addons.l10n_br_account.models.l10n_br_account import (
L10nBrTaxDefinition
)
class ResCompany(models.Model):
_inherit = 'res.company'
@api.one
@api.depends('service_tax_definition_line.tax_id')
def _compute_service_taxes(self):
service_taxes = self.env['account.tax']
for tax in self.service_tax_definition_line:
service_taxes += tax.tax_id
self.product_tax_ids = service_taxes
service_invoice_id = fields.Many2one(
'l10n_br_account.fiscal.document',
'Documento Fiscal')
document_serie_service_id = fields.Many2one(
'l10n_br_account.document.serie', u'Série Fiscais para Serviço',
domain="[('company_id', '=', active_id),('active','=',True),"
"('fiscal_type','=','service')]")
nfse_version = fields.Selection(
[('100', '1.00')], 'Versão NFse', required=True, default="100")
nfse_import_folder = fields.Char('Pasta de Origem', size=254)
nfse_export_folder = fields.Char('Pasta de Destino', size=254)
nfse_backup_folder = fields.Char('Pasta de Backup', size=254)
service_tax_definition_line = fields.One2many(
'l10n_br_tax.definition.company.service',
'company_id', 'Taxes Definitions')
service_tax_ids = fields.Many2many('account.tax',
string='Service Taxes',
compute='_compute_service_taxes',
store=True)
in_invoice_service_fiscal_category_id = fields.Many2one(
'l10n_br_account.fiscal.category',
'Categoria Fiscal Padrão de Aquisição de Serviço',
domain="[('journal_type','=','purchase'), "
" ('fiscal_type','=','service'), ('type','=','input'),"
" ('state', '=', 'approved')]")
out_invoice_service_fiscal_category_id = fields.Many2one(
'l10n_br_account.fiscal.category',
'Categoria Fiscal Padrão de Prestação de Serviço',
domain="""[('journal_type','=','sale'),
('fiscal_type','=','service'), ('type','=','output'),
('state', '=', 'approved')]""")
class L10nbrTaxDefinitionCompanyService(L10nBrTaxDefinition, models.Model):
_name = 'l10n_br_tax.definition.company.service'
| agpl-3.0 |
rossasa/server-tools | sql_export/sql_export.py | 9 | 2103 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 Akretion (<http://www.akretion.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class SqlExport(models.Model):
_name = "sql.export"
_inherit = ['sql.request.mixin']
_description = "SQL export"
_sql_request_groups_relation = 'groups_sqlquery_rel'
_sql_request_users_relation = 'users_sqlquery_rel'
_check_execution_enabled = False
copy_options = fields.Char(
string='Copy Options', required=True,
default="CSV HEADER DELIMITER ';'")
field_ids = fields.Many2many(
'ir.model.fields',
'fields_sqlquery_rel',
'sql_id',
'field_id',
'Parameters',
domain=[('model', '=', 'sql.file.wizard')])
@api.multi
def export_sql_query(self):
self.ensure_one()
wiz = self.env['sql.file.wizard'].create({
'sql_export_id': self.id})
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sql.file.wizard',
'res_id': wiz.id,
'type': 'ir.actions.act_window',
'target': 'new',
'context': self._context,
'nodestroy': True,
}
| agpl-3.0 |
znoland3/zachdemo | venvdir/lib/python3.4/site-packages/setuptools/command/alias.py | 455 | 2426 |
from distutils.errors import DistutilsOptionError
from setuptools.extern.six.moves import map
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
"""Quote an argument for later parsing by shlex.split()"""
for c in '"', "'", "\\", "#":
if c in arg:
return repr(arg)
if arg.split() != [arg]:
return repr(arg)
return arg
class alias(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when "
"using --remove"
)
def run(self):
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print("Command Aliases")
print("---------------")
for alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
elif len(self.args) == 1:
alias, = self.args
if self.remove:
command = None
elif alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
else:
print("No alias definition found for %r" % alias)
return
else:
alias = self.args[0]
command = ' '.join(map(shquote, self.args[1:]))
edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = '--filename=%r' % source
return source + name + ' ' + command
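The quoting rule shquote applies can be seen in a short sketch; the strings are chosen only to exercise each branch:

```python
from setuptools.command.alias import shquote

print(shquote('build'))       # build         -> returned unchanged
print(shquote('two words'))   # 'two words'   -> repr-quoted (contains whitespace)
print(shquote('say "hi"'))    # 'say "hi"'    -> repr-quoted (embedded quote character)
```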
| mit |
VitalPet/odoo | addons/hr_expense/report/hr_expense_report.py | 52 | 6694 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_expense_report(osv.osv):
_name = "hr.expense.report"
_description = "Expenses Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date ', readonly=True),
'year': fields.char('Year', size=4, readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month',readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Force Journal', readonly=True),
'product_qty':fields.float('Qty', readonly=True),
'employee_id': fields.many2one('hr.employee', "Employee's Name", readonly=True),
'date_confirm': fields.date('Confirmation Date', readonly=True),
'date_valid': fields.date('Validation Date', readonly=True),
'voucher_id': fields.many2one('account.voucher', 'Receipt', readonly=True),
'department_id':fields.many2one('hr.department','Department', readonly=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Validation User', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'price_total':fields.float('Total Price', readonly=True, digits_compute=dp.get_precision('Account')),
'delay_valid':fields.float('Delay to Valid', readonly=True),
'delay_confirm':fields.float('Delay to Confirm', readonly=True),
'analytic_account': fields.many2one('account.analytic.account','Analytic account',readonly=True),
'price_average':fields.float('Average Price', readonly=True, digits_compute=dp.get_precision('Account')),
'nbr':fields.integer('# of Lines', readonly=True),
'no_of_products':fields.integer('# of Products', readonly=True),
'no_of_account':fields.integer('# of Accounts', readonly=True),
'state': fields.selection([
('draft', 'Draft'),
('confirm', 'Waiting confirmation'),
('accepted', 'Accepted'),
('done', 'Done'),
('cancelled', 'Cancelled')],
'Status', readonly=True),
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_expense_report')
cr.execute("""
create or replace view hr_expense_report as (
select
min(l.id) as id,
date_trunc('day',s.date) as date,
s.employee_id,
s.journal_id,
s.currency_id,
to_date(to_char(s.date_confirm, 'dd-MM-YYYY'),'dd-MM-YYYY') as date_confirm,
to_date(to_char(s.date_valid, 'dd-MM-YYYY'),'dd-MM-YYYY') as date_valid,
s.voucher_id,
s.user_valid as user_id,
s.department_id,
to_char(date_trunc('day',s.create_date), 'YYYY') as year,
to_char(date_trunc('day',s.create_date), 'MM') as month,
to_char(date_trunc('day',s.create_date), 'YYYY-MM-DD') as day,
avg(extract('epoch' from age(s.date_valid,s.date)))/(3600*24) as delay_valid,
avg(extract('epoch' from age(s.date_valid,s.date_confirm)))/(3600*24) as delay_confirm,
l.product_id as product_id,
l.analytic_account as analytic_account,
sum(l.unit_quantity * u.factor) as product_qty,
s.company_id as company_id,
sum(l.unit_quantity*l.unit_amount) as price_total,
(sum(l.unit_quantity*l.unit_amount)/sum(case when l.unit_quantity=0 or u.factor=0 then 1 else l.unit_quantity * u.factor end))::decimal(16,2) as price_average,
count(*) as nbr,
(select unit_quantity from hr_expense_line where id=l.id and product_id is not null) as no_of_products,
(select analytic_account from hr_expense_line where id=l.id and analytic_account is not null) as no_of_account,
s.state
from hr_expense_line l
left join hr_expense_expense s on (s.id=l.expense_id)
left join product_uom u on (u.id=l.uom_id)
group by
date_trunc('day',s.date),
to_char(date_trunc('day',s.create_date), 'YYYY'),
to_char(date_trunc('day',s.create_date), 'MM'),
to_char(date_trunc('day',s.create_date), 'YYYY-MM-DD'),
to_date(to_char(s.date_confirm, 'dd-MM-YYYY'),'dd-MM-YYYY'),
to_date(to_char(s.date_valid, 'dd-MM-YYYY'),'dd-MM-YYYY'),
l.product_id,
l.analytic_account,
s.voucher_id,
s.currency_id,
s.user_valid,
s.department_id,
l.uom_id,
l.id,
s.state,
s.journal_id,
s.company_id,
s.employee_id
)
""")
hr_expense_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
slightperturbation/Cobalt | ext/emsdk_portable/emscripten/tag-1.34.1/tools/jsrun.py | 2 | 3613 |
import time, os, sys, logging
from subprocess import Popen, PIPE, STDOUT
TRACK_PROCESS_SPAWNS = True if (os.getenv('EM_BUILD_VERBOSE') and int(os.getenv('EM_BUILD_VERBOSE')) >= 3) else False
def timeout_run(proc, timeout=None, note='unnamed process', full_output=False):
start = time.time()
if timeout is not None:
while time.time() - start < timeout and proc.poll() is None:
time.sleep(0.1)
if proc.poll() is None:
proc.kill() # XXX bug: killing emscripten.py does not kill it's child process!
raise Exception("Timed out: " + note)
out = proc.communicate()
out = map(lambda o: '' if o is None else o, out)
if TRACK_PROCESS_SPAWNS:
logging.info('Process ' + str(proc.pid) + ' finished after ' + str(time.time() - start) + ' seconds. Exit code: ' + str(proc.returncode))
return '\n'.join(out) if full_output else out[0]
def make_command(filename, engine=None, args=[]):
if type(engine) is not list:
engine = [engine]
# Emscripten supports multiple javascript runtimes. The default is nodejs but
# it can also use d8 (the v8 engine shell) or jsc (JavaScript Core aka
# Safari). Both d8 and jsc require a '--' to delimit arguments to be passed
# to the executed script from d8/jsc options. Node does not require a
# delimiter--arguments after the filename are passed to the script.
#
# Check only the last part of the engine path to ensure we don't accidentally
# label a path to nodejs containing a 'd8' as spidermonkey instead.
jsengine = os.path.split(engine[0])[-1]
# Use "'d8' in" because the name can vary, e.g. d8_g, d8, etc.
return engine + [filename] + (['--'] if 'd8' in jsengine or 'jsc' in jsengine else []) + args
def run_js(filename, engine=None, args=[], check_timeout=False, stdin=None, stdout=PIPE, stderr=None, cwd=None, full_output=False, assert_returncode=0, error_limit=-1):
# # code to serialize out the test suite files
# # XXX make sure to disable memory init files, and clear out the base_dir. you may also need to manually grab e.g. paper.pdf.js from a run of test_poppler
# import shutil, json
# base_dir = '/tmp/emscripten_suite'
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# commands_file = os.path.join(base_dir, 'commands.txt')
# commands = ''
# if os.path.exists(commands_file):
# commands = open(commands_file).read()
# i = 0
# while True:
# curr = os.path.join(base_dir, str(i) + '.js')
# if not os.path.exists(curr): break
# i += 1
# shutil.copyfile(filename, curr)
# commands += os.path.basename(curr) + ',' + json.dumps(args) + '\n'
# open(commands_file, 'w').write(commands)
command = make_command(filename, engine, args)
try:
if cwd is not None: os.environ['EMCC_BUILD_DIR'] = os.getcwd()
proc = Popen(
command,
stdin=stdin,
stdout=stdout,
stderr=stderr,
cwd=cwd)
finally:
if cwd is not None: del os.environ['EMCC_BUILD_DIR']
timeout = 15*60 if check_timeout else None
if TRACK_PROCESS_SPAWNS:
logging.info('Blocking on process ' + str(proc.pid) + ': ' + str(command) + (' for ' + str(timeout) + ' seconds' if timeout else ' until it finishes.'))
ret = timeout_run(
proc,
timeout,
'Execution',
full_output=full_output)
if assert_returncode is not None and proc.returncode is not assert_returncode:
raise Exception('Expected the command ' + str(command) + ' to finish with return code ' + str(assert_returncode) + ', but it returned with code ' + str(proc.returncode) + ' instead! Output: ' + str(ret)[:error_limit])
return ret
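A minimal sketch of the '--' delimiting described in the comments of make_command above; the engine paths are hypothetical, and the module is assumed to be importable as jsrun:

```python
import jsrun  # assumes the file above is on the import path as 'jsrun'

print(jsrun.make_command('test.js', engine=['/usr/bin/node'], args=['-v']))
# ['/usr/bin/node', 'test.js', '-v']       (node: no delimiter needed)
print(jsrun.make_command('test.js', engine=['/opt/v8/d8'], args=['-v']))
# ['/opt/v8/d8', 'test.js', '--', '-v']    (d8: script args are delimited)
```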
| apache-2.0 |
grzes/djangae | djangae/db/backends/appengine/compiler.py | 7 | 3820 |
#LIBRARIES
import django
from django.db.models.sql import compiler
from django.db.models.expressions import Value, OrderBy
from django.db.models.sql.query import get_order_dir
#DJANGAE
from .commands import (
SelectCommand,
InsertCommand,
UpdateCommand,
DeleteCommand
)
class SQLCompiler(compiler.SQLCompiler):
def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None):
"""
Overridden just for the __scatter__ property ordering
"""
# This allow special appengine properties (e.g. __scatter__) to be supplied as an ordering
# even though they don't (and can't) exist as Django model fields
if name.startswith("__") and name.endswith("__"):
name, order = get_order_dir(name, default_order)
descending = True if order == 'DESC' else False
return [(OrderBy(Value('__scatter__'), descending=descending), False)]
return super(SQLCompiler, self).find_ordering_name(
name,
opts,
alias=alias,
default_order=default_order,
already_seen=already_seen
)
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
self.pre_sql_setup()
self.refcounts_before = self.query.alias_refcount.copy()
select = SelectCommand(
self.connection,
self.query
)
return (select, tuple())
def get_select(self):
self.query.select_related = False # Make sure select_related is disabled for all queries
return super(SQLCompiler, self).get_select()
class SQLInsertCompiler(SQLCompiler, compiler.SQLInsertCompiler):
def __init__(self, *args, **kwargs):
self.return_id = None
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
self.pre_sql_setup()
from djangae.db.utils import get_concrete_fields
# Always pass down all the fields on an insert
return [(InsertCommand(
self.connection, self.query.model, self.query.objs,
list(self.query.fields) + list(get_concrete_fields(self.query.model, ignore_leaf=True)),
self.query.raw), tuple())
]
class SQLDeleteCompiler(SQLCompiler, compiler.SQLDeleteCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
return (DeleteCommand(self.connection, self.query), tuple())
class SQLUpdateCompiler(SQLCompiler, compiler.SQLUpdateCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
self.pre_sql_setup()
return (UpdateCommand(self.connection, self.query), tuple())
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
if self.query.subquery:
self.query.high_mark = self.query.subquery.query.high_mark
self.query.low_mark = self.query.subquery.query.low_mark
return SQLCompiler.as_sql(self, with_limits, with_col_aliases, subquery)
if django.VERSION < (1, 8):
from django.db.models.sql.compiler import (
SQLDateCompiler as DateCompiler,
SQLDateTimeCompiler as DateTimeCompiler
)
class SQLDateCompiler(DateCompiler, SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
return SQLCompiler.as_sql(self, with_limits, with_col_aliases, subquery)
class SQLDateTimeCompiler(DateTimeCompiler, SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
return SQLCompiler.as_sql(self, with_limits, with_col_aliases, subquery)
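As a usage illustration of the __scatter__ handling above, a hypothetical Django model could be ordered by that datastore property even though it is not a model field; this is a sketch under that assumption, not djangae's documented API surface:

```python
from django.db import models

class Reading(models.Model):  # hypothetical model, for illustration only
    value = models.IntegerField()

# '__scatter__' is an App Engine datastore property rather than a model
# field; the find_ordering_name override above passes it through as a raw
# OrderBy expression instead of resolving it against the model.
sample = Reading.objects.order_by('__scatter__')[:50]
```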
| bsd-3-clause |
jasonflorack/OS-tweets | app/UserInterface.py | 1 | 13307 |
import os
import webbrowser
import json
from app.ListenerInterface import ListenerInterface
from app.SearcherInterface import SearcherInterface
class UserInterface:
def __init__(self):
super(UserInterface, self).__init__()
self._auth = None
self._search_terms = ''
self._num_tweets = None
self._incl_retweets = 0
self._news_org = None
self._search_term_dict = {1: '#Election2016',
2: ['Hillary', 'Clinton', '#ImWithHer', '#HillaryClinton', '#Hillary2016'],
3: ['Donald Trump', 'Trump', '#MakeAmericaGreatAgain', '#Trump', '#trumptrain',
'#donaldtrump', '#Trump2016'],
4: ['Bernie', 'Sanders', '#Bernie2016', '#berniesanders', '#feelthebern'],
5: ['Ted Cruz', 'Cruz', '#ChooseCruz', '#tedcruz', '#cruz', '#CruzFiorina2016',
'#Cruz2016'],
6: ['John Kasich', '#JohnKasich', '#Kasich2016', '#Kasich4Us', 'Kasich'],
7: ['Democrat', 'Democrats', '#democrat', '#left', '#Democratic', '#liberal'],
8: ['Republican', 'Republicans', '#GOP', '#Republican', '#rightwing',
'#conservative']}
self._news_org_dict = {1: 'nytimes', 2: 'CNN', 3: 'WSJ', 4: 'TIME', 5: 'FoxNews',
6: 'washingtonpost', 7: 'ABC', 8: 'CBSNews', 9: 'NBCNews', 10: 'Newsweek'}
@property
def auth(self):
return self._auth
@auth.setter
def auth(self, auth):
self._auth = auth
@property
def search_terms(self):
return self._search_terms
@search_terms.setter
def search_terms(self, search_terms):
self._search_terms = search_terms
@property
def num_tweets(self):
return self._num_tweets
@num_tweets.setter
def num_tweets(self, num_tweets):
self._num_tweets = num_tweets
@property
def incl_retweets(self):
return self._incl_retweets
@incl_retweets.setter
def incl_retweets(self, incl_retweets):
self._incl_retweets = incl_retweets
@property
def news_org(self):
return self._news_org
@news_org.setter
def news_org(self, news_org):
self._news_org = news_org
@staticmethod
def clear_screen():
os.system('cls' if os.name == 'nt' else 'clear')
@staticmethod
def greeting():
print("******************************************************************************************************")
print(' Election 2016 Twitter Tracker')
print("******************************************************************************************************")
print('This app uses the Twitter API (often via Tweepy) to search for tweets related to the 2016 US')
print('Presidential Election. The search can be done either from the recent past, or from the live Twitter')
print('Stream. After a search is completed, you will be presented the tweets that met the search criteria,')
print('along with an option to view one or more of the tweets in your browser.')
def pick_recent_or_live_tweets(self):
print()
print('OPTIONS:')
print('--------')
print('1) Search for recent news-oriented Election 2016 tweets')
print('2) Collect live Election 2016 tweets from the Twitter stream')
print()
choice = input('PICK 1 or 2: ')
self.handle_recent_or_live_choice(choice)
def handle_recent_or_live_choice(self, choice):
"""Take in the user's choice from the top menu and handle it accordingly."""
if choice == '1':
self.ask_for_org()
self.present_search_term_options()
self.ask_for_search_terms(org=self._news_org)
self.ask_num_tweets_search()
self.activate_news_org_search(self._num_tweets)
elif choice == '2':
print()
print("You chose to collect live tweets concerning Election 2016.")
self.present_search_term_options()
self.ask_for_search_terms(org=None)
self.ask_num_tweets_live()
self.activate_stream_listener(self._num_tweets)
else:
self.invalid_choice()
def invalid_choice(self):
"""Handle an invalid choice off of the main menu."""
new_choice = input('Invalid choice. Please select 1 or 2: ')
self.handle_recent_or_live_choice(new_choice)
@staticmethod
def present_search_term_options():
"""Present user with terms that can be included in the Twitter search
Method for RECENT or LIVE tweets
"""
print()
print("Here are eight groups of terms related to the 2016 US Presidential Election:")
print("1) #Election2016")
print("2) Hillary, Clinton, #ImWithHer, #HillaryClinton, #Hillary2016")
print("3) 'Donald Trump', Trump, #MakeAmericaGreatAgain, #Trump, #trumptrain, #donaldtrump, "
"#Trump2016")
print("4) Bernie, Sanders, #Bernie2016, #berniesanders, #feelthebern")
print("5) 'Ted Cruz', Cruz, #ChooseCruz, #tedcruz, #cruz, #CruzFiorina2016, #Cruz2016")
print("6) 'John Kasich', Kasich, #JohnKasich, #Kasich2016, #Kasich4Us")
print("7) Democrat, Democrats, #democrat, #left, #Democratic, #liberal")
print("8) Republican, Republicans, #GOP, #Republican, #rightwing, #conservative")
def ask_for_search_terms(self, org):
"""Ask user which search term they want to use, and set their choice
Method for RECENT or LIVE tweets
"""
print()
term = input("Which term do you want to add to the search? Pick one: ")
# Handle invalid responses
while not term.isdigit() or '-' in term or int(term) > 8 or int(term) <= 0:
term = input('Invalid choice. '
'Please enter a digit corresponding to the search term you want to add to the search: ')
# User response is accepted; pick search term from the search_term_dict
search_term = self._search_term_dict[int(term)]
# Create search string and store in self._search_terms variable
if type(search_term) is list and org:
for term in search_term[:-1]:
self._search_terms = self._search_terms + term + ' ' + org + ' OR '
self._search_terms = self._search_terms + search_term[-1] + ' ' + org
else:
self._search_terms = search_term
def ask_for_org(self):
"""Ask user which news organization they want to include as a search term
Method for RECENT tweets only
"""
print()
print('You chose to search for recent news-oriented Election 2016 tweets. In order to increase the chance')
print('that quality content will be found, the search will look specifically for tweets that mention a ')
print('popular news organization, or are from a news organization.')
print()
print("Here are ten top news organizations you can include in the search:")
print("1) The New York Times")
print("2) CNN")
print("3) Wall Street Journal")
print("4) TIME.com")
print("5) Fox News")
print("6) Washington Post")
print("7) ABC News")
print("8) CBS News")
print("9) NBC News")
print("10) Newsweek")
print()
org_id = input("Which news organization do you want (enter number)? ")
# Handle invalid responses
while not org_id.isdigit() or '-' in org_id or int(org_id) > 10 or int(org_id) <= 0:
org_id = input('Invalid choice. '
'Please enter a digit corresponding to the news organization '
'you want to include in the search: ')
# User response is accepted; pick news organization's Twitter username from news_org_dict
news_org = self._news_org_dict[int(org_id)]
# Store selected news organization's Twitter username
self._news_org = news_org
def ask_num_tweets_search(self):
"""Ask user how many tweets to search for
Method for RECENT tweets only
"""
print()
tweets_wanted = input("How many election-related tweets do you want to obtain that are from, "
"or mention, @{0} (MAX=100)? ".format(self._news_org))
# Handle invalid responses
while not tweets_wanted.isdigit() or not 0 < int(tweets_wanted) < 101:
tweets_wanted = input('Invalid choice. Please enter a digit between 1 and 100: ')
# Store user's desired number of tweets
self._num_tweets = tweets_wanted
# Ask user if they want to include RTs or not
incl_retweets = input("Include retweets (enter Y or N)? ")
# Handle invalid responses
while incl_retweets != 'y' and incl_retweets != 'n' and incl_retweets != 'Y' and incl_retweets != 'N':
incl_retweets = input('Invalid response. Please enter Y for yes or N for no: ')
# If user elects to include RTs in the search, set the appropriate variable which will flag this in the search
if incl_retweets == 'y' or incl_retweets == 'Y':
self._incl_retweets = 1
def ask_num_tweets_live(self):
"""Ask user how many tweets to collect from the live Twitter stream
Method for LIVE tweets only
"""
print()
tweets_wanted = input("How many tweets do you want to collect (MAX=100)? ")
# Handle invalid responses
while not tweets_wanted.isdigit() or not 0 < int(tweets_wanted) < 101:
tweets_wanted = input('Invalid response. Please enter a digit between 1 and 100: ')
# Store user's desired number of tweets
self._num_tweets = tweets_wanted
def activate_news_org_search(self, num_tweets):
"""Send num_tweets, auth, search terms, and the include retweets setting to a SearcherInterface which will
set up the search
Method for RECENT tweets only
"""
searcher = SearcherInterface()
searcher.search_for_search_terms_in_twitter(num_tweets, self._auth, self._search_terms, self._incl_retweets)
def activate_stream_listener(self, num_tweets):
"""Send auth, search terms, and the number of tweets the user wants over to a ListenerInterface which will
set up the Twitter Stream listener
Method for LIVE tweets only
"""
listener = ListenerInterface()
listener.get_live_tweets_from_twitter_stream(self._auth, self._search_terms, num_tweets)
def view_tweet_in_browser_or_end_program(self):
"""After the search is done and the tweets are presented to the user,
ask user if s/he wants to view one of the listed tweets on the web via their browser
"""
loop = 0
while loop >= 0:
loop += 1
print()
# Use slightly different wording in the question after the first time it's asked
if loop == 1:
response = input("Do you want to view a tweet listed above via your web browser (enter Y or N)? ")
else:
response = input("Do you want to view another tweet from the search results (enter Y or N)? ")
# Handle invalid responses
while response != 'y' and response != 'n' and response != 'Y' and response != 'N':
response = input('Invalid response. Please enter Y for yes or N for no: ')
# Handle a YES response
if response == 'Y' or response == 'y':
line_of_tweet = input("What is the line number of the tweet with the desired URL? ")
# Handle invalid responses
while not line_of_tweet.isdigit() or \
int(line_of_tweet) > int(self._num_tweets) or \
int(line_of_tweet) <= 0:
line_of_tweet = input("Invalid response. Please enter a number corresponding to the tweet "
"you'd like to view online: ")
# Open the JSON file for reading and grab everything in there, then close the file
with open('app/data/election.json', 'r') as data_file:
data = json.load(data_file)
data_file.close()
# Store the id string (id_str) of the desired tweet in the tweet_id variable
tweet_id = data['Tweet' + line_of_tweet]['id_str']
# Open a web browser and go to the URL for the tweet the user wanted to see displayed on the web
webbrowser.open('https://twitter.com/statuses/{0}'.format(tweet_id), new=1, autoraise=True)
# Handle a NO response
else:
# Set 'loop' to be < 0 in order to stop the while loop handling the tweet viewing
loop = -1
print()
print('OK. Thanks for using this app. Come back soon and do another search! Goodbye.')
| mit |
invitu/odoomrp-wip | account_treasury_forecast/wizard/wiz_create_invoice.py | 31 | 2577 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api
class WizCreateInvoice(models.TransientModel):
_name = 'wiz.create.invoice'
_description = 'Wizard to create invoices'
partner_id = fields.Many2one("res.partner", string="Partner")
journal_id = fields.Many2one("account.journal", string="Journal",
domain=[("type", "=", "purchase")])
description = fields.Char(string="Description")
amount = fields.Float(string="Amount",
digits_compute=dp.get_precision('Account'))
line_id = fields.Many2one("account.treasury.forecast.line.template",
string="Payment")
@api.one
def button_create_inv(self):
invoice_obj = self.env['account.invoice']
res_inv = invoice_obj.onchange_partner_id('in_invoice',
self.partner_id.id)
values = res_inv['value']
values['name'] = ('Treasury: ' + self.description + '/ Amount: ' +
str(self.amount))
values['reference'] = ('Treasury: ' + self.description + '/ Amount: ' +
str(self.amount))
values['partner_id'] = self.partner_id.id
values['journal_id'] = self.journal_id.id
values['type'] = 'in_invoice'
invoice_id = invoice_obj.create(values)
self.line_id.write({'invoice_id': invoice_id.id, 'paid': 1,
'journal_id': self.journal_id.id,
'partner_id': self.partner_id.id,
'amount': self.amount})
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
weimingtom/python-for-android | python3-alpha/extra_modules/gdata/apps/adminsettings/service.py | 48 | 13680 |
#!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to set domain admin settings.
AdminSettingsService: Set admin settings."""
__author__ = '[email protected]'
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER='2.0'
class AdminSettingsService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Admin Settings service."""
def _serviceUrl(self, setting_id, domain=None):
if domain is None:
domain = self.domain
return '/a/feeds/domain/%s/%s/%s' % (API_VER, domain, setting_id)
def genericGet(self, location):
"""Generic HTTP Get Wrapper
Args:
location: relative uri to Get
Returns:
A dict containing the result of the get operation."""
uri = self._serviceUrl(location)
try:
return self._GetProperties(uri)
except gdata.service.RequestError as e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def GetDefaultLanguage(self):
"""Gets Domain Default Language
Args:
None
Returns:
Default Language as a string. All possible values are listed at:
http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags"""
result = self.genericGet('general/defaultLanguage')
return result['defaultLanguage']
def UpdateDefaultLanguage(self, defaultLanguage):
"""Updates Domain Default Language
Args:
defaultLanguage: Domain Language to set
possible values are at:
http://code.google.com/apis/apps/email_settings/developers_guide_protocol.html#GA_email_language_tags
Returns:
A dict containing the result of the put operation"""
uri = self._serviceUrl('general/defaultLanguage')
properties = {'defaultLanguage': defaultLanguage}
return self._PutProperties(uri, properties)
def GetOrganizationName(self):
"""Gets Domain Default Language
Args:
None
Returns:
Organization Name as a string."""
result = self.genericGet('general/organizationName')
return result['organizationName']
def UpdateOrganizationName(self, organizationName):
"""Updates Organization Name
Args:
organizationName: Name of organization
Returns:
A dict containing the result of the put operation"""
uri = self._serviceUrl('general/organizationName')
properties = {'organizationName': organizationName}
return self._PutProperties(uri, properties)
def GetMaximumNumberOfUsers(self):
"""Gets Maximum Number of Users Allowed
Args:
None
Returns: An integer, the maximum number of users"""
result = self.genericGet('general/maximumNumberOfUsers')
return int(result['maximumNumberOfUsers'])
def GetCurrentNumberOfUsers(self):
"""Gets Current Number of Users
Args:
None
Returns: An integer, the current number of users"""
result = self.genericGet('general/currentNumberOfUsers')
return int(result['currentNumberOfUsers'])
def IsDomainVerified(self):
"""Is the domain verified
Args:
None
Returns: Boolean, is domain verified"""
result = self.genericGet('accountInformation/isVerified')
if result['isVerified'] == 'true':
return True
else:
return False
def GetSupportPIN(self):
"""Gets Support PIN
Args:
None
Returns: A string, the Support PIN"""
result = self.genericGet('accountInformation/supportPIN')
return result['supportPIN']
def GetEdition(self):
"""Gets Google Apps Domain Edition
Args:
None
Returns: A string, the domain's edition (premier, education, partner)"""
result = self.genericGet('accountInformation/edition')
return result['edition']
def GetCustomerPIN(self):
"""Gets Customer PIN
Args:
None
Returns: A string, the customer PIN"""
result = self.genericGet('accountInformation/customerPIN')
return result['customerPIN']
def GetCreationTime(self):
"""Gets Domain Creation Time
Args:
None
Returns: A string, the domain's creation time"""
result = self.genericGet('accountInformation/creationTime')
return result['creationTime']
def GetCountryCode(self):
"""Gets Domain Country Code
Args:
None
Returns: A string, the domain's country code. Possible values at:
http://www.iso.org/iso/country_codes/iso_3166_code_lists/english_country_names_and_code_elements.htm"""
result = self.genericGet('accountInformation/countryCode')
return result['countryCode']
def GetAdminSecondaryEmail(self):
"""Gets Domain Admin Secondary Email Address
Args:
None
Returns: A string, the secondary email address for domain admin"""
result = self.genericGet('accountInformation/adminSecondaryEmail')
return result['adminSecondaryEmail']
def UpdateAdminSecondaryEmail(self, adminSecondaryEmail):
"""Gets Domain Creation Time
Args:
adminSecondaryEmail: string, secondary email address of admin
Returns: A dict containing the result of the put operation"""
uri = self._serviceUrl('accountInformation/adminSecondaryEmail')
properties = {'adminSecondaryEmail': adminSecondaryEmail}
return self._PutProperties(uri, properties)
def GetDomainLogo(self):
"""Gets Domain Logo
This function does not make use of the Google Apps Admin Settings API,
    it does an HTTP Get of a URL specific to the Google Apps domain. It is
    included for the sake of completeness.
Args:
None
Returns: binary image file"""
import urllib.request, urllib.parse, urllib.error
url = 'http://www.google.com/a/cpanel/'+self.domain+'/images/logo.gif'
response = urllib.request.urlopen(url)
return response.read()
def UpdateDomainLogo(self, logoImage):
"""Update Domain's Custom Logo
Args:
logoImage: binary image data
Returns: A dict containing the result of the put operation"""
from base64 import b64encode
uri = self._serviceUrl('appearance/customLogo')
properties = {'logoImage': b64encode(logoImage)}
return self._PutProperties(uri, properties)
def GetCNAMEVerificationStatus(self):
"""Gets Domain CNAME Verification Status
Args:
None
Returns: A dict {recordName, verified, verifiedMethod}"""
return self.genericGet('verification/cname')
def UpdateCNAMEVerificationStatus(self, verified):
"""Updates CNAME Verification Status
Args:
verified: boolean, True will retry verification process
Returns: A dict containing the result of the put operation"""
uri = self._serviceUrl('verification/cname')
properties = self.GetCNAMEVerificationStatus()
properties['verified'] = verified
return self._PutProperties(uri, properties)
def GetMXVerificationStatus(self):
"""Gets Domain MX Verification Status
Args:
None
Returns: A dict {verified, verifiedMethod}"""
return self.genericGet('verification/mx')
def UpdateMXVerificationStatus(self, verified):
"""Updates MX Verification Status
Args:
verified: boolean, True will retry verification process
Returns: A dict containing the result of the put operation"""
uri = self._serviceUrl('verification/mx')
properties = self.GetMXVerificationStatus()
properties['verified'] = verified
return self._PutProperties(uri, properties)
def GetSSOSettings(self):
"""Gets Domain Single Sign-On Settings
Args:
None
Returns: A dict {samlSignonUri, samlLogoutUri, changePasswordUri, enableSSO, ssoWhitelist, useDomainSpecificIssuer}"""
return self.genericGet('sso/general')
def UpdateSSOSettings(self, enableSSO=None, samlSignonUri=None,
samlLogoutUri=None, changePasswordUri=None,
ssoWhitelist=None, useDomainSpecificIssuer=None):
"""Update SSO Settings.
Args:
enableSSO: boolean, SSO Master on/off switch
samlSignonUri: string, SSO Login Page
samlLogoutUri: string, SSO Logout Page
      changePasswordUri: string, SSO Password Change Page
ssoWhitelist: string, Range of IP Addresses which will see SSO
useDomainSpecificIssuer: boolean, Include Google Apps Domain in Issuer
Returns:
A dict containing the result of the update operation.
"""
uri = self._serviceUrl('sso/general')
#Get current settings, replace Nones with ''
properties = self.GetSSOSettings()
if properties['samlSignonUri'] == None:
properties['samlSignonUri'] = ''
if properties['samlLogoutUri'] == None:
properties['samlLogoutUri'] = ''
if properties['changePasswordUri'] == None:
properties['changePasswordUri'] = ''
if properties['ssoWhitelist'] == None:
properties['ssoWhitelist'] = ''
#update only the values we were passed
if enableSSO != None:
properties['enableSSO'] = gdata.apps.service._bool2str(enableSSO)
if samlSignonUri != None:
properties['samlSignonUri'] = samlSignonUri
if samlLogoutUri != None:
properties['samlLogoutUri'] = samlLogoutUri
if changePasswordUri != None:
properties['changePasswordUri'] = changePasswordUri
if ssoWhitelist != None:
properties['ssoWhitelist'] = ssoWhitelist
if useDomainSpecificIssuer != None:
properties['useDomainSpecificIssuer'] = gdata.apps.service._bool2str(useDomainSpecificIssuer)
return self._PutProperties(uri, properties)
def GetSSOKey(self):
"""Gets Domain Single Sign-On Signing Key
Args:
None
Returns: A dict {modulus, exponent, algorithm, format}"""
return self.genericGet('sso/signingkey')
def UpdateSSOKey(self, signingKey):
"""Update SSO Settings.
Args:
signingKey: string, public key to be uploaded
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('sso/signingkey')
properties = {'signingKey': signingKey}
return self._PutProperties(uri, properties)
def IsUserMigrationEnabled(self):
"""Is User Migration Enabled
Args:
None
Returns:
boolean, is user migration enabled"""
result = self.genericGet('email/migration')
if result['enableUserMigration'] == 'true':
return True
else:
return False
def UpdateUserMigrationStatus(self, enableUserMigration):
"""Update User Migration Status
Args:
enableUserMigration: boolean, user migration enable/disable
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('email/migration')
properties = {'enableUserMigration': enableUserMigration}
return self._PutProperties(uri, properties)
def GetOutboundGatewaySettings(self):
"""Get Outbound Gateway Settings
Args:
None
Returns:
A dict {smartHost, smtpMode}"""
uri = self._serviceUrl('email/gateway')
try:
return self._GetProperties(uri)
except gdata.service.RequestError as e:
      raise gdata.apps.service.AppsForYourDomainException(e.args[0])
except TypeError:
#if no outbound gateway is set, we get a TypeError,
#catch it and return nothing...
return {'smartHost': None, 'smtpMode': None}
def UpdateOutboundGatewaySettings(self, smartHost=None, smtpMode=None):
"""Update Outbound Gateway Settings
Args:
smartHost: string, ip address or hostname of outbound gateway
smtpMode: string, SMTP or SMTP_TLS
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('email/gateway')
#Get current settings, replace Nones with ''
    properties = self.GetOutboundGatewaySettings()
if properties['smartHost'] == None:
properties['smartHost'] = ''
if properties['smtpMode'] == None:
properties['smtpMode'] = ''
#If we were passed new values for smartHost or smtpMode, update them
if smartHost != None:
properties['smartHost'] = smartHost
if smtpMode != None:
properties['smtpMode'] = smtpMode
return self._PutProperties(uri, properties)
def AddEmailRoute(self, routeDestination, routeRewriteTo, routeEnabled, bounceNotifications, accountHandling):
"""Adds Domain Email Route
Args:
routeDestination: string, destination ip address or hostname
      routeRewriteTo: boolean, rewrite smtp envelope To:
      routeEnabled: boolean, enable/disable email routing
      bounceNotifications: boolean, send bounce notifications to sender
accountHandling: string, which to route, "allAccounts", "provisionedAccounts", "unknownAccounts"
Returns:
A dict containing the result of the update operation."""
uri = self._serviceUrl('emailrouting')
properties = {}
properties['routeDestination'] = routeDestination
properties['routeRewriteTo'] = gdata.apps.service._bool2str(routeRewriteTo)
properties['routeEnabled'] = gdata.apps.service._bool2str(routeEnabled)
properties['bounceNotifications'] = gdata.apps.service._bool2str(bounceNotifications)
properties['accountHandling'] = accountHandling
return self._PostProperties(uri, properties)
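# A minimal usage sketch, not part of the original module: the constructor arguments
# follow the usual gdata.apps.service pattern and are placeholders; adjust them to your
# credentials and client-library version before trying this.
#
# service = AdminSettingsService(email='admin@example.com', domain='example.com',
#                                password='secret')
# service.ProgrammaticLogin()
# print(service.GetOrganizationName())
# print(service.GetCurrentNumberOfUsers())
# service.UpdateDefaultLanguage('en-GB')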
|
apache-2.0
|
OpenMined/PySyft
|
packages/grid/apps/domain/src/main/core/model_centric/tasks/cycle.py
|
1
|
1084
|
# stdlib
import logging
import traceback
# grid relative
from ....utils.executor import executor
def run_task_once(name, func, *args):
future = executor.futures._futures.get(name)
logging.info("future: %s" % str(future))
logging.info("futures count: %d" % len(executor.futures._futures))
# prevent running multiple threads
if future is None or future.done() is True:
executor.futures.pop(name)
try:
executor.submit_stored(name, func, *args)
except Exception as e:
logging.error(
"Failed to start new thread: %s %s" % (str(e), traceback.format_exc())
)
else:
logging.warning(
"Skipping %s execution because previous one is not finished" % name
)
def complete_cycle(cycle_manager, cycle_id):
logging.info("running complete_cycle")
try:
cycle_manager.complete_cycle(cycle_id)
except Exception as e:
logging.error(
"Error in complete_cycle task: %s %s" % (str(e), traceback.format_exc())
)
return e
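# A minimal usage sketch, not part of the original file: a scheduler could route cycle
# completion through run_task_once() so that only one completion thread per cycle id
# runs at a time. 'cycle_manager' and 'cycle_id' are placeholders supplied by the caller.
#
# run_task_once("complete_cycle_%s" % cycle_id, complete_cycle, cycle_manager, cycle_id)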
|
apache-2.0
|
petrutlucian94/cinder
|
cinder/tests/unit/test_backup_ceph.py
|
19
|
49997
|
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for Ceph backup service."""
import hashlib
import os
import tempfile
import uuid
import mock
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
import six
from six.moves import range
from cinder.backup import driver
from cinder.backup.drivers import ceph
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.volume.drivers import rbd as rbddriver
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
def __init__(self, *args, **kwargs):
RAISED_EXCEPTIONS.append(self.__class__)
class MockImageNotFoundException(MockException):
"""Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
"""Used as mock for rbd.ImageBusy."""
class MockObjectNotFoundException(MockException):
"""Used as mock for rados.MockObjectNotFoundException."""
def common_mocks(f):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
# NOTE(dosaboy): mock Popen to, by default, raise Exception in order to
# ensure that any test ending up in a subprocess fails
# if not properly mocked.
@mock.patch('subprocess.Popen', spec=True)
# NOTE(dosaboy): mock out eventlet.sleep() so that it does nothing.
@mock.patch('eventlet.sleep', spec=True)
@mock.patch('time.time', spec=True)
# NOTE(dosaboy): set spec to empty object so that hasattr calls return
# False by default.
@mock.patch('cinder.backup.drivers.ceph.rbd')
@mock.patch('cinder.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd, mock_time, mock_sleep,
mock_popen):
mock_time.side_effect = inst.time_inc
mock_popen.side_effect = Exception
inst.mock_rados = mock_rados
inst.mock_rbd = mock_rbd
inst.mock_rbd.ImageBusy = MockImageBusyException
inst.mock_rbd.ImageNotFound = MockImageNotFoundException
inst.service.rbd = inst.mock_rbd
inst.service.rados = inst.mock_rados
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
class BackupCephTestCase(test.TestCase):
"""Test case for ceph backup driver."""
def _create_volume_db_entry(self, id, size):
vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size,
userid='user-id', projectid='project-id'):
backup = {'id': backupid, 'size': size, 'volume_id': volid,
'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id']
def time_inc(self):
self.counter += 1
return self.counter
def _get_wrapped_rbd_io(self, rbd_image):
rbd_meta = rbddriver.RBDImageMetadata(rbd_image, 'pool_foo',
'user_foo', 'conf_foo')
return rbddriver.RBDImageIOWrapper(rbd_meta)
def _setup_mock_popen(self, mock_popen, retval=None, p1hook=None,
p2hook=None):
class MockPopen(object):
hooks = [p2hook, p1hook]
def __init__(mock_inst, cmd, *args, **kwargs):
self.callstack.append('popen_init')
mock_inst.stdout = mock.Mock()
mock_inst.stdout.close = mock.Mock()
mock_inst.stdout.close.side_effect = \
lambda *args: self.callstack.append('stdout_close')
mock_inst.returncode = 0
hook = mock_inst.__class__.hooks.pop()
if hook is not None:
hook()
def communicate(mock_inst):
self.callstack.append('communicate')
return retval
mock_popen.side_effect = MockPopen
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(BackupCephTestCase, self).setUp()
self.ctxt = context.get_admin_context()
# Create volume.
self.volume_size = 1
self.volume_id = str(uuid.uuid4())
self._create_volume_db_entry(self.volume_id, self.volume_size)
self.volume = db.volume_get(self.ctxt, self.volume_id)
# Create backup of volume.
self.backup_id = str(uuid.uuid4())
self._create_backup_db_entry(self.backup_id, self.volume_id,
self.volume_size)
self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
# Create alternate volume.
self.alt_volume_id = str(uuid.uuid4())
self._create_volume_db_entry(self.alt_volume_id, self.volume_size)
self.alt_volume = db.volume_get(self.ctxt, self.alt_volume_id)
self.chunk_size = 1024
self.num_chunks = 128
self.data_length = self.num_chunks * self.chunk_size
self.checksum = hashlib.sha256()
# Create a file with some data in it.
self.volume_file = tempfile.NamedTemporaryFile()
self.addCleanup(self.volume_file.close)
for _i in range(0, self.num_chunks):
data = os.urandom(self.chunk_size)
self.checksum.update(data)
self.volume_file.write(data)
self.volume_file.seek(0)
# Always trigger an exception if a command is executed since it should
        # always be dealt with gracefully. At the time of writing only rbd
        # export/import-diff is executed, and if those fail we expect to fall
        # back to alternative means of backing up.
mock_exec = mock.Mock()
mock_exec.side_effect = processutils.ProcessExecutionError
self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec)
# Ensure that time.time() always returns more than the last time it was
# called to avoid div by zero errors.
self.counter = float(0)
self.callstack = []
@common_mocks
def test_get_rbd_support(self):
del self.service.rbd.RBD_FEATURE_LAYERING
del self.service.rbd.RBD_FEATURE_STRIPINGV2
self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_LAYERING'))
self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_STRIPINGV2'))
oldformat, features = self.service._get_rbd_support()
self.assertTrue(oldformat)
self.assertEqual(0, features)
self.service.rbd.RBD_FEATURE_LAYERING = 1
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1, features)
self.service.rbd.RBD_FEATURE_STRIPINGV2 = 2
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1 | 2, features)
@common_mocks
def test_get_most_recent_snap(self):
last = 'backup.%s.snap.9824923.1212' % (uuid.uuid4())
image = self.mock_rbd.Image.return_value
image.list_snaps.return_value = \
[{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': last},
{'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]
snap = self.service._get_most_recent_snap(image)
self.assertEqual(last, snap)
@common_mocks
def test_get_backup_snap_name(self):
snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
def get_backup_snaps(inst, *args):
return [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4()),
'backup_id': str(uuid.uuid4())},
{'name': snap_name,
'backup_id': self.backup_id}]
with mock.patch.object(self.service, 'get_backup_snaps'):
name = self.service._get_backup_snap_name(self.service.rbd.Image(),
'base_foo',
self.backup_id)
self.assertIsNone(name)
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.side_effect = get_backup_snaps
name = self.service._get_backup_snap_name(self.service.rbd.Image(),
'base_foo',
self.backup_id)
self.assertEqual(snap_name, name)
self.assertTrue(mock_get_backup_snaps.called)
@common_mocks
def test_get_backup_snaps(self):
image = self.mock_rbd.Image.return_value
image.list_snaps.return_value = [
{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.wambam.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': 'bbbackup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]
snaps = self.service.get_backup_snaps(image)
self.assertEqual(3, len(snaps))
@common_mocks
def test_transfer_data_from_rbd_to_file(self):
def fake_read(offset, length):
self.volume_file.seek(offset)
return self.volume_file.read(length)
self.mock_rbd.Image.return_value.read.side_effect = fake_read
self.mock_rbd.Image.return_value.size.return_value = self.data_length
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
self.service._transfer_data(rbd_io, 'src_foo', test_file,
'dest_foo', self.data_length)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_rbd_to_rbd(self):
def fake_read(offset, length):
self.volume_file.seek(offset)
return self.volume_file.read(length)
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
rbd1 = mock.Mock()
rbd1.read.side_effect = fake_read
rbd1.size.return_value = os.fstat(self.volume_file.fileno()).st_size
rbd2 = mock.Mock()
rbd2.write.side_effect = mock_write_data
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
src_rbd_io = self._get_wrapped_rbd_io(rbd1)
dest_rbd_io = self._get_wrapped_rbd_io(rbd2)
self.service._transfer_data(src_rbd_io, 'src_foo', dest_rbd_io,
'dest_foo', self.data_length)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_file_to_rbd(self):
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
self.mock_rbd.Image.return_value.write.side_effect = mock_write_data
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
self.service._transfer_data(self.volume_file, 'src_foo',
rbd_io, 'dest_foo', self.data_length)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_file_to_file(self):
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
self.service._transfer_data(self.volume_file, 'src_foo', test_file,
'dest_foo', self.data_length)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_backup_volume_from_file(self):
checksum = hashlib.sha256()
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
self.service.rbd.Image.return_value.write.side_effect = mock_write_data
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, '_discard_bytes'):
with tempfile.NamedTemporaryFile() as test_file:
self.service.backup(self.backup, self.volume_file)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
self.assertTrue(self.service.rbd.Image.return_value.write.called)
@common_mocks
def test_get_backup_base_name(self):
name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.assertEqual("volume-%s.backup.base" % (self.volume_id), name)
self.assertRaises(exception.InvalidParameterValue,
self.service._get_backup_base_name,
self.volume_id)
name = self.service._get_backup_base_name(self.volume_id, '1234')
self.assertEqual("volume-%s.backup.%s" % (self.volume_id, '1234'),
name)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
with mock.patch.object(self.service, '_full_backup') as \
mock_full_backup:
with mock.patch.object(self.service,
'_try_delete_base_image'):
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = rbddriver.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = rbddriver.RBDImageIOWrapper(meta)
self.service.backup(self.backup, rbdio)
self.assertEqual(['popen_init',
'read',
'popen_init',
'write',
'stdout_close',
'communicate'], self.callstack)
self.assertFalse(mock_full_backup.called)
self.assertTrue(mock_get_backup_snaps.called)
# Ensure the files are equal
self.assertEqual(checksum.digest(),
self.checksum.digest())
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In _backup_rbd(), after an exception.BackupRBDOperationFailed
occurs in self._rbd_diff_transfer(), we want to check the
process when the second exception occurs in
self._try_delete_base_image().
"""
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps'), \
mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
def mock_rbd_diff_transfer_side_effect(src_name, src_pool,
dest_name, dest_pool,
src_user, src_conf,
dest_user, dest_conf,
src_snap, from_snap):
raise exception.BackupRBDOperationFailed(_('mock'))
# Raise a pseudo exception.BackupRBDOperationFailed.
mock_rbd_diff_transfer.side_effect \
= mock_rbd_diff_transfer_side_effect
with mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service,
'_try_delete_base_image') as \
mock_try_delete_base_image:
def mock_try_delete_base_image_side_effect(backup_id,
volume_id,
base_name):
raise self.service.rbd.ImageNotFound(_('mock'))
                # Raise a pseudo exception rbd.ImageNotFound.
mock_try_delete_base_image.side_effect \
= mock_try_delete_base_image_side_effect
with mock.patch.object(self.service, '_backup_metadata'):
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = rbddriver.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = rbddriver.RBDImageIOWrapper(meta)
# We expect that the second exception is
# notified.
self.assertRaises(
self.service.rbd.ImageNotFound,
self.service.backup,
self.backup, rbdio)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd_fail2(self, mock_popen, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception.BackupOperationError occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete().
"""
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps'), \
mock.patch.object(self.service, '_rbd_diff_transfer'), \
mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service, '_backup_metadata') as \
mock_backup_metadata:
def mock_backup_metadata_side_effect(backup):
raise exception.BackupOperationError(_('mock'))
# Raise a pseudo exception.BackupOperationError.
mock_backup_metadata.side_effect = mock_backup_metadata_side_effect
with mock.patch.object(self.service, 'delete') as mock_delete:
def mock_delete_side_effect(backup):
raise self.service.rbd.ImageBusy()
# Raise a pseudo exception rbd.ImageBusy.
mock_delete.side_effect = mock_delete_side_effect
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = rbddriver.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = rbddriver.RBDImageIOWrapper(meta)
# We expect that the second exception is
# notified.
self.assertRaises(
self.service.rbd.ImageBusy,
self.service.backup,
self.backup, rbdio)
@common_mocks
def test_backup_vol_length_0(self):
volume_id = str(uuid.uuid4())
self._create_volume_db_entry(volume_id, 0)
backup_id = str(uuid.uuid4())
self._create_backup_db_entry(backup_id, volume_id, 1)
backup = objects.Backup.get_by_id(self.ctxt, backup_id)
self.assertRaises(exception.InvalidParameterValue, self.service.backup,
backup, self.volume_file)
@common_mocks
def test_restore(self):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
def mock_read_data(offset, length):
return self.volume_file.read(self.data_length)
self.mock_rbd.Image.return_value.read.side_effect = mock_read_data
self.mock_rbd.Image.return_value.size.return_value = \
self.chunk_size * self.num_chunks
with mock.patch.object(self.service, '_restore_metadata') as \
mock_restore_metadata:
with mock.patch.object(self.service, '_discard_bytes') as \
mock_discard_bytes:
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
self.service.restore(self.backup, self.volume_id,
test_file)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
self.assertTrue(mock_restore_metadata.called)
self.assertTrue(mock_discard_bytes.called)
self.assertTrue(mock_discard_bytes.called)
self.assertTrue(self.service.rbd.Image.return_value.read.called)
@common_mocks
def test_discard_bytes(self):
        # Lower the chunk size to a memory-manageable number
self.service.chunk_size = 1024
image = self.mock_rbd.Image.return_value
wrapped_rbd = self._get_wrapped_rbd_io(image)
self.service._discard_bytes(wrapped_rbd, 0, 0)
self.assertEqual(0, image.discard.call_count)
self.service._discard_bytes(wrapped_rbd, 0, 1234)
self.assertEqual(1, image.discard.call_count)
image.reset_mock()
# Test discard with no remainder
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
self.service._discard_bytes(wrapped_rbd, 0,
self.service.chunk_size * 2)
self.assertEqual(2, image.write.call_count)
self.assertEqual(2, image.flush.call_count)
self.assertFalse(image.discard.called)
image.reset_mock()
# Now test with a remainder.
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
self.service._discard_bytes(wrapped_rbd, 0,
(self.service.chunk_size * 2) + 1)
self.assertEqual(3, image.write.call_count)
self.assertEqual(3, image.flush.call_count)
self.assertFalse(image.discard.called)
@common_mocks
def test_delete_backup_snapshot(self):
snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
base_name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.mock_rbd.RBD.remove_snap = mock.Mock()
with mock.patch.object(self.service, '_get_backup_snap_name') as \
mock_get_backup_snap_name:
mock_get_backup_snap_name.return_value = snap_name
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = None
rem = self.service._delete_backup_snapshot(self.mock_rados,
base_name,
self.backup_id)
self.assertTrue(mock_get_backup_snap_name.called)
self.assertTrue(mock_get_backup_snaps.called)
self.assertEqual((snap_name, 0), rem)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_try_delete_base_image_diff_format(self, mock_meta_backup):
backup_name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
with mock.patch.object(self.service, '_delete_backup_snapshot') as \
mock_del_backup_snap:
snap_name = self.service._get_new_snap_name(self.backup_id)
mock_del_backup_snap.return_value = (snap_name, 0)
self.service.delete(self.backup)
self.assertTrue(mock_del_backup_snap.called)
self.assertTrue(self.mock_rbd.RBD.return_value.list.called)
self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_try_delete_base_image(self, mock_meta_backup):
backup_name = self.service._get_backup_base_name(self.volume_id,
self.backup_id)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps'):
self.service.delete(self.backup)
self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
@common_mocks
def test_try_delete_base_image_busy(self):
"""This should induce retries then raise rbd.ImageBusy."""
backup_name = self.service._get_backup_base_name(self.volume_id,
self.backup_id)
rbd = self.mock_rbd.RBD.return_value
rbd.list.return_value = [backup_name]
rbd.remove.side_effect = self.mock_rbd.ImageBusy
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
self.assertRaises(self.mock_rbd.ImageBusy,
self.service._try_delete_base_image,
self.backup['id'], self.backup['volume_id'])
self.assertTrue(mock_get_backup_snaps.called)
self.assertTrue(rbd.list.called)
self.assertTrue(rbd.remove.called)
self.assertTrue(MockImageBusyException in RAISED_EXCEPTIONS)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_delete(self, mock_meta_backup):
with mock.patch.object(self.service, '_try_delete_base_image'):
self.service.delete(self.backup)
self.assertEqual([], RAISED_EXCEPTIONS)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_delete_image_not_found(self, mock_meta_backup):
with mock.patch.object(self.service, '_try_delete_base_image') as \
mock_del_base:
mock_del_base.side_effect = self.mock_rbd.ImageNotFound
# ImageNotFound exception is caught so that db entry can be cleared
self.service.delete(self.backup)
self.assertEqual([MockImageNotFoundException], RAISED_EXCEPTIONS)
@common_mocks
def test_diff_restore_allowed_with_image_not_exists(self):
"""Test diff restore not allowed when backup not diff-format."""
not_allowed = (False, None)
backup_base = 'backup.base'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (False, backup_base)
resp = self.service._diff_restore_allowed(*args_vols_different)
self.assertEqual(not_allowed, resp)
mock_rbd_image_exists.assert_called_once_with(
backup_base,
self.backup['volume_id'],
self.mock_rados)
@common_mocks
def test_diff_restore_allowed_with_no_restore_point(self):
"""Test diff restore not allowed when no restore point found.
Detail conditions:
1. backup base is diff-format
2. restore point does not exist
"""
not_allowed = (False, None)
backup_base = 'backup.base'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = None
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual(not_allowed, resp)
self.assertTrue(mock_rbd_image_exists.called)
mock_get_restore_point.assert_called_once_with(
backup_base,
self.backup['id'])
@common_mocks
def test_diff_restore_allowed_with_not_rbd(self):
"""Test diff restore not allowed when destination volume is not rbd.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is not an rbd.
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
mock_file_is_rbd.assert_called_once_with(
rbd_io)
@common_mocks
def test_diff_restore_allowed_with_same_volume(self):
"""Test diff restore not allowed when volumes are same.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are the same
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_same = [backup_base, self.backup, self.volume, rbd_io,
self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
resp = self.service._diff_restore_allowed(*args_vols_same)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
@common_mocks
def test_diff_restore_allowed_with_has_extents(self):
"""Test diff restore not allowed when destination volume has data.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are different
5. destination volume has data on it - full copy is mandated
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
with mock.patch.object(self.service, '_rbd_has_extents') \
as mock_rbd_has_extents:
mock_rbd_has_extents.return_value = True
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
mock_rbd_has_extents.assert_called_once_with(
rbd_io.rbd_image)
@common_mocks
def test_diff_restore_allowed_with_no_extents(self):
"""Test diff restore allowed when no data in destination volume.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are different
        5. destination volume has no data on it
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
with mock.patch.object(self.service, '_rbd_has_extents') \
as mock_rbd_has_extents:
mock_rbd_has_extents.return_value = False
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((True, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
self.assertTrue(mock_rbd_has_extents.called)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_piped_execute(self, mock_popen, mock_fcntl):
mock_fcntl.return_value = 0
self._setup_mock_popen(mock_popen, ['out', 'err'])
self.service._piped_execute(['foo'], ['bar'])
self.assertEqual(['popen_init', 'popen_init',
'stdout_close', 'communicate'], self.callstack)
@common_mocks
    def test_restore_metadata(self):
version = 2
def mock_read(*args):
base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
glance_tag: {'image_name': 'image.glance'},
'version': version})
self.mock_rados.Object.return_value.read.side_effect = mock_read
self.service._restore_metadata(self.backup, self.volume_id)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.assertTrue(self.mock_rados.Object.return_value.read.called)
version = 3
try:
self.service._restore_metadata(self.backup, self.volume_id)
except exception.BackupOperationError as exc:
msg = _("Metadata restore failed due to incompatible version")
self.assertEqual(msg, six.text_type(exc))
else:
# Force a test failure
self.assertFalse(True)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_backup_metadata_already_exists(self, mock_meta_backup):
def mock_set(json_meta):
msg = (_("Metadata backup object '%s' already exists") %
("backup.%s.meta" % (self.backup_id)))
raise exception.VolumeMetadataBackupExists(msg)
mock_meta_backup.return_value.set = mock.Mock()
mock_meta_backup.return_value.set.side_effect = mock_set
with mock.patch.object(self.service, 'get_metadata') as \
mock_get_metadata:
mock_get_metadata.return_value = "some.json.metadata"
try:
self.service._backup_metadata(self.backup)
except exception.BackupOperationError as e:
msg = (_("Failed to backup volume metadata - Metadata backup "
"object 'backup.%s.meta' already exists") %
(self.backup_id))
self.assertEqual(msg, six.text_type(e))
else:
# Make the test fail
self.assertFalse(True)
self.assertFalse(mock_meta_backup.set.called)
@common_mocks
    def test_backup_metadata_error(self):
"""Ensure that delete() is called if the metadata backup fails.
Also ensure that the exception is propagated to the caller.
"""
with mock.patch.object(self.service, '_backup_metadata') as \
mock_backup_metadata:
mock_backup_metadata.side_effect = exception.BackupOperationError
with mock.patch.object(self.service, '_get_volume_size_gb'):
with mock.patch.object(self.service, '_file_is_rbd',
return_value=False):
with mock.patch.object(self.service, '_full_backup'):
with mock.patch.object(self.service, 'delete') as \
mock_delete:
self.assertRaises(exception.BackupOperationError,
self.service.backup, self.backup,
mock.Mock(),
backup_metadata=True)
self.assertTrue(mock_delete.called)
@common_mocks
def test_restore_invalid_metadata_version(self):
def mock_read(*args):
base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
glance_tag: {'image_name': 'image.glance'},
'version': 3})
self.mock_rados.Object.return_value.read.side_effect = mock_read
with mock.patch.object(ceph.VolumeMetadataBackup, '_exists') as \
mock_exists:
mock_exists.return_value = True
self.assertRaises(exception.BackupOperationError,
self.service._restore_metadata,
self.backup, self.volume_id)
self.assertTrue(mock_exists.called)
self.assertTrue(self.mock_rados.Object.return_value.read.called)
def common_meta_backup_mocks(f):
"""Decorator to set mocks common to all metadata backup tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch('cinder.backup.drivers.ceph.rbd')
@mock.patch('cinder.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd):
inst.mock_rados = mock_rados
inst.mock_rbd = mock_rbd
inst.mock_rados.ObjectNotFound = MockObjectNotFoundException
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
class VolumeMetadataBackupTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(VolumeMetadataBackupTestCase, self).setUp()
self.backup_id = str(uuid.uuid4())
self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id)
@common_meta_backup_mocks
def test_name(self):
self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name)
@common_meta_backup_mocks
def test_exists(self):
# True
self.assertTrue(self.mb.exists)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.mock_rados.Object.return_value.reset_mock()
# False
self.mock_rados.Object.return_value.stat.side_effect = (
self.mock_rados.ObjectNotFound)
self.assertFalse(self.mb.exists)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
@common_meta_backup_mocks
def test_set(self):
obj_data = []
called = []
def mock_read(*args):
called.append('read')
self.assertTrue(len(obj_data) == 1)
return obj_data[0]
def _mock_write(data):
obj_data.append(data)
called.append('write')
self.mb.get = mock.Mock()
self.mb.get.side_effect = mock_read
with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as mock_write:
mock_write.side_effect = _mock_write
self.mb.set({'foo': 'bar'})
self.assertEqual({'foo': 'bar'}, self.mb.get())
self.assertTrue(self.mb.get.called)
self.mb._exists = mock.Mock()
self.mb._exists.return_value = True
# use the unmocked set() method.
self.assertRaises(exception.VolumeMetadataBackupExists, self.mb.set,
{'doo': 'dah'})
# check the meta obj state has not changed.
self.assertEqual({'foo': 'bar'}, self.mb.get())
self.assertEqual(['write', 'read', 'read'], called)
@common_meta_backup_mocks
def test_get(self):
self.mock_rados.Object.return_value.stat.side_effect = (
self.mock_rados.ObjectNotFound)
self.mock_rados.Object.return_value.read.return_value = 'meta'
self.assertIsNone(self.mb.get())
self.mock_rados.Object.return_value.stat.side_effect = None
self.assertEqual('meta', self.mb.get())
@common_meta_backup_mocks
    def test_remove_if_exists(self):
with mock.patch.object(self.mock_rados.Object, 'remove') as \
mock_remove:
mock_remove.side_effect = self.mock_rados.ObjectNotFound
self.mb.remove_if_exists()
self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
self.mock_rados.Object.remove.side_effect = None
self.mb.remove_if_exists()
self.assertEqual([], RAISED_EXCEPTIONS)
|
apache-2.0
|
Lh4cKg/brython
|
www/src/Lib/unittest/test/test_break.py
|
785
|
8138
|
import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
gc.collect();gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
|
bsd-3-clause
|
maxolasersquad/ninja-ide
|
ninja_ide/core/file_handling/filesystem_notifications/linux.py
|
8
|
5070
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import os
from PyQt4.QtCore import QThread
from pyinotify import ProcessEvent, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \
IN_MODIFY, WatchManager, Notifier, ExcludeFilter
from ninja_ide.tools.logger import NinjaLogger
logger = NinjaLogger('ninja_ide.core.file_handling.filesystem_notifications.linux')
DEBUG = logger.debug
from ninja_ide.core.file_handling.filesystem_notifications import base_watcher
ADDED = base_watcher.ADDED
DELETED = base_watcher.DELETED
REMOVE = base_watcher.REMOVE
RENAME = base_watcher.RENAME
MODIFIED = base_watcher.MODIFIED
#FIXME: For some reason the code below raises an import error with name ADDED
#from ninja_ide.core.file_handling.filesystem_notifications.base_watcher import ADDED, \
# DELETED, REMOVE, RENAME, MODIFIED
mask = IN_CREATE | IN_DELETE | IN_DELETE_SELF | IN_MODIFY
class NinjaProcessEvent(ProcessEvent):
def __init__(self, process_callback):
self._process_callback = process_callback
ProcessEvent.__init__(self)
def process_IN_CREATE(self, event):
self._process_callback((ADDED, event.pathname))
def process_IN_DELETE(self, event):
self._process_callback((DELETED, event.pathname))
def process_IN_DELETE_SELF(self, event):
self._process_callback((DELETED, event.pathname))
def process_IN_MODIFY(self, event):
self._process_callback((MODIFIED, event.pathname))
def process_IN_MOVED_TO(self, event):
self._process_callback((REMOVE, event.pathname))
def process_IN_MOVED_FROM(self, event):
self._process_callback((REMOVE, event.pathname))
def process_IN_MOVE_SELF(self, event):
self._process_callback((RENAME, event.pathname))
class QNotifier(QThread):
def __init__(self, wm, processor):
self.event_queue = list()
self._processor = processor
self.notifier = Notifier(wm,
NinjaProcessEvent(self.event_queue.append))
self.notifier.coalesce_events(True)
self.keep_running = True
QThread.__init__(self)
def run(self):
while self.keep_running:
try:
self.notifier.process_events()
except OSError:
pass # OSError: [Errno 2] No such file or directory can happen here
e_dict = {}
while len(self.event_queue):
e_type, e_path = self.event_queue.pop(0)
e_dict.setdefault(e_path, []).append(e_type)
keys = list(e_dict.keys())
while len(keys):
key = keys.pop(0)
event = e_dict.pop(key)
if (ADDED in event) and (DELETED in event):
event = [e for e in event if e not in (ADDED, DELETED)]
for each_event in event:
self._processor(each_event, key)
if self.notifier.check_events():
self.notifier.read_events()
self.notifier.stop()
class NinjaFileSystemWatcher(base_watcher.BaseWatcher):
def __init__(self):
self.watching_paths = {}
super(NinjaFileSystemWatcher, self).__init__()
self._ignore_hidden = ('.git', '.hg', '.svn', '.bzr')
def add_watch(self, path):
if path not in self.watching_paths:
try:
wm = WatchManager()
notifier = QNotifier(wm, self._emit_signal_on_change)
notifier.start()
exclude = ExcludeFilter([os.path.join(path, folder)
for folder in self._ignore_hidden])
wm.add_watch(path, mask, rec=True, auto_add=True,
exclude_filter=exclude)
self.watching_paths[path] = notifier
except (OSError, IOError):
pass
#Errors here are most likely caused by a transient temp file
def remove_watch(self, path):
if path in self.watching_paths:
notifier = self.watching_paths.pop(path)
notifier.keep_running = False
notifier.quit()
def shutdown_notification(self):
base_watcher.BaseWatcher.shutdown_notification(self)
for each_path in self.watching_paths:
notifier = self.watching_paths[each_path]
notifier.keep_running = False
notifier.quit()
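# Illustrative sketch (not part of the original module): a hypothetical caller
# would drive the watcher roughly like this; the path and lifetime management
# shown here are assumptions, not NINJA-IDE's actual wiring.
def _example_watcher_usage(project_path='/tmp/example_project'):
    watcher = NinjaFileSystemWatcher()
    # Starts a QNotifier thread that reports ADDED/DELETED/MODIFIED events
    # through the base class signal machinery.
    watcher.add_watch(project_path)
    # ... later, when the project is closed:
    watcher.remove_watch(project_path)
    watcher.shutdown_notification()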
|
gpl-3.0
|
konstruktoid/ansible-upstream
|
lib/ansible/parsing/vault/__init__.py
|
3
|
57055
|
# (c) 2014, James Tanner <[email protected]>
# (c) 2016, Adrian Likins <[email protected]>
# (c) 2016 Toshio Kuratomi <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import random
import shlex
import shutil
import subprocess
import sys
import tempfile
import warnings
from binascii import hexlify
from binascii import unhexlify
from binascii import Error as BinasciiError
from hashlib import md5
from hashlib import sha256
from io import BytesIO
HAS_CRYPTOGRAPHY = False
HAS_PYCRYPTO = False
HAS_SOME_PYCRYPTO = False
CRYPTOGRAPHY_BACKEND = None
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, padding
from cryptography.hazmat.primitives.hmac import HMAC
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.primitives.ciphers import (
Cipher as C_Cipher, algorithms, modes
)
CRYPTOGRAPHY_BACKEND = default_backend()
HAS_CRYPTOGRAPHY = True
except ImportError:
pass
try:
from Crypto.Cipher import AES as AES_pycrypto
HAS_SOME_PYCRYPTO = True
# Note: Only used for loading obsolete VaultAES files. All files are written
# using the newer VaultAES256 which does not require md5
from Crypto.Hash import SHA256 as SHA256_pycrypto
from Crypto.Hash import HMAC as HMAC_pycrypto
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
from Crypto.Util import Counter as Counter_pycrypto
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
from Crypto.Protocol.KDF import PBKDF2 as PBKDF2_pycrypto
HAS_PYCRYPTO = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible import constants as C
from ansible.module_utils.six import PY3, binary_type
# Note: on py2, this zip is izip not the list based zip() builtin
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.utils.path import makedirs_safe
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
b_HEADER = b'$ANSIBLE_VAULT'
CIPHER_WHITELIST = frozenset((u'AES', u'AES256'))
CIPHER_WRITE_WHITELIST = frozenset((u'AES256',))
# See also CIPHER_MAPPING at the bottom of the file which maps cipher strings
# (used in VaultFile header) to a cipher class
NEED_CRYPTO_LIBRARY = "ansible-vault requires either the cryptography library (preferred) or"
if HAS_SOME_PYCRYPTO:
NEED_CRYPTO_LIBRARY += " a newer version of"
NEED_CRYPTO_LIBRARY += " pycrypto in order to function."
class AnsibleVaultError(AnsibleError):
pass
class AnsibleVaultPasswordError(AnsibleVaultError):
pass
class AnsibleVaultFormatError(AnsibleError):
pass
def is_encrypted(data):
""" Test if this is vault encrypted data blob
:arg data: a byte or text string to test whether it is recognized as vault
encrypted data
:returns: True if it is recognized. Otherwise, False.
"""
try:
# Make sure we have a byte string and that it only contains ascii
# bytes.
b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict')
except (UnicodeError, TypeError):
# The vault format is pure ascii so if we failed to encode to bytes
# via ascii we know that this is not vault data.
# Similarly, if it's not a string, it's not vault data
return False
if b_data.startswith(b_HEADER):
return True
return False
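# Illustrative sketch (added here, not upstream code): is_encrypted() only looks
# for the leading b'$ANSIBLE_VAULT' header, so a quick self-check could look like
# this; both sample payloads are made up.
def _example_is_encrypted_check():
    assert is_encrypted(b'$ANSIBLE_VAULT;1.1;AES256\n6162636465')
    assert not is_encrypted(u'plain text, not a vault blob')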
def is_encrypted_file(file_obj, start_pos=0, count=-1):
"""Test if the contents of a file obj are a vault encrypted data blob.
:arg file_obj: A file object that will be read from.
:kwarg start_pos: A byte offset in the file to start reading the header
from. Defaults to 0, the beginning of the file.
:kwarg count: Read up to this number of bytes from the file to determine
if it looks like encrypted vault data. The default is -1, read to the
end of file.
:returns: True if the file looks like a vault file. Otherwise, False.
"""
# read the header and reset the file stream to where it started
current_position = file_obj.tell()
try:
file_obj.seek(start_pos)
return is_encrypted(file_obj.read(count))
finally:
file_obj.seek(current_position)
def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
b_tmpdata = b_vaulttext_envelope.splitlines()
b_tmpheader = b_tmpdata[0].strip().split(b';')
b_version = b_tmpheader[1].strip()
cipher_name = to_text(b_tmpheader[2].strip())
vault_id = default_vault_id
# Only attempt to find vault_id if the vault file is version 1.2 or newer
# if self.b_version == b'1.2':
if len(b_tmpheader) >= 4:
vault_id = to_text(b_tmpheader[3].strip())
b_ciphertext = b''.join(b_tmpdata[1:])
return b_ciphertext, b_version, cipher_name, vault_id
def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None, filename=None):
"""Parse the vaulttext envelope
When data is saved, it has a header prepended and is formatted into 80
character lines. This method extracts the information from the header
and then removes the header and the inserted newlines. The string returned
is suitable for processing by the Cipher classes.
:arg b_vaulttext: byte str containing the data from a save file
:kwarg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
:kwarg filename: The filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted. This is optional.
:returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaulttext,
a byte str of the vault format version,
the name of the cipher used, and the vault_id.
:raises: AnsibleVaultFormatError: if the vaulttext_envelope format is invalid
"""
# used by decrypt
default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY
try:
return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id)
except Exception as exc:
msg = "Vault envelope format error"
if filename:
msg += ' in %s' % (filename)
msg += ': %s' % exc
raise AnsibleVaultFormatError(msg)
def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None):
""" Add header and format to 80 columns
:arg b_ciphertext: the encrypted and hexlified data as a byte string
:arg cipher_name: unicode cipher name (for ex, u'AES256')
:arg version: unicode vault version (for ex, '1.2'). Optional ('1.1' is default)
:arg vault_id: unicode vault identifier. If provided, the version will be bumped to 1.2.
:returns: a byte str that should be dumped into a file. It's
formatted to 80 char columns and has the header prepended
"""
if not cipher_name:
raise AnsibleError("the cipher must be set before adding a header")
version = version or '1.1'
# If we specify a vault_id, use format version 1.2. For no vault_id, stick to 1.1
if vault_id and vault_id != u'default':
version = '1.2'
b_version = to_bytes(version, 'utf-8', errors='strict')
b_vault_id = to_bytes(vault_id, 'utf-8', errors='strict')
b_cipher_name = to_bytes(cipher_name, 'utf-8', errors='strict')
header_parts = [b_HEADER,
b_version,
b_cipher_name]
if b_version == b'1.2' and b_vault_id:
header_parts.append(b_vault_id)
header = b';'.join(header_parts)
b_vaulttext = [header]
b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)]
b_vaulttext += [b'']
b_vaulttext = b'\n'.join(b_vaulttext)
return b_vaulttext
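# Illustrative sketch (added here, not upstream code): the two envelope helpers
# above are inverses, so a round trip with a hypothetical hex ciphertext and
# vault-id recovers the same pieces.
def _example_envelope_round_trip():
    b_envelope = format_vaulttext_envelope(b'6162636465', u'AES256', vault_id=u'dev')
    b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_envelope)
    assert b_ciphertext == b'6162636465'
    assert b_version == b'1.2' and cipher_name == u'AES256' and vault_id == u'dev'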
def _unhexlify(b_data):
try:
return unhexlify(b_data)
except (BinasciiError, TypeError) as exc:
raise AnsibleVaultFormatError('Vault format unhexlify error: %s' % exc)
def _parse_vaulttext(b_vaulttext):
b_vaulttext = _unhexlify(b_vaulttext)
b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b"\n", 2)
b_salt = _unhexlify(b_salt)
b_ciphertext = _unhexlify(b_ciphertext)
return b_ciphertext, b_salt, b_crypted_hmac
def parse_vaulttext(b_vaulttext):
"""Parse the vaulttext
:arg b_vaulttext: byte str containing the vaulttext (ciphertext, salt, crypted_hmac)
:returns: A tuple of byte str of the ciphertext suitable for passing to a
Cipher class's decrypt() function, a byte str of the salt,
and a byte str of the crypted_hmac
:raises: AnsibleVaultFormatError: if the vaulttext format is invalid
"""
# SPLIT SALT, DIGEST, AND DATA
try:
return _parse_vaulttext(b_vaulttext)
except AnsibleVaultFormatError:
raise
except Exception as exc:
msg = "Vault vaulttext format error: %s" % exc
raise AnsibleVaultFormatError(msg)
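# Illustrative sketch (added here, not upstream code): the inner payload is the
# hexlified triple "salt\nhmac\nciphertext", so parsing a hand-built value with
# made-up salt, hmac and ciphertext bytes behaves like this.
def _example_parse_vaulttext():
    b_vaulttext = hexlify(b'\n'.join([hexlify(b'salt'), b'deadbeef', hexlify(b'ct')]))
    b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext)
    assert (b_ciphertext, b_salt, b_crypted_hmac) == (b'ct', b'salt', b'deadbeef')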
def verify_secret_is_not_empty(secret, msg=None):
'''Check the secret against minimal requirements.
Raises: AnsibleVaultPasswordError if the password does not meet requirements.
Currently, the only requirement is that the password is neither None nor an empty string.
'''
msg = msg or 'Invalid vault password was provided'
if not secret:
raise AnsibleVaultPasswordError(msg)
class VaultSecret:
'''Opaque/abstract objects for a single vault secret, i.e., a password or a key.'''
def __init__(self, _bytes=None):
# FIXME: ? that seems wrong... Unset etc?
self._bytes = _bytes
@property
def bytes(self):
'''The secret as a bytestring.
Subclasses that store text types will need to override this to encode the text to bytes.
'''
return self._bytes
def load(self):
return self._bytes
class PromptVaultSecret(VaultSecret):
default_prompt_formats = ["Vault password (%s): "]
def __init__(self, _bytes=None, vault_id=None, prompt_formats=None):
super(PromptVaultSecret, self).__init__(_bytes=_bytes)
self.vault_id = vault_id
if prompt_formats is None:
self.prompt_formats = self.default_prompt_formats
else:
self.prompt_formats = prompt_formats
@property
def bytes(self):
return self._bytes
def load(self):
self._bytes = self.ask_vault_passwords()
def ask_vault_passwords(self):
b_vault_passwords = []
for prompt_format in self.prompt_formats:
prompt = prompt_format % {'vault_id': self.vault_id}
try:
vault_pass = display.prompt(prompt, private=True)
except EOFError:
raise AnsibleVaultError('EOFError (ctrl-d) on prompt for (%s)' % self.vault_id)
verify_secret_is_not_empty(vault_pass)
b_vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
b_vault_passwords.append(b_vault_pass)
# Make sure the passwords match by comparing them all to the first password
for b_vault_password in b_vault_passwords:
self.confirm(b_vault_passwords[0], b_vault_password)
if b_vault_passwords:
return b_vault_passwords[0]
return None
def confirm(self, b_vault_pass_1, b_vault_pass_2):
# enforce no newline chars at the end of passwords
if b_vault_pass_1 != b_vault_pass_2:
# FIXME: more specific exception
raise AnsibleError("Passwords do not match")
def script_is_client(filename):
'''Determine if a vault secret script is a client script that can be given --vault-id args'''
# if password script is 'something-client' or 'something-client.[sh|py|rb|etc]'
# script_name can still have '.' or could be entire filename if there is no ext
script_name, dummy = os.path.splitext(filename)
# TODO: for now, this is entirely based on filename
if script_name.endswith('-client'):
return True
return False
def get_file_vault_secret(filename=None, vault_id=None, encoding=None, loader=None):
this_path = os.path.realpath(os.path.expanduser(filename))
if not os.path.exists(this_path):
raise AnsibleError("The vault password file %s was not found" % this_path)
if loader.is_executable(this_path):
if script_is_client(filename):
display.vvvv('The vault password file %s is a client script.' % filename)
# TODO: pass vault_id_name to script via cli
return ClientScriptVaultSecret(filename=this_path, vault_id=vault_id,
encoding=encoding, loader=loader)
# just a plain vault password script. No args, returns a byte array
return ScriptVaultSecret(filename=this_path, encoding=encoding, loader=loader)
return FileVaultSecret(filename=this_path, encoding=encoding, loader=loader)
# TODO: mv these classes to a separate file so we don't pollute vault with 'subprocess' etc
class FileVaultSecret(VaultSecret):
def __init__(self, filename=None, encoding=None, loader=None):
super(FileVaultSecret, self).__init__()
self.filename = filename
self.loader = loader
self.encoding = encoding or 'utf8'
# We could load from file here, but that is eventually a pain to test
self._bytes = None
self._text = None
@property
def bytes(self):
if self._bytes:
return self._bytes
if self._text:
return self._text.encode(self.encoding)
return None
def load(self):
self._bytes = self._read_file(self.filename)
def _read_file(self, filename):
"""
Read a vault password from a file or, if the file is executable, execute the script and
retrieve the password from STDOUT
"""
# TODO: replace with use of self.loader
try:
f = open(filename, "rb")
vault_pass = f.read().strip()
f.close()
except (OSError, IOError) as e:
raise AnsibleError("Could not read vault password file %s: %s" % (filename, e))
b_vault_data, dummy = self.loader._decrypt_if_vault_data(vault_pass, filename)
vault_pass = b_vault_data.strip(b'\r\n')
verify_secret_is_not_empty(vault_pass,
msg='Invalid vault password was provided from file (%s)' % filename)
return vault_pass
def __repr__(self):
if self.filename:
return "%s(filename='%s')" % (self.__class__.__name__, self.filename)
return "%s()" % (self.__class__.__name__)
class ScriptVaultSecret(FileVaultSecret):
def _read_file(self, filename):
if not self.loader.is_executable(filename):
raise AnsibleVaultError("The vault password script %s was not executable" % filename)
command = self._build_command()
stdout, stderr, p = self._run(command)
self._check_results(stdout, stderr, p)
vault_pass = stdout.strip(b'\r\n')
empty_password_msg = 'Invalid vault password was provided from script (%s)' % filename
verify_secret_is_not_empty(vault_pass,
msg=empty_password_msg)
return vault_pass
def _run(self, command):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(command, stdout=subprocess.PIPE)
except OSError as e:
msg_format = "Problem running vault password script %s (%s)." \
" If this is not a script, remove the executable bit from the file."
msg = msg_format % (self.filename, e)
raise AnsibleError(msg)
stdout, stderr = p.communicate()
return stdout, stderr, p
def _check_results(self, stdout, stderr, popen):
if popen.returncode != 0:
raise AnsibleError("Vault password script %s returned non-zero (%s): %s" %
(self.filename, popen.returncode, stderr))
def _build_command(self):
return [self.filename]
class ClientScriptVaultSecret(ScriptVaultSecret):
VAULT_ID_UNKNOWN_RC = 2
def __init__(self, filename=None, encoding=None, loader=None, vault_id=None):
super(ClientScriptVaultSecret, self).__init__(filename=filename,
encoding=encoding,
loader=loader)
self._vault_id = vault_id
display.vvvv('Executing vault password client script: %s --vault-id %s' % (filename, vault_id))
def _run(self, command):
try:
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
msg_format = "Problem running vault password client script %s (%s)." \
" If this is not a script, remove the executable bit from the file."
msg = msg_format % (self.filename, e)
raise AnsibleError(msg)
stdout, stderr = p.communicate()
return stdout, stderr, p
def _check_results(self, stdout, stderr, popen):
if popen.returncode == self.VAULT_ID_UNKNOWN_RC:
raise AnsibleError('Vault password client script %s did not find a secret for vault-id=%s: %s' %
(self.filename, self._vault_id, stderr))
if popen.returncode != 0:
raise AnsibleError("Vault password client script %s returned non-zero (%s) when getting secret for vault-id=%s: %s" %
(self.filename, popen.returncode, self._vault_id, stderr))
def _build_command(self):
command = [self.filename]
if self._vault_id:
command.extend(['--vault-id', self._vault_id])
return command
def __repr__(self):
if self.filename:
return "%s(filename='%s', vault_id='%s')" % \
(self.__class__.__name__, self.filename, self._vault_id)
return "%s()" % (self.__class__.__name__)
def match_secrets(secrets, target_vault_ids):
'''Find all VaultSecret objects that are mapped to any of the target_vault_ids in secrets'''
if not secrets:
return []
matches = [(vault_id, secret) for vault_id, secret in secrets if vault_id in target_vault_ids]
return matches
def match_best_secret(secrets, target_vault_ids):
'''Find the best secret from secrets that matches target_vault_ids
Since secrets should be ordered so the early secrets are 'better' than later ones, this
just finds all the matches, then returns the first secret'''
matches = match_secrets(secrets, target_vault_ids)
if matches:
return matches[0]
# raise exception?
return None
def match_encrypt_vault_id_secret(secrets, encrypt_vault_id=None):
# See if the --encrypt-vault-id matches a vault-id
display.vvvv('encrypt_vault_id=%s' % encrypt_vault_id)
if encrypt_vault_id is None:
raise AnsibleError('match_encrypt_vault_id_secret requires a non None encrypt_vault_id')
encrypt_vault_id_matchers = [encrypt_vault_id]
encrypt_secret = match_best_secret(secrets, encrypt_vault_id_matchers)
# return the best match for --encrypt-vault-id
if encrypt_secret:
return encrypt_secret
# If we specified an encrypt_vault_id and we couldn't find it, don't
# fall back to using the first/best secret
raise AnsibleVaultError('Did not find a match for --encrypt-vault-id=%s in the known vault-ids %s' % (encrypt_vault_id,
[_v for _v, _vs in secrets]))
def match_encrypt_secret(secrets, encrypt_vault_id=None):
'''Find the best/first/only secret in secrets to use for encrypting'''
display.vvvv('encrypt_vault_id=%s' % encrypt_vault_id)
# See if the --encrypt-vault-id matches a vault-id
if encrypt_vault_id:
return match_encrypt_vault_id_secret(secrets,
encrypt_vault_id=encrypt_vault_id)
# Find the best/first secret from secrets since we didn't specify otherwise
# ie, consider all of the available secrets as matches
_vault_id_matchers = [_vault_id for _vault_id, dummy in secrets]
best_secret = match_best_secret(secrets, _vault_id_matchers)
# may be None if there were no secrets to match
return best_secret
class VaultLib:
def __init__(self, secrets=None):
self.secrets = secrets or []
self.cipher_name = None
self.b_version = b'1.2'
def encrypt(self, plaintext, secret=None, vault_id=None):
"""Vault encrypt a piece of data.
:arg plaintext: a text or byte string to encrypt.
:returns: a utf-8 encoded byte str of encrypted data. The string
contains a header identifying this as vault encrypted data and
formatted to newline terminated lines of 80 characters. This is
suitable for dumping as is to a vault file.
If the string passed in is a text string, it will be encoded to UTF-8
before encryption.
"""
if secret is None:
if self.secrets:
dummy, secret = match_encrypt_secret(self.secrets)
else:
raise AnsibleVaultError("A vault password must be specified to encrypt data")
b_plaintext = to_bytes(plaintext, errors='surrogate_or_strict')
if is_encrypted(b_plaintext):
raise AnsibleError("input is already encrypted")
if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_WHITELIST:
self.cipher_name = u"AES256"
try:
this_cipher = CIPHER_MAPPING[self.cipher_name]()
except KeyError:
raise AnsibleError(u"{0} cipher could not be found".format(self.cipher_name))
# encrypt data
if vault_id:
display.vvvvv('Encrypting with vault_id "%s" and vault secret %s' % (vault_id, secret))
else:
display.vvvvv('Encrypting without a vault_id using vault secret %s' % secret)
b_ciphertext = this_cipher.encrypt(b_plaintext, secret)
# format the data for output to the file
b_vaulttext = format_vaulttext_envelope(b_ciphertext,
self.cipher_name,
vault_id=vault_id)
return b_vaulttext
def decrypt(self, vaulttext, filename=None):
'''Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
:kwarg filename: a filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted.
:returns: a byte string containing the decrypted data
'''
plaintext, vault_id, vault_secret = self.decrypt_and_get_vault_id(vaulttext, filename=filename)
return plaintext
def decrypt_and_get_vault_id(self, vaulttext, filename=None):
"""Decrypt a piece of vault encrypted data.
:arg vaulttext: a string to decrypt. Since vault encrypted data is an
ascii text format this can be either a byte str or unicode string.
:kwarg filename: a filename that the data came from. This is only
used to make better error messages in case the data cannot be
decrypted.
:returns: a byte string containing the decrypted data and the vault-id and vault-secret that were used
"""
b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8')
if self.secrets is None:
raise AnsibleVaultError("A vault password must be specified to decrypt data")
if not is_encrypted(b_vaulttext):
msg = "input is not vault encrypted data"
if filename:
msg += "%s is not a vault encrypted file" % to_native(filename)
raise AnsibleError(msg)
b_vaulttext, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext,
filename=filename)
# create the cipher object, note that the cipher used for decrypt can
# be different than the cipher used for encrypt
if cipher_name in CIPHER_WHITELIST:
this_cipher = CIPHER_MAPPING[cipher_name]()
else:
raise AnsibleError("{0} cipher could not be found".format(cipher_name))
b_plaintext = None
if not self.secrets:
raise AnsibleVaultError('Attempting to decrypt but no vault secrets found')
# WARNING: Currently, the vault id is not required to match the vault id in the vault blob to
# decrypt a vault properly. The vault id in the vault blob is not part of the encrypted
# or signed vault payload. There is no cryptographic checking/verification/validation of the
# vault blob's vault id. It can be tampered with and changed. The vault id is just a nickname
# used to pick the best secret and provide some ux/ui info.
# iterate over all the applicable secrets (all of them by default) until one works...
# if we specify a vault_id, only the corresponding vault secret is checked and
# we check it first.
vault_id_matchers = []
vault_id_used = None
vault_secret_used = None
if vault_id:
display.vvvvv('Found a vault_id (%s) in the vaulttext' % (vault_id))
vault_id_matchers.append(vault_id)
_matches = match_secrets(self.secrets, vault_id_matchers)
if _matches:
display.vvvvv('We have a secret associated with vault id (%s), will try to use to decrypt %s' % (vault_id, to_text(filename)))
else:
display.vvvvv('Found a vault_id (%s) in the vault text, but we do not have an associated secret (--vault-id)' % (vault_id))
# Not adding the other secrets to vault_secret_ids enforces a match between the vault_id from the vault_text and
# the known vault secrets.
if not C.DEFAULT_VAULT_ID_MATCH:
# Add all of the known vault_ids as candidates for decrypting a vault.
vault_id_matchers.extend([_vault_id for _vault_id, _dummy in self.secrets if _vault_id != vault_id])
matched_secrets = match_secrets(self.secrets, vault_id_matchers)
# for vault_secret_id in vault_secret_ids:
for vault_secret_id, vault_secret in matched_secrets:
display.vvvvv('Trying to use vault secret=(%s) id=%s to decrypt %s' % (vault_secret, vault_secret_id, to_text(filename)))
try:
# secret = self.secrets[vault_secret_id]
display.vvvv('Trying secret %s for vault_id=%s' % (vault_secret, vault_secret_id))
b_plaintext = this_cipher.decrypt(b_vaulttext, vault_secret)
if b_plaintext is not None:
vault_id_used = vault_secret_id
vault_secret_used = vault_secret
file_slug = ''
if filename:
file_slug = ' of "%s"' % filename
display.vvvvv('Decrypt%s successful with secret=%s and vault_id=%s' % (file_slug, vault_secret, vault_secret_id))
break
except AnsibleVaultFormatError as exc:
msg = "There was a vault format error"
if filename:
msg += ' in %s' % (to_text(filename))
msg += ': %s' % exc
display.warning(msg)
raise
except AnsibleError as e:
display.vvvv('Tried to use the vault secret (%s) to decrypt (%s) but it failed. Error: %s' %
(vault_secret_id, to_text(filename), e))
continue
else:
msg = "Decryption failed (no vault secrets were found that could decrypt)"
if filename:
msg += " on %s" % to_native(filename)
raise AnsibleVaultError(msg)
if b_plaintext is None:
msg = "Decryption failed"
if filename:
msg += " on %s" % to_native(filename)
raise AnsibleError(msg)
return b_plaintext, vault_id_used, vault_secret_used
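# Illustrative sketch (added here, not upstream code): assuming the cryptography
# or pycrypto backend is importable, VaultLib round-trips data like this; the
# password and plaintext are example values only.
def _example_vaultlib_round_trip():
    secret = VaultSecret(_bytes=b'example-password')
    vault = VaultLib(secrets=[(u'default', secret)])
    b_vaulttext = vault.encrypt(u'some secret data')
    assert vault.decrypt(b_vaulttext) == b'some secret data'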
class VaultEditor:
def __init__(self, vault=None):
# TODO: it may be more useful to just make VaultSecrets an index of VaultLib objects...
self.vault = vault or VaultLib()
# TODO: mv shred file stuff to its own class
def _shred_file_custom(self, tmp_path):
""""Destroy a file, when shred (core-utils) is not available
Unix `shred' destroys files "so that they can be recovered only with great difficulty with
specialised hardware, if at all". It is based on the method from the paper
"Secure Deletion of Data from Magnetic and Solid-State Memory",
Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).
We do not go to that length to re-implement shred in Python; instead, overwriting with a block
of random data should suffice.
See https://github.com/ansible/ansible/pull/13700 .
"""
file_len = os.path.getsize(tmp_path)
if file_len > 0: # avoid work when file was empty
max_chunk_len = min(1024 * 1024 * 2, file_len)
passes = 3
with open(tmp_path, "wb") as fh:
for _ in range(passes):
fh.seek(0, 0)
# get a random chunk of data, each pass with other length
chunk_len = random.randint(max_chunk_len // 2, max_chunk_len)
data = os.urandom(chunk_len)
for _ in range(0, file_len // chunk_len):
fh.write(data)
fh.write(data[:file_len % chunk_len])
# FIXME remove this assert once we have unittests to check its accuracy
if fh.tell() != file_len:
raise AnsibleAssertionError()
os.fsync(fh)
def _shred_file(self, tmp_path):
"""Securely destroy a decrypted file
Note standard limitations of GNU shred apply (For flash, overwriting would have no effect
due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
guarantee data hits the disk; etc). Furthermore, if your tmp dir is on tmpfs (a ramdisk),
it is a non-issue.
Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on
a custom shredding method.
"""
if not os.path.isfile(tmp_path):
# file is already gone
return
try:
r = subprocess.call(['shred', tmp_path])
except (OSError, ValueError):
# shred is not available on this system, or some other error occurred.
# ValueError caught because OS X El Capitan is raising an
# exception big enough to hit a limit in python2-2.7.11 and below.
# Symptom is ValueError: insecure pickle when shred is not
# installed there.
r = 1
if r != 0:
# we could not successfully execute unix shred; therefore, do custom shred.
self._shred_file_custom(tmp_path)
os.remove(tmp_path)
def _edit_file_helper(self, filename, secret,
existing_data=None, force_save=False, vault_id=None):
# Create a tempfile
root, ext = os.path.splitext(os.path.realpath(filename))
fd, tmp_path = tempfile.mkstemp(suffix=ext)
os.close(fd)
try:
if existing_data:
self.write_data(existing_data, tmp_path, shred=False)
# drop the user into an editor on the tmp file
subprocess.call(self._editor_shell_command(tmp_path))
except:
# whatever happens, destroy the decrypted file
self._shred_file(tmp_path)
raise
b_tmpdata = self.read_data(tmp_path)
# Do nothing if the content has not changed
if existing_data == b_tmpdata and not force_save:
self._shred_file(tmp_path)
return
# encrypt new data and write out to tmp
# An existing vaultfile will always be UTF-8,
# so decode to unicode here
b_ciphertext = self.vault.encrypt(b_tmpdata, secret, vault_id=vault_id)
self.write_data(b_ciphertext, tmp_path)
# shuffle tmp file into place
self.shuffle_files(tmp_path, filename)
display.vvvvv('Saved edited file "%s" encrypted using %s and vault id "%s"' % (filename, secret, vault_id))
def _real_path(self, filename):
# '-' is special to VaultEditor, don't expand it.
if filename == '-':
return filename
real_path = os.path.realpath(filename)
return real_path
def encrypt_bytes(self, b_plaintext, secret, vault_id=None):
b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
return b_ciphertext
def encrypt_file(self, filename, secret, vault_id=None, output_file=None):
# A file to be encrypted into a vaultfile could be any encoding
# so treat the contents as a byte string.
# follow the symlink
filename = self._real_path(filename)
b_plaintext = self.read_data(filename)
b_ciphertext = self.vault.encrypt(b_plaintext, secret, vault_id=vault_id)
self.write_data(b_ciphertext, output_file or filename)
def decrypt_file(self, filename, output_file=None):
# follow the symlink
filename = self._real_path(filename)
ciphertext = self.read_data(filename)
try:
plaintext = self.vault.decrypt(ciphertext, filename=filename)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_bytes(e), to_bytes(filename)))
self.write_data(plaintext, output_file or filename, shred=False)
def create_file(self, filename, secret, vault_id=None):
""" create a new encrypted file """
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
display.warning("%s does not exist, creating..." % dirname)
makedirs_safe(dirname)
# FIXME: If we can raise an error here, we can probably just make it
# behave like edit instead.
if os.path.isfile(filename):
raise AnsibleError("%s exists, please use 'edit' instead" % filename)
self._edit_file_helper(filename, secret, vault_id=vault_id)
def edit_file(self, filename):
vault_id_used = None
vault_secret_used = None
# follow the symlink
filename = self._real_path(filename)
b_vaulttext = self.read_data(filename)
# vault or yaml files are always utf8
vaulttext = to_text(b_vaulttext)
try:
# vaulttext gets converted back to bytes, but alas
# TODO: return the vault_id that worked?
plaintext, vault_id_used, vault_secret_used = self.vault.decrypt_and_get_vault_id(vaulttext)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_bytes(e), to_bytes(filename)))
# Figure out the vault id from the file, to select the right secret to re-encrypt it
# (duplicates parts of decrypt, but alas...)
dummy, dummy, cipher_name, vault_id = parse_vaulttext_envelope(b_vaulttext,
filename=filename)
# vault id here may not be the vault id actually used for decrypting
# as when the edited file has no vault-id but is decrypted by non-default id in secrets
# (vault_id=default, while a different vault-id decrypted)
# Keep the same vault-id (and version) as in the header
if cipher_name not in CIPHER_WRITE_WHITELIST:
# we want to get rid of files encrypted with the AES cipher
self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext,
force_save=True, vault_id=vault_id)
else:
self._edit_file_helper(filename, vault_secret_used, existing_data=plaintext,
force_save=False, vault_id=vault_id)
def plaintext(self, filename):
b_vaulttext = self.read_data(filename)
vaulttext = to_text(b_vaulttext)
try:
plaintext = self.vault.decrypt(vaulttext, filename=filename)
return plaintext
except AnsibleError as e:
raise AnsibleVaultError("%s for %s" % (to_bytes(e), to_bytes(filename)))
# FIXME/TODO: make this use VaultSecret
def rekey_file(self, filename, new_vault_secret, new_vault_id=None):
# follow the symlink
filename = self._real_path(filename)
prev = os.stat(filename)
b_vaulttext = self.read_data(filename)
vaulttext = to_text(b_vaulttext)
display.vvvvv('Rekeying file "%s" with new vault-id "%s" and vault secret %s' %
(filename, new_vault_id, new_vault_secret))
try:
plaintext, vault_id_used, _dummy = self.vault.decrypt_and_get_vault_id(vaulttext)
except AnsibleError as e:
raise AnsibleError("%s for %s" % (to_bytes(e), to_bytes(filename)))
# This is more or less an assert, see #18247
if new_vault_secret is None:
raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename)
# FIXME: VaultContext...? could rekey to a different vault_id in the same VaultSecrets
# Need a new VaultLib because the new vault data can be a different
# vault lib format or cipher (for ex, when we migrate 1.0 style vault data to
# 1.1 style data we change the version and the cipher). This is where a VaultContext might help
# the new vault will only be used for encrypting, so it doesn't need the vault secrets
# (we will pass one in directly to encrypt)
new_vault = VaultLib(secrets={})
b_new_vaulttext = new_vault.encrypt(plaintext, new_vault_secret, vault_id=new_vault_id)
self.write_data(b_new_vaulttext, filename)
# preserve permissions
os.chmod(filename, prev.st_mode)
os.chown(filename, prev.st_uid, prev.st_gid)
display.vvvvv('Rekeyed file "%s" (decrypted with vault id "%s") was encrypted with new vault-id "%s" and vault secret %s' %
(filename, vault_id_used, new_vault_id, new_vault_secret))
def read_data(self, filename):
try:
if filename == '-':
data = sys.stdin.read()
else:
with open(filename, "rb") as fh:
data = fh.read()
except Exception as e:
raise AnsibleError(str(e))
return data
# TODO: add docstrings for arg types since this code is picky about that
def write_data(self, data, filename, shred=True):
"""Write the data bytes to given path
This is used to write a byte string to a file or stdout. It is used for
writing the results of vault encryption or decryption. It is used for
saving the ciphertext after encryption and it is also used for saving the
plaintext after decrypting a vault. The type of the 'data' arg should be bytes,
since in the plaintext case, the original contents can be of any text encoding
or arbitrary binary data.
When used to write the result of vault encryption, the val of the 'data' arg
should be a utf-8 encoded byte string and not a text type.
When used to write the result of vault decryption, the val of the 'data' arg
should be a byte string and not a text type.
:arg data: the byte string (bytes) data
:arg filename: filename to save 'data' to.
:arg shred: if shred==True, make sure that the original data is first shredded so that it cannot be recovered.
:returns: None
"""
# FIXME: do we need this now? data_bytes should always be a utf-8 byte string
b_file_data = to_bytes(data, errors='strict')
# get a ref to either sys.stdout.buffer for py3 or plain old sys.stdout for py2
# We need sys.stdout.buffer on py3 so we can write bytes to it since the plaintext
# of the vaulted object could be anything/binary/etc
output = getattr(sys.stdout, 'buffer', sys.stdout)
if filename == '-':
output.write(b_file_data)
else:
if os.path.isfile(filename):
if shred:
self._shred_file(filename)
else:
os.remove(filename)
with open(filename, "wb") as fh:
fh.write(b_file_data)
def shuffle_files(self, src, dest):
prev = None
# overwrite dest with src
if os.path.isfile(dest):
prev = os.stat(dest)
# old file 'dest' was encrypted, no need to _shred_file
os.remove(dest)
shutil.move(src, dest)
# reset permissions if needed
if prev is not None:
# TODO: selinux, ACLs, xattr?
os.chmod(dest, prev.st_mode)
os.chown(dest, prev.st_uid, prev.st_gid)
def _editor_shell_command(self, filename):
env_editor = os.environ.get('EDITOR', 'vi')
editor = shlex.split(env_editor)
editor.append(filename)
return editor
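# Illustrative sketch (added here, not upstream code): a minimal VaultEditor
# session, assuming a working crypto backend; the path and password are
# hypothetical and the file is created on disk as a side effect.
def _example_vault_editor_usage():
    secret = VaultSecret(_bytes=b'example-password')
    editor = VaultEditor(VaultLib(secrets=[(u'default', secret)]))
    editor.write_data(b'api_key: example\n', '/tmp/example_vars.yml', shred=False)
    editor.encrypt_file('/tmp/example_vars.yml', secret)  # encrypts in place
    print(editor.plaintext('/tmp/example_vars.yml'))      # prints the decrypted bytes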
########################################
# CIPHERS #
########################################
class VaultAES:
# this version has been obsoleted by the VaultAES256 class
# which uses encrypt-then-mac (fixing order) and also improves the KDF used
# code remains for upgrade purposes only
# http://stackoverflow.com/a/16761459
# Note: strings in this class should be byte strings by default.
def __init__(self):
if not HAS_CRYPTOGRAPHY and not HAS_PYCRYPTO:
raise AnsibleError(NEED_CRYPTO_LIBRARY)
@staticmethod
def _aes_derive_key_and_iv(b_password, b_salt, key_length, iv_length):
""" Create a key and an initialization vector """
b_d = b_di = b''
while len(b_d) < key_length + iv_length:
b_text = b''.join([b_di, b_password, b_salt])
b_di = to_bytes(md5(b_text).digest(), errors='strict')
b_d += b_di
b_key = b_d[:key_length]
b_iv = b_d[key_length:key_length + iv_length]
return b_key, b_iv
@staticmethod
def encrypt(b_plaintext, b_password, key_length=32):
""" Read plaintext data from in_file and write encrypted to out_file """
raise AnsibleError("Encryption disabled for deprecated VaultAES class")
@staticmethod
def _parse_plaintext_envelope(b_envelope):
# split out sha and verify decryption
b_split_data = b_envelope.split(b"\n", 1)
b_this_sha = b_split_data[0]
b_plaintext = b_split_data[1]
b_test_sha = to_bytes(sha256(b_plaintext).hexdigest())
return b_plaintext, b_this_sha, b_test_sha
@classmethod
def _decrypt_cryptography(cls, b_salt, b_ciphertext, b_password, key_length):
bs = algorithms.AES.block_size // 8
b_key, b_iv = cls._aes_derive_key_and_iv(b_password, b_salt, key_length, bs)
cipher = C_Cipher(algorithms.AES(b_key), modes.CBC(b_iv), CRYPTOGRAPHY_BACKEND).decryptor()
unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
try:
b_plaintext_envelope = unpadder.update(
cipher.update(b_ciphertext) + cipher.finalize()
) + unpadder.finalize()
except ValueError:
# In VaultAES, ValueError: invalid padding bytes can mean bad
# password was given
raise AnsibleError("Decryption failed")
b_plaintext, b_this_sha, b_test_sha = cls._parse_plaintext_envelope(b_plaintext_envelope)
if b_this_sha != b_test_sha:
raise AnsibleError("Decryption failed")
return b_plaintext
@classmethod
def _decrypt_pycrypto(cls, b_salt, b_ciphertext, b_password, key_length):
in_file = BytesIO(b_ciphertext)
in_file.seek(0)
out_file = BytesIO()
bs = AES_pycrypto.block_size
b_key, b_iv = cls._aes_derive_key_and_iv(b_password, b_salt, key_length, bs)
cipher = AES_pycrypto.new(b_key, AES_pycrypto.MODE_CBC, b_iv)
b_next_chunk = b''
finished = False
while not finished:
b_chunk, b_next_chunk = b_next_chunk, cipher.decrypt(in_file.read(1024 * bs))
if len(b_next_chunk) == 0:
if PY3:
padding_length = b_chunk[-1]
else:
padding_length = ord(b_chunk[-1])
b_chunk = b_chunk[:-padding_length]
finished = True
out_file.write(b_chunk)
out_file.flush()
# reset the stream pointer to the beginning
out_file.seek(0)
b_plaintext_envelope = out_file.read()
out_file.close()
b_plaintext, b_this_sha, b_test_sha = cls._parse_plaintext_envelope(b_plaintext_envelope)
if b_this_sha != b_test_sha:
raise AnsibleError("Decryption failed")
return b_plaintext
@classmethod
def decrypt(cls, b_vaulttext, secret, key_length=32):
""" Decrypt the given data and return it
:arg b_data: A byte string containing the encrypted data
:arg b_password: A byte string containing the encryption password
:arg key_length: Length of the key
:returns: A byte string containing the decrypted data
"""
display.deprecated(u'The VaultAES format is insecure and has been '
'deprecated since Ansible-1.5. Use vault rekey FILENAME to '
'switch to the newer VaultAES256 format', version='2.3')
# http://stackoverflow.com/a/14989032
b_vaultdata = _unhexlify(b_vaulttext)
b_salt = b_vaultdata[len(b'Salted__'):16]
b_ciphertext = b_vaultdata[16:]
b_password = secret.bytes
if HAS_CRYPTOGRAPHY:
b_plaintext = cls._decrypt_cryptography(b_salt, b_ciphertext, b_password, key_length)
elif HAS_PYCRYPTO:
b_plaintext = cls._decrypt_pycrypto(b_salt, b_ciphertext, b_password, key_length)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + ' (Late detection)')
return b_plaintext
class VaultAES256:
"""
Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
Keys are derived using PBKDF2
"""
# http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
# Note: strings in this class should be byte strings by default.
def __init__(self):
if not HAS_CRYPTOGRAPHY and not HAS_PYCRYPTO:
raise AnsibleError(NEED_CRYPTO_LIBRARY)
@staticmethod
def _create_key_cryptography(b_password, b_salt, key_length, iv_length):
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=2 * key_length + iv_length,
salt=b_salt,
iterations=10000,
backend=CRYPTOGRAPHY_BACKEND)
b_derivedkey = kdf.derive(b_password)
return b_derivedkey
@staticmethod
def _pbkdf2_prf(p, s):
hash_function = SHA256_pycrypto
return HMAC_pycrypto.new(p, s, hash_function).digest()
@classmethod
def _create_key_pycrypto(cls, b_password, b_salt, key_length, iv_length):
# make two keys and one iv
b_derivedkey = PBKDF2_pycrypto(b_password, b_salt, dkLen=(2 * key_length) + iv_length,
count=10000, prf=cls._pbkdf2_prf)
return b_derivedkey
@classmethod
def _gen_key_initctr(cls, b_password, b_salt):
# 16 for AES 128, 32 for AES256
key_length = 32
if HAS_CRYPTOGRAPHY:
# AES is a 128-bit block cipher, so IVs and counter nonces are 16 bytes
iv_length = algorithms.AES.block_size // 8
b_derivedkey = cls._create_key_cryptography(b_password, b_salt, key_length, iv_length)
b_iv = b_derivedkey[(key_length * 2):(key_length * 2) + iv_length]
elif HAS_PYCRYPTO:
# match the size used for counter.new to avoid extra work
iv_length = 16
b_derivedkey = cls._create_key_pycrypto(b_password, b_salt, key_length, iv_length)
b_iv = hexlify(b_derivedkey[(key_length * 2):(key_length * 2) + iv_length])
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in initctr)')
b_key1 = b_derivedkey[:key_length]
b_key2 = b_derivedkey[key_length:(key_length * 2)]
return b_key1, b_key2, b_iv
@staticmethod
def _encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv):
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
encryptor = cipher.encryptor()
padder = padding.PKCS7(algorithms.AES.block_size).padder()
b_ciphertext = encryptor.update(padder.update(b_plaintext) + padder.finalize())
b_ciphertext += encryptor.finalize()
# COMBINE SALT, DIGEST AND DATA
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
b_hmac = hmac.finalize()
return to_bytes(hexlify(b_hmac), errors='surrogate_or_strict'), hexlify(b_ciphertext)
@staticmethod
def _encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv):
# PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
bs = AES_pycrypto.block_size
padding_length = (bs - len(b_plaintext) % bs) or bs
b_plaintext += to_bytes(padding_length * chr(padding_length), encoding='ascii', errors='strict')
# COUNTER.new PARAMETERS
# 1) nbits (integer) - Length of the counter, in bits.
# 2) initial_value (integer) - initial value of the counter. "iv" from _gen_key_initctr
ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16))
# AES.new PARAMETERS
# 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from _gen_key_initctr
# 2) MODE_CTR, is the recommended mode
# 3) counter=<CounterObject>
cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr)
# ENCRYPT PADDED DATA
b_ciphertext = cipher.encrypt(b_plaintext)
# COMBINE SALT, DIGEST AND DATA
hmac = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto)
return to_bytes(hmac.hexdigest(), errors='surrogate_or_strict'), hexlify(b_ciphertext)
@classmethod
def encrypt(cls, b_plaintext, secret):
if secret is None:
raise AnsibleVaultError('The secret passed to encrypt() was None')
b_salt = os.urandom(32)
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_hmac, b_ciphertext = cls._encrypt_cryptography(b_plaintext, b_key1, b_key2, b_iv)
elif HAS_PYCRYPTO:
b_hmac, b_ciphertext = cls._encrypt_pycrypto(b_plaintext, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in encrypt)')
b_vaulttext = b'\n'.join([hexlify(b_salt), b_hmac, b_ciphertext])
# Unnecessary but getting rid of it is a backwards incompatible vault
# format change
b_vaulttext = hexlify(b_vaulttext)
return b_vaulttext
@classmethod
def _decrypt_cryptography(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
# b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt)
# EXIT EARLY IF DIGEST DOESN'T MATCH
hmac = HMAC(b_key2, hashes.SHA256(), CRYPTOGRAPHY_BACKEND)
hmac.update(b_ciphertext)
try:
hmac.verify(_unhexlify(b_crypted_hmac))
except InvalidSignature as e:
raise AnsibleVaultError('HMAC verification failed: %s' % e)
cipher = C_Cipher(algorithms.AES(b_key1), modes.CTR(b_iv), CRYPTOGRAPHY_BACKEND)
decryptor = cipher.decryptor()
unpadder = padding.PKCS7(128).unpadder()
b_plaintext = unpadder.update(
decryptor.update(b_ciphertext) + decryptor.finalize()
) + unpadder.finalize()
return b_plaintext
@staticmethod
def _is_equal(b_a, b_b):
"""
Comparing 2 byte arrays in constant time
to avoid timing attacks.
It would be nice if there were a library for this, but hey.
"""
if not (isinstance(b_a, binary_type) and isinstance(b_b, binary_type)):
raise TypeError('_is_equal can only be used to compare two byte strings')
# http://codahale.com/a-lesson-in-timing-attacks/
if len(b_a) != len(b_b):
return False
result = 0
for b_x, b_y in zip(b_a, b_b):
if PY3:
result |= b_x ^ b_y
else:
result |= ord(b_x) ^ ord(b_y)
return result == 0
@classmethod
def _decrypt_pycrypto(cls, b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv):
# EXIT EARLY IF DIGEST DOESN'T MATCH
hmac_decrypt = HMAC_pycrypto.new(b_key2, b_ciphertext, SHA256_pycrypto)
if not cls._is_equal(b_crypted_hmac, to_bytes(hmac_decrypt.hexdigest())):
return None
# SET THE COUNTER AND THE CIPHER
ctr = Counter_pycrypto.new(128, initial_value=int(b_iv, 16))
cipher = AES_pycrypto.new(b_key1, AES_pycrypto.MODE_CTR, counter=ctr)
# DECRYPT PADDED DATA
b_plaintext = cipher.decrypt(b_ciphertext)
# UNPAD DATA
if PY3:
padding_length = b_plaintext[-1]
else:
padding_length = ord(b_plaintext[-1])
b_plaintext = b_plaintext[:-padding_length]
return b_plaintext
@classmethod
def decrypt(cls, b_vaulttext, secret):
b_ciphertext, b_salt, b_crypted_hmac = parse_vaulttext(b_vaulttext)
# TODO: would be nice if a VaultSecret could be passed directly to _decrypt_*
# (move _gen_key_initctr() to a AES256 VaultSecret or VaultContext impl?)
# though, likely needs to be python cryptography specific impl that basically
# creates a Cipher() with b_key1, a Mode.CTR() with b_iv, and a HMAC() with sign key b_key2
b_password = secret.bytes
b_key1, b_key2, b_iv = cls._gen_key_initctr(b_password, b_salt)
if HAS_CRYPTOGRAPHY:
b_plaintext = cls._decrypt_cryptography(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
elif HAS_PYCRYPTO:
b_plaintext = cls._decrypt_pycrypto(b_ciphertext, b_crypted_hmac, b_key1, b_key2, b_iv)
else:
raise AnsibleError(NEED_CRYPTO_LIBRARY + '(Detected in decrypt)')
return b_plaintext
# Keys could be made bytes later if the code that gets the data is more
# naturally byte-oriented
CIPHER_MAPPING = {
u'AES': VaultAES,
u'AES256': VaultAES256,
}
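# Illustrative sketch (added here, not upstream code): the cipher classes can be
# exercised directly with a VaultSecret, assuming a crypto backend is available;
# the password and payload are example values.
def _example_aes256_round_trip():
    secret = VaultSecret(_bytes=b'example-password')
    b_vaulttext = VaultAES256.encrypt(b'hello', secret)
    assert VaultAES256.decrypt(b_vaulttext, secret) == b'hello'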
|
gpl-3.0
|
markhamstra/spark
|
examples/src/main/python/ml/train_validation_split.py
|
56
|
2841
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This example demonstrates applying TrainValidationSplit to split data
and perform model selection.
Run with:
bin/spark-submit examples/src/main/python/ml/train_validation_split.py
"""
# $example on$
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.regression import LinearRegression
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("TrainValidationSplit")\
.getOrCreate()
# $example on$
# Prepare training and test data.
data = spark.read.format("libsvm")\
.load("data/mllib/sample_linear_regression_data.txt")
train, test = data.randomSplit([0.9, 0.1], seed=12345)
lr = LinearRegression(maxIter=10)
# We use a ParamGridBuilder to construct a grid of parameters to search over.
# TrainValidationSplit will try all combinations of values and determine the best model using
# the evaluator.
paramGrid = ParamGridBuilder()\
.addGrid(lr.regParam, [0.1, 0.01]) \
.addGrid(lr.fitIntercept, [False, True])\
.addGrid(lr.elasticNetParam, [0.0, 0.5, 1.0])\
.build()
# In this case the estimator is simply the linear regression.
# A TrainValidationSplit requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.
tvs = TrainValidationSplit(estimator=lr,
estimatorParamMaps=paramGrid,
evaluator=RegressionEvaluator(),
# 80% of the data will be used for training, 20% for validation.
trainRatio=0.8)
# Run TrainValidationSplit, and choose the best set of parameters.
model = tvs.fit(train)
# Make predictions on test data. model is the model with combination of parameters
# that performed best.
model.transform(test)\
.select("features", "label", "prediction")\
.show()
# $example off$
spark.stop()
|
apache-2.0
|
synth3tk/the-blue-alliance
|
models/award.py
|
4
|
3235
|
import json
from google.appengine.ext import ndb
from models.event import Event
from models.team import Team
class Award(ndb.Model):
"""
Awards represent FIRST Robotics Competition awards given out at an event.
key_name is formatted as: <event_key_name>_<award_type_enum>
If multiple recipients win the same award at the same event (such as
Winner or Dean's List), they show up under the repeated properties.
"""
name_str = ndb.StringProperty(required=True, indexed=False) # award name that shows up on USFIRST Pages. May vary for the same award type.
award_type_enum = ndb.IntegerProperty(required=True)
year = ndb.IntegerProperty(required=True) # year the award was awarded
event = ndb.KeyProperty(kind=Event, required=True) # event at which the award was awarded
event_type_enum = ndb.IntegerProperty(required=True) # needed to query for awards from events of a certain event type
team_list = ndb.KeyProperty(kind=Team, repeated=True) # key of team(s) that won the award (if applicable)
recipient_json_list = ndb.StringProperty(repeated=True) # JSON dict(s) with team_number and/or awardee
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
def __init__(self, *args, **kw):
# store sets of referenced keys for affected references, used for cache clearing
# keys must be model properties
self._affected_references = {
'event': set(),
'team_list': set(),
'year': set(),
}
self._recipient_list = None
self._recipient_dict = None
self._recipient_list_json = None
super(Award, self).__init__(*args, **kw)
@property
def recipient_dict(self):
"""
Uses recipient_list to add a recipient_dict property,
where the key is the team_number and the value is a list of awardees.
"""
if self._recipient_dict is None:
self._recipient_dict = {}
for recipient in self.recipient_list:
team_number = recipient['team_number']
awardee = recipient['awardee']
if team_number in self._recipient_dict:
self._recipient_dict[team_number].append(awardee)
else:
self._recipient_dict[team_number] = [awardee]
return self._recipient_dict
@property
def recipient_list(self):
if self._recipient_list is None:
self._recipient_list = []
for recipient_json in self.recipient_json_list:
self._recipient_list.append(json.loads(recipient_json))
return self._recipient_list
@property
def recipient_list_json(self):
"""
A JSON version of the recipient_list
"""
if self._recipient_list_json is None:
self._recipient_list_json = json.dumps(self.recipient_list)
return self._recipient_list_json
@property
def key_name(self):
return self.render_key_name(self.event.id(), self.award_type_enum)
@classmethod
def render_key_name(cls, event_key_name, award_type_enum):
return '{}_{}'.format(event_key_name, award_type_enum)
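# Illustrative sketch (not part of the original model): recipient_json_list holds
# JSON blobs shaped like the ones below, and recipient_dict groups awardees by
# team number. The event key, award type and team numbers are made-up values and
# an initialized ndb environment is assumed.
def _example_award_recipients():
    award = Award(
        name_str="Regional Winner",
        award_type_enum=1,
        year=2014,
        event=ndb.Key(Event, '2014casj'),
        event_type_enum=0,
        recipient_json_list=[
            json.dumps({'team_number': 254, 'awardee': None}),
            json.dumps({'team_number': 971, 'awardee': None}),
        ])
    assert award.recipient_dict == {254: [None], 971: [None]}
    assert award.key_name == '2014casj_1'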
|
mit
|
ticosax/django
|
django/core/cache/backends/locmem.py
|
586
|
4287
|
"Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
"""A context manager that does nothing special."""
yield
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
if self._has_expired(key):
self._set(key, pickled, timeout)
return True
return False
def get(self, key, default=None, version=None, acquire_lock=True):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = None
with (self._lock.reader() if acquire_lock else dummy()):
if not self._has_expired(key):
pickled = self._cache[key]
if pickled is not None:
try:
return pickle.loads(pickled)
except pickle.PickleError:
return default
with (self._lock.writer() if acquire_lock else dummy()):
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
if len(self._cache) >= self._max_entries:
self._cull()
self._cache[key] = value
self._expire_info[key] = self.get_backend_timeout(timeout)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
self._set(key, pickled, timeout)
def incr(self, key, delta=1, version=None):
with self._lock.writer():
value = self.get(key, version=version, acquire_lock=False)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = pickled
return new_value
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.reader():
if not self._has_expired(key):
return True
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
def _has_expired(self, key):
exp = self._expire_info.get(key, -1)
if exp is None or exp > time.time():
return False
return True
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._delete(key)
def clear(self):
self._cache.clear()
self._expire_info.clear()
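# Illustrative sketch (not part of Django itself): exercising the backend
# directly with a hypothetical cache name and empty params.
#
#   cache = LocMemCache('example', {})
#   cache.set('greeting', 'hello', timeout=30)
#   assert cache.get('greeting') == 'hello'
#   cache.delete('greeting')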
|
bsd-3-clause
|
wooga/airflow
|
airflow/migrations/versions/4446e08588_dagrun_start_end.py
|
7
|
1372
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""dagrun start end
Revision ID: 4446e08588
Revises: 561833c1c74b
Create Date: 2015-12-10 11:26:18.439223
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4446e08588'
down_revision = '561833c1c74b'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('dag_run', sa.Column('end_date', sa.DateTime(), nullable=True))
op.add_column('dag_run', sa.Column('start_date', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('dag_run', 'start_date')
op.drop_column('dag_run', 'end_date')
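# Illustrative invocation (an assumption, not part of this file): applying the
# revision with the alembic CLI, given an alembic.ini pointing at the Airflow
# metadata database. Airflow normally drives this through its own db upgrade
# command.
#
#   alembic upgrade 4446e08588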
|
apache-2.0
|
nburn42/tensorflow
|
tensorflow/python/ops/io_ops.py
|
14
|
16185
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Inputs and Readers.
See the @{$python/io_ops} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_io_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_io_ops import *
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# pylint: disable=protected-access
def _save(filename, tensor_names, tensors, tensor_slices=None, name="save"):
"""Save a list of tensors to a file with given names.
Example usage without slice info:
Save("/foo/bar", ["w", "b"], [w, b])
Example usage with slices:
Save("/foo/bar", ["w", "w"], [slice0, slice1],
tensor_slices=["4 10 0,2:-", "4 10 2,2:-"])
Args:
filename: the file name of the sstable.
tensor_names: a list of strings.
tensors: the list of tensors to be saved.
tensor_slices: Optional list of strings to specify the shape and slices of
a larger virtual tensor that each tensor is a part of. If not specified
each tensor is saved as a full slice.
name: string. Optional name for the op.
Requires:
The length of tensors should match the size of tensor_names and of
tensor_slices.
Returns:
An Operation that saves the tensors.
"""
if tensor_slices is None:
return gen_io_ops.save(filename, tensor_names, tensors, name=name)
else:
return gen_io_ops.save_slices(filename, tensor_names, tensor_slices,
tensors, name=name)
def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type,
name="restore_slice", preferred_shard=-1):
"""Restore a tensor slice from a set of files with a given pattern.
Example usage:
RestoreSlice("/foo/bar-?????-of-?????", "w", "10 10 0,2:-", DT_FLOAT)
Args:
file_pattern: the file pattern used to match a set of checkpoint files.
tensor_name: the name of the tensor to restore.
shape_and_slice: the shape-and-slice spec of the slice.
tensor_type: the type of the tensor to restore.
name: string. Optional name for the op.
preferred_shard: Int. Optional shard to open first in the checkpoint file.
Returns:
A tensor of type "tensor_type".
"""
base_type = dtypes.as_dtype(tensor_type).base_dtype
return gen_io_ops.restore_slice(
file_pattern, tensor_name, shape_and_slice, base_type,
preferred_shard, name=name)
@tf_export("ReaderBase")
class ReaderBase(object):
"""Base class for different Reader types, that produce a record every step.
Conceptually, Readers convert string 'work units' into records (key,
value pairs). Typically the 'work units' are filenames and the
records are extracted from the contents of those files. We want a
single record produced per step, but a work unit can correspond to
many records.
Therefore we introduce some decoupling using a queue. The queue
contains the work units and the Reader dequeues from the queue when
it is asked to produce a record (via Read()) but it has finished the
last work unit.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
def __init__(self, reader_ref, supports_serialize=False):
"""Creates a new ReaderBase.
Args:
reader_ref: The operation that implements the reader.
supports_serialize: True if the reader implementation can
serialize its state.
Raises:
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError(
"Readers are not supported when eager execution is enabled. "
"Instead, please use tf.data to get data into your model.")
self._reader_ref = reader_ref
self._supports_serialize = supports_serialize
@property
def reader_ref(self):
"""Op that implements the reader."""
return self._reader_ref
def read(self, queue, name=None):
"""Returns the next record (key, value) pair produced by a reader.
Will dequeue a work unit from queue if necessary (e.g. when the
Reader needs to start reading from a new file since it has
finished with the previous file).
Args:
queue: A Queue or a mutable string Tensor representing a handle
to a Queue, with string work items.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (key, value).
key: A string scalar Tensor.
value: A string scalar Tensor.
"""
if isinstance(queue, ops.Tensor):
queue_ref = queue
else:
queue_ref = queue.queue_ref
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_read_v2(self._reader_ref, queue_ref, name=name)
else:
# For compatibility with pre-resource queues, create a ref(string) tensor
# which can be looked up as the same queue by a resource manager.
old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)
return gen_io_ops.reader_read(self._reader_ref, old_queue_op, name=name)
def read_up_to(self, queue, num_records, # pylint: disable=invalid-name
name=None):
"""Returns up to num_records (key, value) pairs produced by a reader.
Will dequeue a work unit from queue if necessary (e.g., when the
Reader needs to start reading from a new file since it has
finished with the previous file).
It may return less than num_records even before the last batch.
Args:
queue: A Queue or a mutable string Tensor representing a handle
to a Queue, with string work items.
num_records: Number of records to read.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (keys, values).
keys: A 1-D string Tensor.
values: A 1-D string Tensor.
"""
if isinstance(queue, ops.Tensor):
queue_ref = queue
else:
queue_ref = queue.queue_ref
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_read_up_to_v2(self._reader_ref,
queue_ref,
num_records,
name=name)
else:
# For compatibility with pre-resource queues, create a ref(string) tensor
# which can be looked up as the same queue by a resource manager.
old_queue_op = gen_data_flow_ops.fake_queue(queue_ref)
return gen_io_ops.reader_read_up_to(self._reader_ref,
old_queue_op,
num_records,
name=name)
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
This is the same as the number of Read executions that have
succeeded.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_num_records_produced_v2(self._reader_ref,
name=name)
else:
return gen_io_ops.reader_num_records_produced(self._reader_ref,
name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_num_work_units_completed_v2(self._reader_ref,
name=name)
else:
return gen_io_ops.reader_num_work_units_completed(self._reader_ref,
name=name)
def serialize_state(self, name=None):
"""Produce a string tensor that encodes the state of a reader.
Not all Readers support being serialized, so this can produce an
Unimplemented error.
Args:
name: A name for the operation (optional).
Returns:
A string Tensor.
"""
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name=name)
else:
return gen_io_ops.reader_serialize_state(self._reader_ref, name=name)
def restore_state(self, state, name=None):
"""Restore a reader to a previously saved state.
Not all Readers support being restored, so this can produce an
Unimplemented error.
Args:
state: A string Tensor.
Result of a SerializeState of a Reader with matching type.
name: A name for the operation (optional).
Returns:
The created Operation.
"""
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_restore_state_v2(
self._reader_ref, state, name=name)
else:
return gen_io_ops.reader_restore_state(self._reader_ref, state, name=name)
@property
def supports_serialize(self):
"""Whether the Reader implementation can serialize its state."""
return self._supports_serialize
def reset(self, name=None):
"""Restore a reader to its initial clean state.
Args:
name: A name for the operation (optional).
Returns:
The created Operation.
"""
if self._reader_ref.dtype == dtypes.resource:
return gen_io_ops.reader_reset_v2(self._reader_ref, name=name)
else:
return gen_io_ops.reader_reset(self._reader_ref, name=name)
ops.NotDifferentiable("ReaderRead")
ops.NotDifferentiable("ReaderReadUpTo")
ops.NotDifferentiable("ReaderNumRecordsProduced")
ops.NotDifferentiable("ReaderNumWorkUnitsCompleted")
ops.NotDifferentiable("ReaderSerializeState")
ops.NotDifferentiable("ReaderRestoreState")
ops.NotDifferentiable("ReaderReset")
@tf_export("WholeFileReader")
class WholeFileReader(ReaderBase):
"""A Reader that outputs the entire contents of a file as a value.
To use, enqueue filenames in a Queue. The output of Read will
be a filename (key) and the contents of that file (value).
See ReaderBase for supported methods.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
def __init__(self, name=None):
"""Create a WholeFileReader.
Args:
name: A name for the operation (optional).
"""
rr = gen_io_ops.whole_file_reader_v2(name=name)
super(WholeFileReader, self).__init__(rr, supports_serialize=True)
ops.NotDifferentiable("WholeFileReader")
@tf_export("TextLineReader")
class TextLineReader(ReaderBase):
"""A Reader that outputs the lines of a file delimited by newlines.
Newlines are stripped from the output.
See ReaderBase for supported methods.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
# TODO(josh11b): Support serializing and restoring state.
def __init__(self, skip_header_lines=None, name=None):
"""Create a TextLineReader.
Args:
skip_header_lines: An optional int. Defaults to 0. Number of lines
to skip from the beginning of every file.
name: A name for the operation (optional).
"""
rr = gen_io_ops.text_line_reader_v2(skip_header_lines=skip_header_lines,
name=name)
super(TextLineReader, self).__init__(rr)
ops.NotDifferentiable("TextLineReader")
@tf_export("FixedLengthRecordReader")
class FixedLengthRecordReader(ReaderBase):
"""A Reader that outputs fixed-length records from a file.
See ReaderBase for supported methods.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
# TODO(josh11b): Support serializing and restoring state.
def __init__(self,
record_bytes,
header_bytes=None,
footer_bytes=None,
hop_bytes=None,
name=None,
encoding=None):
"""Create a FixedLengthRecordReader.
Args:
record_bytes: An int.
header_bytes: An optional int. Defaults to 0.
footer_bytes: An optional int. Defaults to 0.
hop_bytes: An optional int. Defaults to 0.
name: A name for the operation (optional).
encoding: The type of encoding for the file. Defaults to none.
"""
rr = gen_io_ops.fixed_length_record_reader_v2(
record_bytes=record_bytes,
header_bytes=header_bytes,
footer_bytes=footer_bytes,
hop_bytes=hop_bytes,
encoding=encoding,
name=name)
super(FixedLengthRecordReader, self).__init__(rr)
ops.NotDifferentiable("FixedLengthRecordReader")
@tf_export("TFRecordReader")
class TFRecordReader(ReaderBase):
"""A Reader that outputs the records from a TFRecords file.
See ReaderBase for supported methods.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
# TODO(josh11b): Support serializing and restoring state.
def __init__(self, name=None, options=None):
"""Create a TFRecordReader.
Args:
name: A name for the operation (optional).
options: A TFRecordOptions object (optional).
"""
compression_type = python_io.TFRecordOptions.get_compression_type_string(
options)
rr = gen_io_ops.tf_record_reader_v2(
name=name, compression_type=compression_type)
super(TFRecordReader, self).__init__(rr)
ops.NotDifferentiable("TFRecordReader")
@tf_export("LMDBReader")
class LMDBReader(ReaderBase):
"""A Reader that outputs the records from a LMDB file.
See ReaderBase for supported methods.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
def __init__(self, name=None, options=None):
"""Create a LMDBReader.
Args:
name: A name for the operation (optional).
options: A LMDBRecordOptions object (optional).
"""
rr = gen_io_ops.lmdb_reader(name=name)
super(LMDBReader, self).__init__(rr)
ops.NotDifferentiable("LMDBReader")
@tf_export("IdentityReader")
class IdentityReader(ReaderBase):
"""A Reader that outputs the queued work as both the key and value.
To use, enqueue strings in a Queue. Read will take the front
work string and output (work, work).
See ReaderBase for supported methods.
@compatibility(eager)
Readers are not compatible with eager execution. Instead, please
use `tf.data` to get data into your model.
@end_compatibility
"""
def __init__(self, name=None):
"""Create a IdentityReader.
Args:
name: A name for the operation (optional).
"""
rr = gen_io_ops.identity_reader_v2(name=name)
super(IdentityReader, self).__init__(rr, supports_serialize=True)
ops.NotDifferentiable("IdentityReader")
|
apache-2.0
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_copy_reg.py
|
129
|
4256
|
import copy_reg
import unittest
from test import test_support
from test.pickletester import ExtensionSaver
class C:
pass
class WithoutSlots(object):
pass
class WithWeakref(object):
__slots__ = ('__weakref__',)
class WithPrivate(object):
__slots__ = ('__spam',)
class WithSingleString(object):
__slots__ = 'spam'
class WithInherited(WithSingleString):
__slots__ = ('eggs',)
class CopyRegTestCase(unittest.TestCase):
def test_class(self):
self.assertRaises(TypeError, copy_reg.pickle,
C, None, None)
def test_noncallable_reduce(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), "not a callable")
def test_noncallable_constructor(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), int, "not a callable")
def test_bool(self):
import copy
self.assertEqual(True, copy.copy(True))
def test_extension_registry(self):
mod, func, code = 'junk1 ', ' junk2', 0xabcd
e = ExtensionSaver(code)
try:
# Shouldn't be in registry now.
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code)
copy_reg.add_extension(mod, func, code)
# Should be in the registry.
self.assertTrue(copy_reg._extension_registry[mod, func] == code)
self.assertTrue(copy_reg._inverted_registry[code] == (mod, func))
# Shouldn't be in the cache.
self.assertNotIn(code, copy_reg._extension_cache)
# Redundant registration should be OK.
copy_reg.add_extension(mod, func, code) # shouldn't blow up
# Conflicting code.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code + 1)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code + 1)
# Conflicting module name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod[1:], func, code )
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func, code )
# Conflicting function name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func[1:], code)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func[1:], code)
# Can't remove one that isn't registered at all.
if code + 1 not in copy_reg._inverted_registry:
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func[1:], code + 1)
finally:
e.restore()
# Shouldn't be there anymore.
self.assertNotIn((mod, func), copy_reg._extension_registry)
# The code *may* be in copy_reg._extension_registry, though, if
# we happened to pick on a registered code. So don't check for
# that.
# Check valid codes at the limits.
for code in 1, 0x7fffffff:
e = ExtensionSaver(code)
try:
copy_reg.add_extension(mod, func, code)
copy_reg.remove_extension(mod, func, code)
finally:
e.restore()
# Ensure invalid codes blow up.
for code in -1, 0, 0x80000000L:
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code)
def test_slotnames(self):
self.assertEqual(copy_reg._slotnames(WithoutSlots), [])
self.assertEqual(copy_reg._slotnames(WithWeakref), [])
expected = ['_WithPrivate__spam']
self.assertEqual(copy_reg._slotnames(WithPrivate), expected)
self.assertEqual(copy_reg._slotnames(WithSingleString), ['spam'])
expected = ['eggs', 'spam']
expected.sort()
result = copy_reg._slotnames(WithInherited)
result.sort()
self.assertEqual(result, expected)
def test_main():
test_support.run_unittest(CopyRegTestCase)
if __name__ == "__main__":
test_main()
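# Illustrative sketch of the API under test (not part of the test module):
# the canonical copy_reg.pickle registration for complex numbers.
#
#   def pickle_complex(c):
#       return complex, (c.real, c.imag)
#   copy_reg.pickle(complex, pickle_complex)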
|
epl-1.0
|
guorendong/iridium-browser-ubuntu
|
third_party/chromite/lib/graphite_lib/statsd_mock_unittest.py
|
2
|
1605
|
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for statsd mock."""
from __future__ import print_function
import unittest
from chromite.lib.graphite_lib import statsd_mock as statsd
class statsd_mock_test(unittest.TestCase):
"""Test statsd_mock"""
def test_average_mock(self):
"""Test mock class Average"""
statsd.Average('average').send('name', 1)
def test_connection_mock(self):
"""Test mock class Connection"""
statsd.Connection(host='host', port=1)
statsd.Connection.set_defaults(host='host', port=1)
def test_counter_mock(self):
"""Test mock class Counter"""
counter = statsd.Counter('counter')
counter.increment(subname='name', delta=1)
counter.decrement(subname='name', delta=1)
def test_gauge_mock(self):
"""Test mock class Gauge"""
statsd.Gauge('gauge').send('name', 1)
def test_raw_mock(self):
"""Test mock class Raw"""
statsd.Raw('raw').send(subname='name', value=1, timestamp=None)
def test_timer_mock(self):
"""Test mock class Timer"""
timer = statsd.Timer('timer')
timer.start()
timer.stop()
class decorate_test(object):
"""Test class to test timer decorator."""
test_timer = statsd.Timer('test')
@test_timer.decorate
def f(self):
"""Test function to apply timer decorator to."""
return True
dt = decorate_test()
self.assertTrue(dt.f(), 'timer decorator failed.')
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
duyet-website/api.duyet.net
|
api/clean_datetime.py
|
1
|
1037
|
# Clean
import datetime as dt
import time
import datetime
from utils import *
def is_present(date_text): return date_text.upper() == 'NOW'
def get_current(): return dt.datetime.now().date()
def clean_datetime(date_text):
if not isinstance(date_text, basestring):
return None
# Parse from text
date_text = date_text.strip()
# Replace
date_text = date_text.replace(' ', ' ').replace('–', '-').rstrip('-')
date_text = date_text.replace('Sept ', 'Sep ').replace('Febr ', 'Feb ').replace('Sept ', 'Sep ').replace('Octo ', 'Oct ')
date_text = date_text.strip()
if not date_text: return None
# Current or present
if is_present(date_text): return get_current()
_date = date_text
for dateformat in dateFormats:
try:
_date = dt.datetime.strptime(date_text, dateformat)
break
except ValueError as err:
pass
try:
_date = _date.date()
except AttributeError as err:
return None
return _date
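# Illustrative usage (results depend on the dateFormats list imported from
# utils, which is not shown here):
#
#   clean_datetime('now')        # -> today's date
#   clean_datetime('')           # -> None
#   clean_datetime('Sept 2016')  # parsed only if a matching format exists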
|
mit
|
Backflipz/plugin.video.excubed
|
resources/lib/js2py/prototypes/jsregexp.py
|
54
|
1252
|
class RegExpPrototype:
def toString():
flags = u''
if this.glob:
flags += u'g'
if this.ignore_case:
flags += u'i'
if this.multiline:
flags += u'm'
v = this.value if this.value else '(?:)'
return u'/%s/'%v + flags
def test(string):
return Exec(this, string) is not this.null
def exec2(string): # will be changed to exec in base.py. cant name it exec here
return Exec(this, string)
def Exec(this, string):
if this.Class!='RegExp':
raise this.MakeError('TypeError', 'RegExp.prototype.exec is not generic!')
string = string.to_string()
length = len(string)
i = this.get('lastIndex').to_int() if this.glob else 0
matched = False
while not matched:
if i < 0 or i > length:
this.put('lastIndex', this.Js(0))
return this.null
matched = this.match(string.value, i)
i += 1
start, end = matched.span()#[0]+i-1, matched.span()[1]+i-1
if this.glob:
this.put('lastIndex', this.Js(end))
arr = this.Js([this.Js(e) for e in [matched.group()]+list(matched.groups())])
arr.put('index', this.Js(start))
arr.put('input', string)
return arr
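# Illustrative behaviour sketch (JavaScript semantics as emulated by js2py;
# not part of the original module):
#
#   /abc/gi.toString()  ->  u'/abc/gi'  (source wrapped in slashes, flags appended)
#   /x/.test('xyz')     ->  true        (Exec found a match, so not null)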
|
gpl-2.0
|
chirilo/remo
|
vendor-local/lib/python/unidecode/x0a4.py
|
252
|
4437
|
data = (
'qiet', # 0x00
'qiex', # 0x01
'qie', # 0x02
'qiep', # 0x03
'quot', # 0x04
'quox', # 0x05
'quo', # 0x06
'quop', # 0x07
'qot', # 0x08
'qox', # 0x09
'qo', # 0x0a
'qop', # 0x0b
'qut', # 0x0c
'qux', # 0x0d
'qu', # 0x0e
'qup', # 0x0f
'qurx', # 0x10
'qur', # 0x11
'qyt', # 0x12
'qyx', # 0x13
'qy', # 0x14
'qyp', # 0x15
'qyrx', # 0x16
'qyr', # 0x17
'jjit', # 0x18
'jjix', # 0x19
'jji', # 0x1a
'jjip', # 0x1b
'jjiet', # 0x1c
'jjiex', # 0x1d
'jjie', # 0x1e
'jjiep', # 0x1f
'jjuox', # 0x20
'jjuo', # 0x21
'jjuop', # 0x22
'jjot', # 0x23
'jjox', # 0x24
'jjo', # 0x25
'jjop', # 0x26
'jjut', # 0x27
'jjux', # 0x28
'jju', # 0x29
'jjup', # 0x2a
'jjurx', # 0x2b
'jjur', # 0x2c
'jjyt', # 0x2d
'jjyx', # 0x2e
'jjy', # 0x2f
'jjyp', # 0x30
'njit', # 0x31
'njix', # 0x32
'nji', # 0x33
'njip', # 0x34
'njiet', # 0x35
'njiex', # 0x36
'njie', # 0x37
'njiep', # 0x38
'njuox', # 0x39
'njuo', # 0x3a
'njot', # 0x3b
'njox', # 0x3c
'njo', # 0x3d
'njop', # 0x3e
'njux', # 0x3f
'nju', # 0x40
'njup', # 0x41
'njurx', # 0x42
'njur', # 0x43
'njyt', # 0x44
'njyx', # 0x45
'njy', # 0x46
'njyp', # 0x47
'njyrx', # 0x48
'njyr', # 0x49
'nyit', # 0x4a
'nyix', # 0x4b
'nyi', # 0x4c
'nyip', # 0x4d
'nyiet', # 0x4e
'nyiex', # 0x4f
'nyie', # 0x50
'nyiep', # 0x51
'nyuox', # 0x52
'nyuo', # 0x53
'nyuop', # 0x54
'nyot', # 0x55
'nyox', # 0x56
'nyo', # 0x57
'nyop', # 0x58
'nyut', # 0x59
'nyux', # 0x5a
'nyu', # 0x5b
'nyup', # 0x5c
'xit', # 0x5d
'xix', # 0x5e
'xi', # 0x5f
'xip', # 0x60
'xiet', # 0x61
'xiex', # 0x62
'xie', # 0x63
'xiep', # 0x64
'xuox', # 0x65
'xuo', # 0x66
'xot', # 0x67
'xox', # 0x68
'xo', # 0x69
'xop', # 0x6a
'xyt', # 0x6b
'xyx', # 0x6c
'xy', # 0x6d
'xyp', # 0x6e
'xyrx', # 0x6f
'xyr', # 0x70
'yit', # 0x71
'yix', # 0x72
'yi', # 0x73
'yip', # 0x74
'yiet', # 0x75
'yiex', # 0x76
'yie', # 0x77
'yiep', # 0x78
'yuot', # 0x79
'yuox', # 0x7a
'yuo', # 0x7b
'yuop', # 0x7c
'yot', # 0x7d
'yox', # 0x7e
'yo', # 0x7f
'yop', # 0x80
'yut', # 0x81
'yux', # 0x82
'yu', # 0x83
'yup', # 0x84
'yurx', # 0x85
'yur', # 0x86
'yyt', # 0x87
'yyx', # 0x88
'yy', # 0x89
'yyp', # 0x8a
'yyrx', # 0x8b
'yyr', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'Qot', # 0x90
'Li', # 0x91
'Kit', # 0x92
'Nyip', # 0x93
'Cyp', # 0x94
'Ssi', # 0x95
'Ggop', # 0x96
'Gep', # 0x97
'Mi', # 0x98
'Hxit', # 0x99
'Lyr', # 0x9a
'Bbut', # 0x9b
'Mop', # 0x9c
'Yo', # 0x9d
'Put', # 0x9e
'Hxuo', # 0x9f
'Tat', # 0xa0
'Ga', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'Ddur', # 0xa4
'Bur', # 0xa5
'Gguo', # 0xa6
'Nyop', # 0xa7
'Tu', # 0xa8
'Op', # 0xa9
'Jjut', # 0xaa
'Zot', # 0xab
'Pyt', # 0xac
'Hmo', # 0xad
'Yit', # 0xae
'Vur', # 0xaf
'Shy', # 0xb0
'Vep', # 0xb1
'Za', # 0xb2
'Jo', # 0xb3
'[?]', # 0xb4
'Jjy', # 0xb5
'Got', # 0xb6
'Jjie', # 0xb7
'Wo', # 0xb8
'Du', # 0xb9
'Shur', # 0xba
'Lie', # 0xbb
'Cy', # 0xbc
'Cuop', # 0xbd
'Cip', # 0xbe
'Hxop', # 0xbf
'Shat', # 0xc0
'[?]', # 0xc1
'Shop', # 0xc2
'Che', # 0xc3
'Zziet', # 0xc4
'[?]', # 0xc5
'Ke', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
bsd-3-clause
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/google/protobuf/wrappers_pb2.py
|
8
|
11378
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/wrappers.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/wrappers.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"\x1c\n\x0b\x44oubleValue\x12\r\n\x05value\x18\x01 \x01(\x01\"\x1b\n\nFloatValue\x12\r\n\x05value\x18\x01 \x01(\x02\"\x1b\n\nInt64Value\x12\r\n\x05value\x18\x01 \x01(\x03\"\x1c\n\x0bUInt64Value\x12\r\n\x05value\x18\x01 \x01(\x04\"\x1b\n\nInt32Value\x12\r\n\x05value\x18\x01 \x01(\x05\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1b\n\nBytesValue\x12\r\n\x05value\x18\x01 \x01(\x0c\x42|\n\x13\x63om.google.protobufB\rWrappersProtoP\x01Z*github.com/golang/protobuf/ptypes/wrappers\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
)
_DOUBLEVALUE = _descriptor.Descriptor(
name='DoubleValue',
full_name='google.protobuf.DoubleValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.DoubleValue.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=79,
)
_FLOATVALUE = _descriptor.Descriptor(
name='FloatValue',
full_name='google.protobuf.FloatValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.FloatValue.value', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=81,
serialized_end=108,
)
_INT64VALUE = _descriptor.Descriptor(
name='Int64Value',
full_name='google.protobuf.Int64Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Int64Value.value', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=110,
serialized_end=137,
)
_UINT64VALUE = _descriptor.Descriptor(
name='UInt64Value',
full_name='google.protobuf.UInt64Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.UInt64Value.value', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=167,
)
_INT32VALUE = _descriptor.Descriptor(
name='Int32Value',
full_name='google.protobuf.Int32Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Int32Value.value', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=169,
serialized_end=196,
)
_UINT32VALUE = _descriptor.Descriptor(
name='UInt32Value',
full_name='google.protobuf.UInt32Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.UInt32Value.value', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=198,
serialized_end=226,
)
_BOOLVALUE = _descriptor.Descriptor(
name='BoolValue',
full_name='google.protobuf.BoolValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.BoolValue.value', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=228,
serialized_end=254,
)
_STRINGVALUE = _descriptor.Descriptor(
name='StringValue',
full_name='google.protobuf.StringValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.StringValue.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=284,
)
_BYTESVALUE = _descriptor.Descriptor(
name='BytesValue',
full_name='google.protobuf.BytesValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.BytesValue.value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=286,
serialized_end=313,
)
DESCRIPTOR.message_types_by_name['DoubleValue'] = _DOUBLEVALUE
DESCRIPTOR.message_types_by_name['FloatValue'] = _FLOATVALUE
DESCRIPTOR.message_types_by_name['Int64Value'] = _INT64VALUE
DESCRIPTOR.message_types_by_name['UInt64Value'] = _UINT64VALUE
DESCRIPTOR.message_types_by_name['Int32Value'] = _INT32VALUE
DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
DESCRIPTOR.message_types_by_name['StringValue'] = _STRINGVALUE
DESCRIPTOR.message_types_by_name['BytesValue'] = _BYTESVALUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DoubleValue = _reflection.GeneratedProtocolMessageType('DoubleValue', (_message.Message,), dict(
DESCRIPTOR = _DOUBLEVALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.DoubleValue)
))
_sym_db.RegisterMessage(DoubleValue)
FloatValue = _reflection.GeneratedProtocolMessageType('FloatValue', (_message.Message,), dict(
DESCRIPTOR = _FLOATVALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.FloatValue)
))
_sym_db.RegisterMessage(FloatValue)
Int64Value = _reflection.GeneratedProtocolMessageType('Int64Value', (_message.Message,), dict(
DESCRIPTOR = _INT64VALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Int64Value)
))
_sym_db.RegisterMessage(Int64Value)
UInt64Value = _reflection.GeneratedProtocolMessageType('UInt64Value', (_message.Message,), dict(
DESCRIPTOR = _UINT64VALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.UInt64Value)
))
_sym_db.RegisterMessage(UInt64Value)
Int32Value = _reflection.GeneratedProtocolMessageType('Int32Value', (_message.Message,), dict(
DESCRIPTOR = _INT32VALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Int32Value)
))
_sym_db.RegisterMessage(Int32Value)
UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), dict(
DESCRIPTOR = _UINT32VALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.UInt32Value)
))
_sym_db.RegisterMessage(UInt32Value)
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), dict(
DESCRIPTOR = _BOOLVALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.BoolValue)
))
_sym_db.RegisterMessage(BoolValue)
StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), dict(
DESCRIPTOR = _STRINGVALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.StringValue)
))
_sym_db.RegisterMessage(StringValue)
BytesValue = _reflection.GeneratedProtocolMessageType('BytesValue', (_message.Message,), dict(
DESCRIPTOR = _BYTESVALUE,
__module__ = 'google.protobuf.wrappers_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.BytesValue)
))
_sym_db.RegisterMessage(BytesValue)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\rWrappersProtoP\001Z*github.com/golang/protobuf/ptypes/wrappers\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'))
# @@protoc_insertion_point(module_scope)
|
bsd-2-clause
|
yunqu/PYNQ
|
pynq/lib/pmod/pmod_grove_ledbar.py
|
4
|
6752
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import Pmod
from . import PMOD_GROVE_G1
from . import PMOD_GROVE_G2
from . import PMOD_GROVE_G3
from . import PMOD_GROVE_G4
__author__ = "Naveen Purushotham"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "[email protected]"
PMOD_GROVE_LEDBAR_PROGRAM = "pmod_grove_ledbar.bin"
HIGH = 0xFF
LOW = 0x01
MED = 0xAA
OFF = 0x00
CONFIG_IOP_SWITCH = 0x1
RESET = 0x3
WRITE_LEDS = 0x5
SET_BRIGHTNESS = 0x7
SET_LEVEL = 0x9
READ_LEDS = 0xB
class Grove_LEDbar(object):
"""This class controls the Grove LED BAR.
Grove LED Bar comprises a 10-segment LED gauge bar and an MY9221 LED
controller chip. Model: LED05031P. Hardware version: v2.0.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of an Grove LEDbar object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on pmod-grove adapter.
"""
if gr_pin not in [PMOD_GROVE_G1,
PMOD_GROVE_G2,
PMOD_GROVE_G3,
PMOD_GROVE_G4]:
raise ValueError("Group number can only be G1 - G4.")
self.microblaze = Pmod(mb_info, PMOD_GROVE_LEDBAR_PROGRAM)
self.microblaze.write_mailbox(0, gr_pin)
self.microblaze.write_blocking_command(CONFIG_IOP_SWITCH)
def reset(self):
"""Resets the LEDbar.
Clears the LED bar, sets all LEDs to OFF state.
Returns
-------
None
"""
self.microblaze.write_blocking_command(RESET)
def write_binary(self, data_in):
"""Set individual LEDs in the LEDbar based on 10 bit binary input.
Each bit in the 10-bit `data_in` points to a LED position on the
LEDbar. Red LED corresponds to the LSB, while green LED corresponds
to the MSB.
Parameters
----------
data_in : int
10 LSBs of this parameter control the LEDbar.
Returns
-------
None
"""
self.microblaze.write_mailbox(0, data_in)
self.microblaze.write_blocking_command(WRITE_LEDS)
def write_brightness(self, data_in, brightness=[MED]*10):
"""Set individual LEDs with 3 level brightness control.
Each bit in the 10-bit `data_in` points to a LED position on the
LEDbar. Red LED corresponds to the LSB, while green LED corresponds
to the MSB.
Brightness of each LED is controlled by the brightness parameter.
There are 3 perceivable levels of brightness:
0xFF : HIGH
0xAA : MED
0x01 : LOW
Parameters
----------
data_in : int
10 LSBs of this parameter control the LEDbar.
brightness : list
Each element controls a single LED.
Returns
-------
None
"""
data = [data_in]
data += brightness
self.microblaze.write_mailbox(0, data)
self.microblaze.write_blocking_command(SET_BRIGHTNESS)
def write_level(self, level, bright_level, green_to_red):
"""Set the level to which the leds are to be lit in levels 1 - 10.
Level can be set in both directions. `set_level` operates by setting
all LEDs to the same brightness level.
There are 4 preset brightness levels:
bright_level = 0: off
bright_level = 1: low
bright_level = 2: medium
bright_level = 3: maximum
`green_to_red` indicates the direction, either from red to green when
it is 0, or green to red when it is 1.
Parameters
----------
level : int
10 levels exist, where 1 is minimum and 10 is maximum.
bright_level : int
Controls brightness of all LEDs in the LEDbar, from 0 to 3.
green_to_red : int
Sets the direction of the sequence.
Returns
-------
None
"""
self.microblaze.write_mailbox(0, [level, bright_level, green_to_red])
self.microblaze.write_blocking_command(SET_LEVEL)
def read(self):
"""Reads the current status of LEDbar.
Reads the current status of LED bar and returns 10-bit binary string.
Each bit position corresponds to a LED position in the LEDbar,
and bit value corresponds to the LED state.
Red LED corresponds to the LSB, while green LED corresponds
to the MSB.
Returns
-------
str
String of 10 binary bits.
"""
self.microblaze.write_blocking_command(READ_LEDS)
value = self.microblaze.read_mailbox(0x0)
return bin(value)[2:].zfill(10)
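# Illustrative sketch (not part of the original module). It assumes a PYNQ
# base overlay is loaded and the LED bar sits on group G1 of the Pmod-Grove
# adapter plugged into PMODA; names follow the usual PYNQ examples.
#
#   from pynq.overlays.base import BaseOverlay
#   from pynq.lib.pmod import Grove_LEDbar, PMOD_GROVE_G1
#   base = BaseOverlay("base.bit")
#   ledbar = Grove_LEDbar(base.PMODA, PMOD_GROVE_G1)
#   ledbar.reset()
#   ledbar.write_binary(0b1010101010)   # light alternate LEDs
#   ledbar.write_level(5, 3, 1)         # 5 LEDs, max brightness, green to red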
|
bsd-3-clause
|
pdebuyl/lammps
|
doc/utils/converters/lammpsdoc/rst_anchor_check.py
|
4
|
2189
|
#! /usr/bin/env python3
# LAMMPS Documentation Utilities
#
# Scan for duplicate anchor labels in documentation files
#
# Copyright (C) 2017 Richard Berger
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import argparse
def main():
parser = argparse.ArgumentParser(description='scan for duplicate anchor labels in documentation files')
parser.add_argument('files', metavar='file', nargs='+', help='one or more files to scan')
parsed_args = parser.parse_args()
anchor_pattern = re.compile(r'^\.\. _(.*):$')
anchors = {}
for filename in parsed_args.files:
#print("filename: %s" % filename)
with open(filename, 'rt') as f:
for line_number, line in enumerate(f):
m = anchor_pattern.match(line)
if m:
label = m.group(1)
#print("found label: %s" % label)
if label in anchors:
anchors[label].append((filename, line_number+1))
else:
anchors[label] = [(filename, line_number+1)]
print("Found %d anchor labels" % len(anchors))
count = 0
for label in sorted(anchors.keys()):
if len(anchors[label]) > 1:
print(label)
count += 1
for filename, line_number in anchors[label]:
print(" - %s:%d" % (filename, line_number))
if count > 0:
print("Found %d anchor label errors." % count)
sys.exit(1)
else:
print("No anchor label errors.")
if __name__ == "__main__":
main()
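# Illustrative invocation (file paths are hypothetical):
#
#   python3 rst_anchor_check.py doc/src/*.rst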
|
gpl-2.0
|
BT-ojossen/l10n-switzerland
|
l10n_ch_account_statement_base_import/parsers/postfinance_file_parser.py
|
1
|
12859
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, David Wulliamoz
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import re
from os.path import splitext
from tarfile import TarFile, TarError
from cStringIO import StringIO
from lxml import etree
from wand.image import Image
import logging
from openerp import fields
from .camt import PFCamtParser
from .base_parser import BaseSwissParser
_logger = logging.getLogger(__name__)
class XMLPFParser(BaseSwissParser):
"""
Parser for XML Postfinance Statements (can be wrapped in a tar.gz file)
"""
_ftype = 'postfinance'
def __init__(self, data_file):
"""Constructor
Try to uncompress the file if possible
"""
super(XMLPFParser, self).__init__(data_file)
self.is_tar = None
self.tar_source = data_file
self.data_file = self._get_content_from_stream(data_file)
self.attachments = None
self.is_camt = None
self.camt_parser = PFCamtParser()
if self.is_tar:
self.attachments = self._get_attachments_from_stream(data_file)
def _get_content_from_stream(self, data_file):
"""Source file can be a raw or tar file. We try to guess the
file type and return valid file content
:retrun: uncompressed file content
:rtype: string
"""
# https://hg.python.org/cpython/file/6969bac411fa/Lib/tarfile.py#l2605
pf_file = StringIO(data_file)
pf_file.seek(0)
try:
# Taken from the original code; it seems we only want to
# parse the first XML file. TO BE CONFIRMED
tar_file = TarFile.open(fileobj=pf_file, mode="r:gz")
xmls = [tar_content
for tar_content in tar_file.getnames()
if tar_content.endswith('.xml')]
self.is_tar = True
self.file_name = splitext(xmls[0])[0]
return tar_file.extractfile(xmls[0]).read()
except TarError:
return data_file
def _get_attachments_from_stream(self, data_file):
"""Retrieve attachment from tar file.
Return a dict containing all attachments ready to be saved
in Odoo.
The key is the name of file without extension
The value the PNG content encoded in base64
:param data_file: raw statement file sent to odoo (not in b64)
:type data_file: basestring subclass
:return: Return a dict containing all attachments ready
to be saved in Odoo.
"""
pf_file = StringIO(data_file)
pf_file.seek(0)
try:
attachments = {}
tar_file = TarFile.open(fileobj=pf_file, mode="r:gz")
accepted_formats = ['.tiff', '.png', '.jpeg', '.jpg']
for file_name in tar_file.getnames():
accepted = reduce(lambda x, y: x or y, [
file_name.endswith(format) for format in accepted_formats
])
if accepted:
key = splitext(file_name)[0]
img_data = tar_file.extractfile(file_name).read()
if file_name.endswith('.tiff'):
# Convert to png for viewing the image in Odoo
with Image(blob=img_data) as img:
img.format = 'png'
img_data = img.make_blob()
attachments[key] = img_data.encode('base64')
return attachments
except TarError:
return {}
def file_is_known(self):
"""Predicate the tells if the parser can parse the data file
:return: True if file is supported
:rtype: bool
"""
try:
pf_xml = re.search(r'\<IC\b', self.data_file)
if pf_xml is None:
camt_xml = re.search('<GrpHdr>', self.data_file)
if camt_xml is None:
return False
self.is_camt = True
return True
except:
return False
def _parse_account_number(self, tree):
"""Parse file account number using xml tree
:param tree: lxml element tree instance
:type tree: :py:class:`lxml.etree.element.Element`
:return: the file account number
:rtype: string
"""
account_number = None
if self.is_camt:
ns = tree.tag[1:tree.tag.index("}")] # namespace
account_node = tree.xpath(
'//ns:Stmt/ns:Acct/ns:Id/ns:IBAN/text()',
namespaces={'ns': ns})
else:
account_node = tree.xpath('//SG2/FII/C078/D_3194/text()')
if account_node and len(account_node) == 1:
account_number = account_node[0]
return account_number
def _parse_currency_code(self, tree):
"""Parse file currency ISO code using xml tree
:param tree: lxml element tree instance
:type tree: :py:class:`lxml.etree.element.Element`
:return: the currency ISO code of the file eg: CHF
:rtype: string
"""
currency_node = tree.xpath('//SG2/FII/C078/D_6345/@Value')
if not currency_node:
return
if len(currency_node) != 1:
raise ValueError(
'Many currencies found for postfinance statement')
return currency_node[0]
def _parse_statement_balance(self, tree):
"""Parse file start and end balance
:param tree: lxml element tree instance
:type tree: :py:class:`lxml.etree.element.Element`
:return: the file start and end balance
:rtype: tuple (start, end) balances
"""
balance_start = balance_end = False
balance_nodes = tree.xpath("//SG5/MOA/C516")
for move in balance_nodes:
if move.xpath(".//@Value='315'"):
balance_start = float(move.xpath("./D_5004/text()")[0])
if move.xpath(".//@Value='343'"):
balance_end = float(move.xpath("./D_5004/text()")[0])
return balance_start, balance_end
def _parse_transactions(self, tree):
"""Parse bank statement lines from file
list of dict containing :
- 'name': string (e.g: 'KBC-INVESTERINGSKREDIET 787-5562831-01')
- 'date': date
- 'amount': float
- 'unique_import_id': string
-o 'account_number': string
Will be used to find/create the res.partner.bank in odoo
-o 'note': string
-o 'partner_name': string
-o 'ref': string
:param tree: lxml element tree instance
:type tree: :py:class:`lxml.etree.element.Element`
:return: a list of transactions
:rtype: list
"""
transactions = []
transaction_nodes = tree.xpath("//SG6")
for transaction in transaction_nodes:
if not transaction.xpath(".//@Value='TGT'"):
continue
res = {}
desc = '/'
date = datetime.date.today()
if transaction.xpath(".//@Value='TGT'"):
transaction_date_text = transaction.xpath(
"DTM/C507/D_2380/text()"
)
if transaction_date_text:
date = datetime.datetime.strptime(
transaction_date_text[0], "%Y%m%d").date()
res['date'] = fields.Date.to_string(date)
if transaction.xpath(".//@Value='ZZZ'"):
desc = transaction.xpath("RFF/C506/D_1154/text()")[1]
res['name'] = "\n".join(transaction.xpath(
"FTX/C108/D_4440/text()"))
amount = float(transaction.xpath("MOA/C516/D_5004/text()")[0])
if transaction.xpath("MOA/C516/D_5025/@Value='211'"):
amount *= -1
res['amount'] = amount
# We have an issue with XPath and namespaces here because of
# a faulty definition on a deprecated URL
uid = [x.text for x in transaction.iter()
if (x.prefix == 'PF' and x.tag.endswith('D_4754'))]
uid = uid[0] if uid else None
res['unique_import_id'] = uid
res['ref'] = uid if uid else desc
res['account_number'] = None
res['note'] = None
res['partner_name'] = None
transactions.append(res)
return transactions
def _parse_attachments(self, tree):
"""Parse file statement to get wich attachment to use
:param tree: lxml element tree instance
:type tree: :py:class:`lxml.etree.element.Element`
:return: a list of attachment tuple (name, content)
:rtype: list
"""
attachments = [('Statement File', self.tar_source.encode('base64'))]
if self.is_camt and self.is_tar:
ns = tree.tag[1:tree.tag.index("}")] # namespace
transaction_nodes = tree.xpath(
'//ns:Stmt/ns:Ntry/ns:AcctSvcrRef/text()',
namespaces={'ns': ns})
for transaction in transaction_nodes:
att_name = self.file_name + '-' + transaction
# Attachment files are limited to 87 char names
att = self.attachments.get(att_name[:87])
if att:
attachments.append((transaction, att))
elif self.is_tar:
transaction_nodes = tree.xpath("//SG6")
for transaction in transaction_nodes:
desc = '/'
if transaction.xpath(".//@Value='ZZZ'"):
desc = transaction.xpath("RFF/C506/D_1154/text()")[1]
att = self.attachments.get(desc)
if att:
uid = [x.text for x in transaction.iter()
if (x.prefix == 'PF' and x.tag.endswith('D_4754'))]
uid = uid[0] if uid else desc
attachments.append((uid, att))
return attachments
def _parse_statement_date(self, tree):
"""Parse file statement date from tree
:param tree: lxml element tree instance
:type tree: :py:class:`lxml.etree.element.Element`
:return: A date usable by Odoo in write or create dict
:rtype: string
"""
if self.is_camt:
ns = tree.tag[1:tree.tag.index("}")] # namespace
date = tree.xpath(
'//ns:GrpHdr/ns:CreDtTm/text()',
namespaces={'ns': ns})
if date:
return date[0][:10]
else:
date = tree.xpath('//DTM/D_2380/@Desc="Date"')
if date:
formatted_date = date[0][:4] + '-' + date[0][4:6] + '-' + \
date[0][6:]
return formatted_date
return fields.Date.today()
def _parse(self):
"""
Launch the parsing of the XML file. It sets the various
properties of the class from the parse result.
This implementation expects one XML file to represent one statement.
"""
if self.is_camt:
tree = etree.fromstring(self.data_file)
self.statements += self.camt_parser.parse(self.data_file)
if self.statements:
self.statements[0]['attachments'] = self._parse_attachments(
tree)
else:
tree = etree.fromstring(self.data_file)
self.currency_code = self._parse_currency_code(tree)
statement = {}
balance_start, balance_stop = self._parse_statement_balance(tree)
statement['balance_start'] = balance_start
statement['balance_end_real'] = balance_stop
statement['date'] = self._parse_statement_date(tree)
statement['attachments'] = self._parse_attachments(tree)
statement['transactions'] = self._parse_transactions(tree)
self.statements.append(statement)
self.account_number = self._parse_account_number(tree)
return True
|
agpl-3.0
|
redhawkci/omniEvents
|
examples/python/pullsupp.py
|
2
|
8708
|
# Package : omniEvents
# pullsupp.py Created : 16/11/2003
# Author : Alex Tingle
#
# Copyright (C) 2003-2004 Alex Tingle.
#
# This file is part of the omniEvents application.
#
# omniEvents is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# omniEvents is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Pull Model supplier implementation."""
import os,sys
import time
import threading
import signal
import getopt
import CORBA
import CosNaming
import CosEventComm__POA
import CosEventChannelAdmin
from naming import *
class Supplier_i(CosEventComm__POA.PullSupplier):
def __init__(self,disconnect=0):
self.event=threading.Event()
self.i=0
self.l=0
self.disconnect=disconnect
def disconnect_pull_supplier(self):
print "Pull Supplier: disconnected by channel."
def pull(self):
print "Pull Supplier: pull() called. Data :",self.l
any = CORBA.Any(CORBA.TC_ulong,self.l)
self.l=self.l+1
# Exercise Disconnect
if ((self.disconnect > 0) and (self.i == self.disconnect)):
self.i = 0
# Signal main thread to disconnect and re-connect.
self.event.set()
self.i=self.i+1
return any
def try_pull(self):
print "Pull Supplier: try_pull() called. Data :",self.l
any = CORBA.Any(CORBA.TC_ulong,self.l)
self.l=self.l+1
# Exercise Disconnect
if ((self.disconnect > 0) and (self.i == self.disconnect)):
self.i = 0
# Signal main thread to disconnect and re-connect.
self.event.set()
self.i=self.i+1
return any,1
#end class Supplier_i
def main():
orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
# Process Options
discnum = 0
sleepInterval = 0
channelName = "EventChannel"
# Process Options
try:
opts,args=getopt.getopt(sys.argv[1:],"d:s:n:h")
except getopt.error:
# print help information and exit:
usage()
sys.exit(-1)
for option, optarg in opts:
if option=='-d':
discnum = int(optarg)
elif option=='-s':
sleepInterval = int(optarg)
elif option=='-n':
channelName = optarg
elif option=='-h':
usage()
sys.exit(0)
else:
usage()
sys.exit(-1)
# Ignore broken pipes
if signal.__dict__.has_key('SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
action="start" # Use this variable to help report errors.
try:
action="resolve initial reference 'RootPOA'"
poa=orb.resolve_initial_references("RootPOA")
action="activate the RootPOA's POAManager"
poaManager=poa._get_the_POAManager()
poaManager.activate()
#
# Obtain reference to the Event Channel.
# (from command-line argument or from the Naming Service).
if len(args):
action="convert URI from command line into object reference"
obj=orb.string_to_object(args[0])
else:
#
# Get Name Service root context.
action="resolve initial reference 'NameService'"
obj=orb.resolve_initial_references("NameService")
rootContext=obj._narrow(CosNaming.NamingContext)
if rootContext is None:
raise CORBA.OBJECT_NOT_EXIST(0,CORBA.COMPLETED_NO)
#
# Obtain reference to the Event Channel.
action="find Event Channel in naming service"
obj=rootContext.resolve(str2name(channelName))
action="narrow object reference to event channel"
channel=obj._narrow(CosEventChannelAdmin.EventChannel)
if channel is None:
raise CORBA.OBJECT_NOT_EXIST(0,CORBA.COMPLETED_NO)
except CORBA.ORB.InvalidName, ex: # resolve_initial_references()
sys.stderr.write("Failed to %s. ORB::InvalidName\n"%action)
sys.exit(1)
except CosNaming.NamingContext.InvalidName, ex: # resolve()
sys.stderr.write("Failed to %s. NamingContext::InvalidName\n"%action)
sys.exit(1)
except CosNaming.NamingContext.NotFound, ex: # resolve()
sys.stderr.write("Failed to %s. NamingContext::NotFound\n"%action)
sys.exit(1)
except CosNaming.NamingContext.CannotProceed, ex: # resolve()
sys.stderr.write("Failed to %s. NamingContext::CannotProceed\n"%action)
sys.exit(1)
except CORBA.TRANSIENT, ex:
sys.stderr.write("Failed to %s. TRANSIENT\n"%action)
sys.exit(1)
except CORBA.OBJECT_NOT_EXIST, ex:
sys.stderr.write("Failed to %s. OBJECT_NOT_EXIST\n"%action)
sys.exit(1)
except CORBA.COMM_FAILURE, ex:
sys.stderr.write("Failed to %s. COMM_FAILURE\n"%action)
sys.exit(1)
except CORBA.SystemException, ex:
sys.stderr.write("System exception, unable to %s.\n"%action)
sys.exit(1)
except CORBA.Exception, ex:
sys.stderr.write("CORBA exception, unable to %s.\n"%action)
sys.exit(1)
#
# Get Supplier Admin interface - retrying on Comms Failure.
while(1):
try:
supplier_admin=channel.for_suppliers()
if supplier_admin is None:
sys.stderr.write("Event Channel returned nil Supplier Admin!\n")
sys.exit(1)
break
except CORBA.COMM_FAILURE, ex:
sys.stderr.write("Caught COMM_FAILURE exception "+ \
"obtaining Supplier Admin! Retrying...\n")
time.sleep(1)
print "Obtained SupplierAdmin."
while(1):
#
# Get proxy consumer - retrying on Comms Failure.
while (1):
try:
proxy_consumer=supplier_admin.obtain_pull_consumer()
if proxy_consumer is None:
sys.stderr.write("Supplier Admin return nil proxy_consumer!\n")
sys.exit(1)
break
except CORBA.COMM_FAILURE, ex:
sys.stderr.write("Caught COMM_FAILURE exception "+ \
"obtaining Proxy Pull Consumer! Retrying...\n")
time.sleep(1)
print "Obtained ProxyPullConsumer."
#
# Make Pull Supplier
supplier = Supplier_i(discnum)
#
# Connect Pull Supplier - retrying on Comms Failure.
while (1):
try:
proxy_consumer.connect_pull_supplier(supplier._this())
break
except CORBA.BAD_PARAM, ex:
sys.stderr.write( \
'Caught BAD_PARAM Exception connecting Pull Supplier!\n')
sys.exit(1)
except CosEventChannelAdmin.AlreadyConnected, ex:
sys.stderr.write('Proxy Pull Consumer already connected!\n')
sys.exit(1)
except CORBA.COMM_FAILURE, ex:
sys.stderr.write("Caught COMM_FAILURE exception " +\
"connecting Pull Supplier! Retrying...\n")
time.sleep(1)
print "Connected Pull Supplier."
#
# Wait for indication to disconnect before re-connecting.
# Make sure that the main thread gets some time every 200ms, so that it
# can respond to keyboard interrupts.
while(not supplier.event.isSet()):
try:
supplier.event.wait(0.2)
except:
os._exit(0) # Kills all known threads, dead!
supplier.event.clear()
#
# Disconnect - retrying on Comms Failure.
while (1):
try:
proxy_consumer.disconnect_pull_consumer()
break
except CORBA.COMM_FAILURE, ex:
sys.stderr.write("Caught COMM_FAILURE Exception " +\
"disconnecting Pull Supplier! Retrying...\n")
time.sleep(1)
print "Disconnected Pull Supplier."
# Yawn
print "Sleeping ",sleepInterval," seconds."
time.sleep(sleepInterval)
#end while
#end main()
def usage():
print """
Create a PullSupplier to send events to a channel.
syntax: python pullsupp.py OPTIONS [CHANNEL_URI]
CHANNEL_URI: The event channel may be specified as a URI.
This may be an IOR, or a corbaloc::: or corbaname::: URI.
OPTIONS: DEFAULT:
-d NUM disconnect after sending NUM events [0 - never disconnect]
-s SECS sleep SECS seconds after disconnecting [0]
-n NAME channel name (if URI is not specified) ["EventChannel"]
-h display this help text
"""
################################################################################
# If this file is executed directly, then we start here.
if(__name__=="__main__"):
main()
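# Example invocation (hedged; assumes an omniEvents event channel named
# "EventChannel" has been registered with the CORBA naming service):
#   python pullsupp.py -d 5 -s 2 -n EventChannel
# serves pull requests, disconnects after 5 events, sleeps 2 seconds,
# then re-connects.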
|
lgpl-2.1
|
Venturi/oldcms
|
env/lib/python2.7/site-packages/PIL/ImageFilter.py
|
87
|
6618
|
#
# The Python Imaging Library.
# $Id$
#
# standard filters
#
# History:
# 1995-11-27 fl Created
# 2002-06-08 fl Added rank and mode filters
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2002 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import functools
class Filter(object):
pass
class Kernel(Filter):
"""
Create a convolution kernel. The current version only
supports 3x3 and 5x5 integer and floating point kernels.
In the current version, kernels can only be applied to
"L" and "RGB" images.
:param size: Kernel size, given as (width, height). In the current
version, this must be (3,3) or (5,5).
:param kernel: A sequence containing kernel weights.
:param scale: Scale factor. If given, the result for each pixel is
divided by this value. The default is the sum of the
kernel weights.
:param offset: Offset. If given, this value is added to the result,
after it has been divided by the scale factor.
"""
def __init__(self, size, kernel, scale=None, offset=0):
if scale is None:
# default scale is sum of kernel
scale = functools.reduce(lambda a, b: a+b, kernel)
if size[0] * size[1] != len(kernel):
raise ValueError("not enough coefficients in kernel")
self.filterargs = size, scale, offset, kernel
def filter(self, image):
if image.mode == "P":
raise ValueError("cannot filter palette images")
return image.filter(*self.filterargs)
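# Hedged usage sketch (not part of the original module; assumes a Pillow
# Image instance opened from an arbitrary file):
#
#   from PIL import Image, ImageFilter
#   im = Image.open("photo.jpg").convert("RGB")
#   box_blur = ImageFilter.Kernel((3, 3), [1] * 9)  # scale defaults to 9
#   blurred = im.filter(box_blur)
#
# Because `scale` defaults to the sum of the kernel weights, the all-ones
# kernel above averages each pixel with its 3x3 neighbourhood.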
class BuiltinFilter(Kernel):
def __init__(self):
pass
class RankFilter(Filter):
"""
Create a rank filter. The rank filter sorts all pixels in
a window of the given size, and returns the **rank**'th value.
:param size: The kernel size, in pixels.
:param rank: What pixel value to pick. Use 0 for a min filter,
``size * size / 2`` for a median filter, ``size * size - 1``
for a max filter, etc.
"""
name = "Rank"
def __init__(self, size, rank):
self.size = size
self.rank = rank
def filter(self, image):
if image.mode == "P":
raise ValueError("cannot filter palette images")
image = image.expand(self.size//2, self.size//2)
return image.rankfilter(self.size, self.rank)
class MedianFilter(RankFilter):
"""
Create a median filter. Picks the median pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Median"
def __init__(self, size=3):
self.size = size
self.rank = size*size//2
class MinFilter(RankFilter):
"""
Create a min filter. Picks the lowest pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Min"
def __init__(self, size=3):
self.size = size
self.rank = 0
class MaxFilter(RankFilter):
"""
Create a max filter. Picks the largest pixel value in a window with the
given size.
:param size: The kernel size, in pixels.
"""
name = "Max"
def __init__(self, size=3):
self.size = size
self.rank = size*size-1
class ModeFilter(Filter):
"""
Create a mode filter. Picks the most frequent pixel value in a box with the
given size. Pixel values that occur only once or twice are ignored; if no
pixel value occurs more than twice, the original pixel value is preserved.
:param size: The kernel size, in pixels.
"""
name = "Mode"
def __init__(self, size=3):
self.size = size
def filter(self, image):
return image.modefilter(self.size)
class GaussianBlur(Filter):
"""Gaussian blur filter.
:param radius: Blur radius.
"""
name = "GaussianBlur"
def __init__(self, radius=2):
self.radius = radius
def filter(self, image):
return image.gaussian_blur(self.radius)
class UnsharpMask(Filter):
"""Unsharp mask filter.
See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
the parameters.
:param radius: Blur Radius
:param percent: Unsharp strength, in percent
:param threshold: Threshold controls the minimum brightness change that
will be sharpened
.. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
"""
name = "UnsharpMask"
def __init__(self, radius=2, percent=150, threshold=3):
self.radius = radius
self.percent = percent
self.threshold = threshold
def filter(self, image):
return image.unsharp_mask(self.radius, self.percent, self.threshold)
class BLUR(BuiltinFilter):
name = "Blur"
filterargs = (5, 5), 16, 0, (
1, 1, 1, 1, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 1, 1, 1, 1
)
class CONTOUR(BuiltinFilter):
name = "Contour"
filterargs = (3, 3), 1, 255, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1
)
class DETAIL(BuiltinFilter):
name = "Detail"
filterargs = (3, 3), 6, 0, (
0, -1, 0,
-1, 10, -1,
0, -1, 0
)
class EDGE_ENHANCE(BuiltinFilter):
name = "Edge-enhance"
filterargs = (3, 3), 2, 0, (
-1, -1, -1,
-1, 10, -1,
-1, -1, -1
)
class EDGE_ENHANCE_MORE(BuiltinFilter):
name = "Edge-enhance More"
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 9, -1,
-1, -1, -1
)
class EMBOSS(BuiltinFilter):
name = "Emboss"
filterargs = (3, 3), 1, 128, (
-1, 0, 0,
0, 1, 0,
0, 0, 0
)
class FIND_EDGES(BuiltinFilter):
name = "Find Edges"
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1
)
class SMOOTH(BuiltinFilter):
name = "Smooth"
filterargs = (3, 3), 13, 0, (
1, 1, 1,
1, 5, 1,
1, 1, 1
)
class SMOOTH_MORE(BuiltinFilter):
name = "Smooth More"
filterargs = (5, 5), 100, 0, (
1, 1, 1, 1, 1,
1, 5, 5, 5, 1,
1, 5, 44, 5, 1,
1, 5, 5, 5, 1,
1, 1, 1, 1, 1
)
class SHARPEN(BuiltinFilter):
name = "Sharpen"
filterargs = (3, 3), 16, 0, (
-2, -2, -2,
-2, 32, -2,
-2, -2, -2
)
|
apache-2.0
|
SlimRemix/android_external_chromium_org
|
tools/telemetry/telemetry/core/platform/profiler/monsoon.py
|
29
|
10196
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Interface for a USB-connected Monsoon power meter.
http://msoon.com/LabEquipment/PowerMonitor/
Currently Unix-only. Relies on fcntl, /dev, and /tmp.
"""
import collections
import logging
import os
import select
import struct
import time
from telemetry.core import util
util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'pyserial')
import serial # pylint: disable=F0401
import serial.tools.list_ports # pylint: disable=F0401,E0611
Power = collections.namedtuple('Power', ['amps', 'volts'])
class Monsoon:
"""Provides a simple class to use the power meter.
mon = monsoon.Monsoon()
mon.SetVoltage(3.7)
mon.StartDataCollection()
mydata = []
while len(mydata) < 1000:
mydata.extend(mon.CollectData())
mon.StopDataCollection()
"""
def __init__(self, device=None, serialno=None, wait=True):
"""Establish a connection to a Monsoon.
By default, opens the first available port, waiting if none are ready.
A particular port can be specified with 'device', or a particular Monsoon
can be specified with 'serialno' (using the number printed on its back).
With wait=False, IOError is thrown if a device is not immediately available.
"""
assert float(serial.VERSION) >= 2.7, \
'Monsoon requires pyserial v2.7 or later. You have %s' % serial.VERSION
self._coarse_ref = self._fine_ref = self._coarse_zero = self._fine_zero = 0
self._coarse_scale = self._fine_scale = 0
self._last_seq = 0
self._voltage_multiplier = None
if device:
self.ser = serial.Serial(device, timeout=1)
return
while 1:
for (port, desc, _) in serial.tools.list_ports.comports():
if not desc.lower().startswith('mobile device power monitor'):
continue
tmpname = '/tmp/monsoon.%s.%s' % (os.uname()[0], os.path.basename(port))
self._tempfile = open(tmpname, 'w')
try: # Use a lockfile to ensure exclusive access.
# Put the import in here to avoid doing it on unsupported platforms.
import fcntl
fcntl.lockf(self._tempfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
logging.error('device %s is in use', port)
continue
try: # Try to open the device.
self.ser = serial.Serial(port, timeout=1)
self.StopDataCollection() # Just in case.
self._FlushInput() # Discard stale input.
status = self.GetStatus()
except IOError, e:
logging.error('error opening device %s: %s', port, e)
continue
if not status:
logging.error('no response from device %s', port)
elif serialno and status['serialNumber'] != serialno:
logging.error('device %s is #%d', port, status['serialNumber'])
else:
if status['hardwareRevision'] == 1:
self._voltage_multiplier = 62.5 / 10**6
else:
self._voltage_multiplier = 125.0 / 10**6
return
self._tempfile = None
if not wait:
raise IOError('No device found')
logging.info('waiting for device...')
time.sleep(1)
def GetStatus(self):
"""Requests and waits for status. Returns status dictionary."""
# status packet format
STATUS_FORMAT = '>BBBhhhHhhhHBBBxBbHBHHHHBbbHHBBBbbbbbbbbbBH'
STATUS_FIELDS = [
'packetType', 'firmwareVersion', 'protocolVersion',
'mainFineCurrent', 'usbFineCurrent', 'auxFineCurrent', 'voltage1',
'mainCoarseCurrent', 'usbCoarseCurrent', 'auxCoarseCurrent', 'voltage2',
'outputVoltageSetting', 'temperature', 'status', 'leds',
'mainFineResistor', 'serialNumber', 'sampleRate',
'dacCalLow', 'dacCalHigh',
'powerUpCurrentLimit', 'runTimeCurrentLimit', 'powerUpTime',
'usbFineResistor', 'auxFineResistor',
'initialUsbVoltage', 'initialAuxVoltage',
'hardwareRevision', 'temperatureLimit', 'usbPassthroughMode',
'mainCoarseResistor', 'usbCoarseResistor', 'auxCoarseResistor',
'defMainFineResistor', 'defUsbFineResistor', 'defAuxFineResistor',
'defMainCoarseResistor', 'defUsbCoarseResistor', 'defAuxCoarseResistor',
'eventCode', 'eventData',
]
self._SendStruct('BBB', 0x01, 0x00, 0x00)
while 1: # Keep reading, discarding non-status packets.
data = self._ReadPacket()
if not data:
return None
if len(data) != struct.calcsize(STATUS_FORMAT) or data[0] != '\x10':
logging.debug('wanted status, dropped type=0x%02x, len=%d',
ord(data[0]), len(data))
continue
status = dict(zip(STATUS_FIELDS, struct.unpack(STATUS_FORMAT, data)))
assert status['packetType'] == 0x10
for k in status.keys():
if k.endswith('VoltageSetting'):
status[k] = 2.0 + status[k] * 0.01
elif k.endswith('FineCurrent'):
pass # Needs calibration data.
elif k.endswith('CoarseCurrent'):
pass # Needs calibration data.
elif k.startswith('voltage') or k.endswith('Voltage'):
status[k] = status[k] * 0.000125
elif k.endswith('Resistor'):
status[k] = 0.05 + status[k] * 0.0001
if k.startswith('aux') or k.startswith('defAux'):
status[k] += 0.05
elif k.endswith('CurrentLimit'):
status[k] = 8 * (1023 - status[k]) / 1023.0
return status
def SetVoltage(self, v):
"""Set the output voltage, 0 to disable."""
if v == 0:
self._SendStruct('BBB', 0x01, 0x01, 0x00)
else:
self._SendStruct('BBB', 0x01, 0x01, int((v - 2.0) * 100))
def SetMaxCurrent(self, i):
"""Set the max output current."""
assert i >= 0 and i <= 8
val = 1023 - int((i / 8.0) * 1023) # float division so integer amp values scale correctly
self._SendStruct('BBB', 0x01, 0x0a, val & 0xff)
self._SendStruct('BBB', 0x01, 0x0b, val >> 8)
def SetUsbPassthrough(self, val):
"""Set the USB passthrough mode: 0 = off, 1 = on, 2 = auto."""
self._SendStruct('BBB', 0x01, 0x10, val)
def StartDataCollection(self):
"""Tell the device to start collecting and sending measurement data."""
self._SendStruct('BBB', 0x01, 0x1b, 0x01) # Mystery command.
self._SendStruct('BBBBBBB', 0x02, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8)
def StopDataCollection(self):
"""Tell the device to stop collecting measurement data."""
self._SendStruct('BB', 0x03, 0x00) # Stop.
def CollectData(self):
"""Return some current samples. Call StartDataCollection() first."""
while 1: # Loop until we get data or a timeout.
data = self._ReadPacket()
if not data:
return None
if len(data) < 4 + 8 + 1 or data[0] < '\x20' or data[0] > '\x2F':
logging.debug('wanted data, dropped type=0x%02x, len=%d',
ord(data[0]), len(data))
continue
seq, packet_type, x, _ = struct.unpack('BBBB', data[:4])
data = [struct.unpack(">hhhh", data[x:x+8])
for x in range(4, len(data) - 8, 8)]
if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF:
logging.info('data sequence skipped, lost packet?')
self._last_seq = seq
if packet_type == 0:
if not self._coarse_scale or not self._fine_scale:
logging.info('waiting for calibration, dropped data packet')
continue
out = []
for main, usb, _, voltage in data:
main_voltage_v = self._voltage_multiplier * (voltage & ~3)
sample = 0.0
if main & 1:
sample += ((main & ~1) - self._coarse_zero) * self._coarse_scale
else:
sample += (main - self._fine_zero) * self._fine_scale
if usb & 1:
sample += ((usb & ~1) - self._coarse_zero) * self._coarse_scale
else:
sample += (usb - self._fine_zero) * self._fine_scale
out.append(Power(sample, main_voltage_v))
return out
elif packet_type == 1:
self._fine_zero = data[0][0]
self._coarse_zero = data[1][0]
elif packet_type == 2:
self._fine_ref = data[0][0]
self._coarse_ref = data[1][0]
else:
logging.debug('discarding data packet type=0x%02x', packet_type)
continue
if self._coarse_ref != self._coarse_zero:
self._coarse_scale = 2.88 / (self._coarse_ref - self._coarse_zero)
if self._fine_ref != self._fine_zero:
self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)
def _SendStruct(self, fmt, *args):
"""Pack a struct (without length or checksum) and send it."""
data = struct.pack(fmt, *args)
data_len = len(data) + 1
checksum = (data_len + sum(struct.unpack('B' * len(data), data))) % 256
out = struct.pack('B', data_len) + data + struct.pack('B', checksum)
self.ser.write(out)
def _ReadPacket(self):
"""Read a single data record as a string (without length or checksum)."""
len_char = self.ser.read(1)
if not len_char:
logging.error('timeout reading from serial port')
return None
data_len = ord(len_char)
if not data_len:
return ''
result = self.ser.read(data_len)
if len(result) != data_len:
return None
body = result[:-1]
checksum = (data_len + sum(struct.unpack('B' * len(body), body))) % 256
if result[-1] != struct.pack('B', checksum):
logging.error('invalid checksum from serial port')
return None
return result[:-1]
def _FlushInput(self):
"""Flush all read data until no more available."""
self.ser.flush()
flushed = 0
while True:
ready_r, _, ready_x = select.select([self.ser], [], [self.ser], 0)
if len(ready_x) > 0:
logging.error('exception from serial port')
return None
elif len(ready_r) > 0:
flushed += 1
self.ser.read(1) # This may cause underlying buffering.
self.ser.flush() # Flush the underlying buffer too.
else:
break
if flushed > 0:
logging.debug('dropped >%d bytes', flushed)
|
bsd-3-clause
|
jashandeep-sohi/python-blowfish
|
blowfish.py
|
1
|
49719
|
# vim: filetype=python3 tabstop=2 expandtab fileencoding=utf-8
# blowfish
# Copyright (C) 2015 Jashandeep Sohi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module implements the Blowfish cipher using only Python (3.4+).
Blowfish is a block cipher that can be used for symmetric-key encryption. It
has an 8-byte block size and supports a variable-length key, from 4 to 56 bytes.
It's fast, free and has been analyzed considerably. It was designed by Bruce
Schneier and more details about it can be found at
<https://www.schneier.com/blowfish.html>.
"""
from struct import Struct, error as struct_error
from itertools import cycle as iter_cycle
__version__ = "0.7.1"
# PI_P_ARRAY & PI_S_BOXES are the hexadecimal digits of π (the irrational)
# taken from <https://www.schneier.com/code/constants.txt>.
# 1 x 18
PI_P_ARRAY = (
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
)
# 4 x 256
PI_S_BOXES = (
(
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
),
(
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
),
(
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
),
(
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
),
)
class Cipher(object):
"""
Blowfish block cipher.
Instantiate
-----------
`key` should be a :obj:`bytes` object with a length between 4 and 56 bytes.
`byte_order` determines how bytes are interpreted as numbers to perform
mathematical operations on them. It can either be ``"big"`` or ``"little"``.
The default value of ``"big"`` rarely needs to be changed since most
implementations of Blowfish use big endian byte order.
`P_array` and `S_boxes` are used to derive the key dependent P array and
substitution boxes.
By default, `P_array` is a sequence of 18 32-bit integers and `S_boxes` is a
4 x 256 sequence of 32-bit integers derived from the digits of pi (in
hexadecimal).
If you would like to use custom values (not recommended unless you know what
you are doing), then `S_boxes` should be a 4 x 256 sequence of 32-bit
integers and `P_array` should be an even length sequence of 32-bit integers
(i.e. 16, 18, 20, etc.).
The length of `P_array` also determines how many "rounds" are done per block.
For a `P_array` with length n, n - 2 rounds are done on every block.
Encryption & Decryption
-----------------------
Blowfish is a block cipher with a 64-bits (i.e. 8 bytes) block-size. As
such, it can fundamentally only operate on 8 bytes of data.
To encrypt or decrypt a single block of data use the :meth:`encrypt_block`
or :meth:`decrypt_block` method.
Block ciphers would not be of much use if they could only operate on a single
block of data. Luckily (or rather mathematically), block ciphers can be used
in a certain way, called a "mode of operation", to work with data larger
(or smaller) than the block size.
So far the following modes of operation have been implemented:
Electronic Codebook (ECB)
:meth:`encrypt_ecb` & :meth:`decrypt_ecb`
Electronic Codebook with Ciphertext Stealing (ECB-CTS)
:meth:`encrypt_ecb_cts` & :meth:`decrypt_ecb_cts`
Cipher-Block Chaining (CBC)
:meth:`encrypt_cbc` & :meth:`decrypt_cbc`
Cipher-Block Chaining with Ciphertext Stealing (CBC-CTS)
:meth:`encrypt_cbc_cts` & :meth:`decrypt_cbc_cts`
Propagating Cipher-Block Chaining (PCBC)
:meth:`encrypt_pcbc` & :meth:`decrypt_pcbc`
Cipher Feedback (CFB)
:meth:`encrypt_cfb` & :meth:`decrypt_cfb`
Output Feedback (OFB)
:meth:`encrypt_ofb` & :meth:`decrypt_ofb`
Counter (CTR)
:meth:`encrypt_ctr` & :meth:`decrypt_ctr`
ECB, CBC & PCBC modes can only operate on data that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc. bytes).
ECB-CTS and CBC-CTS modes can only operate on data that is greater than 8
bytes long.
CTR, CFB and OFB modes can operate on data of any length.
Data that is not a multiple of the block-size in length can still be used
with modes that expect otherwise (i.e. ECB, CBC, PCBC), if it is padded
properly. Padding functionality is not implemented in this module, as there
are countless schemes and it's relatively easy to roll your own.
.. warning::
Some modes have weaknesses and quirks, so please read up on them before
using them. If you can't be bothered, stick with CTR.
"""
def __init__(
self,
key,
byte_order = "big",
P_array = PI_P_ARRAY,
S_boxes = PI_S_BOXES
):
if not 4 <= len(key) <= 56:
raise ValueError("key is not between 4 and 56 bytes")
if not len(P_array) or len(P_array) % 2 != 0:
raise ValueError("P array is not an even length sequence")
if len(S_boxes) != 4 or any(len(box) != 256 for box in S_boxes):
raise ValueError("S-boxes is not a 4 x 256 sequence")
if byte_order == "big":
byte_order_fmt = ">"
elif byte_order == "little":
byte_order_fmt = "<"
else:
raise ValueError("byte order must either be 'big' or 'little'")
self.byte_order = byte_order
# Create structs
u4_2_struct = Struct("{}2I".format(byte_order_fmt))
u4_1_struct = Struct(">I".format(byte_order_fmt))
u8_1_struct = Struct("{}Q".format(byte_order_fmt))
u1_4_struct = Struct("=4B")
# Save refs locally to the needed pack/unpack funcs of the structs to speed
# up look-ups a little.
self._u4_2_pack = u4_2_struct.pack
self._u4_2_unpack = u4_2_struct.unpack
self._u4_2_iter_unpack = u4_2_struct.iter_unpack
self._u4_1_pack = u4_1_pack = u4_1_struct.pack
self._u1_4_unpack = u1_4_unpack = u1_4_struct.unpack
self._u8_1_pack = u8_1_struct.pack
# Cyclic key iterator
cyclic_key_iter = iter_cycle(iter(key))
# Cyclic 32-bit integer iterator over key bytes
cyclic_key_u4_iter = (
x for (x,) in map(
u4_1_struct.unpack,
map(
bytes,
zip(
cyclic_key_iter,
cyclic_key_iter,
cyclic_key_iter,
cyclic_key_iter
)
)
)
)
# Create and initialize subkey P array and S-boxes
# XOR each element in P_array with key and save as pairs.
P = [
(p1 ^ k1, p2 ^ k2) for p1, p2, k1, k2 in zip(
P_array[0::2],
P_array[1::2],
cyclic_key_u4_iter,
cyclic_key_u4_iter
)
]
S1, S2, S3, S4 = S = [[x for x in box] for box in S_boxes]
encrypt = self._encrypt
L = 0x00000000
R = 0x00000000
for i in range(len(P)):
P[i] = L, R = encrypt(L, R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
# Save P as a tuple since working with tuples is slightly faster
self.P = P = tuple(P)
for box in S:
for i in range(0, 256, 2):
L, R = encrypt(L, R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
box[i] = L
box[i + 1] = R
# Save S
self.S = tuple(tuple(box) for box in S)
@staticmethod
def _encrypt(L, R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack):
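# Added descriptive note: each loop pass performs one pair of Feistel rounds.
# Because + binds tighter than both ^ and & in Python, the expression below
# evaluates as (((S1[a] + S2[b]) ^ S3[c]) + S4[d]) & 0xffffffff, i.e. the
# standard Blowfish F function reduced mod 2**32; masking only at the end is
# safe because the high bits never influence the low 32 bits of these
# additions and XORs. The final return applies the last two P entries and
# swaps the halves.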
for p1, p2 in P[:-1]:
L ^= p1
a, b, c, d = u1_4_unpack(u4_1_pack(L))
R ^= (S1[a] + S2[b] ^ S3[c]) + S4[d] & 0xffffffff
R ^= p2
a, b, c, d = u1_4_unpack(u4_1_pack(R))
L ^= (S1[a] + S2[b] ^ S3[c]) + S4[d] & 0xffffffff
p_penultimate, p_last = P[-1]
return R ^ p_last, L ^ p_penultimate
@staticmethod
def _decrypt(L, R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack):
for p2, p1 in P[:0:-1]:
L ^= p1
a, b, c, d = u1_4_unpack(u4_1_pack(L))
R ^= (S1[a] + S2[b] ^ S3[c]) + S4[d] & 0xffffffff
R ^= p2
a, b, c, d = u1_4_unpack(u4_1_pack(R))
L ^= (S1[a] + S2[b] ^ S3[c]) + S4[d] & 0xffffffff
p_first, p_second = P[0]
return R ^ p_first, L ^ p_second
def encrypt_block(self, block):
"""
Return a :obj:`bytes` object containing the encrypted bytes of a `block`.
`block` should be a :obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
S0, S1, S2, S3 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
try:
L, R = self._u4_2_unpack(block)
except struct_error:
raise ValueError("block is not 8 bytes in length")
for p1, p2 in P[:-1]:
L ^= p1
a, b, c, d = u1_4_unpack(u4_1_pack(L))
R ^= (S0[a] + S1[b] ^ S2[c]) + S3[d] & 0xffffffff
R ^= p2
a, b, c, d = u1_4_unpack(u4_1_pack(R))
L ^= (S0[a] + S1[b] ^ S2[c]) + S3[d] & 0xffffffff
p_penultimate, p_last = P[-1]
return self._u4_2_pack(R ^ p_last, L ^ p_penultimate)
def decrypt_block(self, block):
"""
Return a :obj:`bytes` object containing the decrypted bytes of a `block`.
`block` should be a :obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
S0, S1, S2, S3 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
try:
L, R = self._u4_2_unpack(block)
except struct_error:
raise ValueError("block is not 8 bytes in length")
for p2, p1 in P[:0:-1]:
L ^= p1
a, b, c, d = u1_4_unpack(u4_1_pack(L))
R ^= (S0[a] + S1[b] ^ S2[c]) + S3[d] & 0xffffffff
R ^= p2
a, b, c, d = u1_4_unpack(u4_1_pack(R))
L ^= (S0[a] + S1[b] ^ S2[c]) + S3[d] & 0xffffffff
p_first, p_second = P[0]
return self._u4_2_pack(R ^ p_first, L ^ p_second)
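# Hedged single-block round-trip sketch (key and block are placeholders):
#   c = Cipher(b"blowfishdemo")
#   ct = c.encrypt_block(b"8bytes!!")
#   assert c.decrypt_block(ct) == b"8bytes!!"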
def encrypt_ecb(self, data):
"""
Return an iterator that encrypts `data` using the Electronic Codebook (ECB)
mode of operation.
ECB mode can only operate on `data` that is a multiple of the block-size
in length.
Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes)
containing the encrypted bytes of the corresponding block in `data`.
`data` should be a :obj:`bytes`-like object that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc.).
If it is not, a :exc:`ValueError` exception is raised.
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
encrypt = self._encrypt
u4_2_pack = self._u4_2_pack
try:
LR_iter = self._u4_2_iter_unpack(data)
except struct_error:
raise ValueError("data is not a multiple of the block-size in length")
for plain_L, plain_R in LR_iter:
yield u4_2_pack(
*encrypt(plain_L, plain_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
def decrypt_ecb(self, data):
"""
Return an iterator that decrypts `data` using the Electronic Codebook (ECB)
mode of operation.
ECB mode can only operate on `data` that is a multiple of the block-size
in length.
Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes)
containing the decrypted bytes of the corresponding block in `data`.
`data` should be a :obj:`bytes`-like object that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc.).
If it is not, a :exc:`ValueError` exception is raised.
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
decrypt = self._decrypt
u4_2_pack = self._u4_2_pack
try:
LR_iter = self._u4_2_iter_unpack(data)
except struct_error:
raise ValueError("data is not a multiple of the block-size in length")
for cipher_L, cipher_R in LR_iter:
yield u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
def encrypt_ecb_cts(self, data):
"""
Return an iterator that encrypts `data` using the Electronic Codebook with
Ciphertext Stealing (ECB-CTS) mode of operation.
ECB-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
encrypt = self._encrypt
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
plain_L, plain_R = u4_2_unpack(data[0:8])
cipher_block = u4_2_pack(
*encrypt(plain_L, plain_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
for plain_L, plain_R in self._u4_2_iter_unpack(data[8:last_block_stop_i]):
yield cipher_block
cipher_block = u4_2_pack(
*encrypt(plain_L, plain_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
plain_L, plain_R = u4_2_unpack(
data[last_block_stop_i:] + cipher_block[extra_bytes:]
)
yield u4_2_pack(
*encrypt(plain_L, plain_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
yield cipher_block[:extra_bytes]
def decrypt_ecb_cts(self, data):
"""
Return an iterator that decrypts `data` using the Electronic Codebook with
Ciphertext Stealing (ECB-CTS) mode of operation.
ECB-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
decrypt = self._decrypt
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
cipher_L, cipher_R = u4_2_unpack(data[0:8])
plain_block = u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
for cipher_L, cipher_R in self._u4_2_iter_unpack(data[8:last_block_stop_i]):
yield plain_block
plain_block = u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
cipher_L, cipher_R = u4_2_unpack(
data[last_block_stop_i:] + plain_block[extra_bytes:]
)
yield u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
)
yield plain_block[:extra_bytes]
def encrypt_cbc(self, data, init_vector):
"""
Return an iterator that encrypts `data` using the Cipher-Block Chaining
(CBC) mode of operation.
CBC mode can only operate on `data` that is a multiple of the block-size
in length.
Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes)
containing the encrypted bytes of the corresponding block in `data`.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
`data` should be a :obj:`bytes`-like object that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc.).
If it is not, a :exc:`ValueError` exception is raised.
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
encrypt = self._encrypt
u4_2_pack = self._u4_2_pack
try:
prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
try:
LR_iter = self._u4_2_iter_unpack(data)
except struct_error:
raise ValueError("data is not a multiple of the block-size in length")
for plain_L, plain_R in LR_iter:
prev_cipher_L, prev_cipher_R = encrypt(
prev_cipher_L ^ plain_L,
prev_cipher_R ^ plain_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(prev_cipher_L, prev_cipher_R)
def decrypt_cbc(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Cipher-Block Chaining
(CBC) mode of operation.
CBC mode can only operate on `data` that is a multiple of the block-size
in length.
Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes)
containing the decrypted bytes of the corresponding block in `data`.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
`data` should be a :obj:`bytes`-like object that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc.).
If it is not, a :exc:`ValueError` exception is raised.
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
decrypt = self._decrypt
u4_2_pack = self._u4_2_pack
try:
prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
try:
LR_iter = self._u4_2_iter_unpack(data)
except struct_error:
raise ValueError("data is not a multiple of the block-size in length")
for cipher_L, cipher_R in LR_iter:
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(prev_cipher_L ^ L, prev_cipher_R ^ R)
prev_cipher_L = cipher_L
prev_cipher_R = cipher_R
def encrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that encrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
encrypt = self._encrypt
try:
prev_cipher_L, prev_cipher_R = u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
plain_L, plain_R = u4_2_unpack(data[0:8])
prev_cipher_L, prev_cipher_R = encrypt(
plain_L ^ prev_cipher_L,
plain_R ^ prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
cipher_block = u4_2_pack(prev_cipher_L, prev_cipher_R)
for plain_L, plain_R in self._u4_2_iter_unpack(data[8:last_block_stop_i]):
yield cipher_block
prev_cipher_L, prev_cipher_R = encrypt(
plain_L ^ prev_cipher_L,
plain_R ^ prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
cipher_block = u4_2_pack(prev_cipher_L, prev_cipher_R)
P_L, P_R = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
yield u4_2_pack(
*encrypt(
prev_cipher_L ^ P_L,
prev_cipher_R ^ P_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
)
yield cipher_block[:extra_bytes]
def decrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
decrypt = self._decrypt
try:
prev_cipher_L, prev_cipher_R = u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
last_block_start_i = last_block_stop_i - 8
for cipher_L, cipher_R in self._u4_2_iter_unpack(
data[0:last_block_start_i]
):
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
prev_cipher_L = cipher_L
prev_cipher_R = cipher_R
cipher_L, cipher_R = u4_2_unpack(data[last_block_start_i:last_block_stop_i])
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
C_L, C_R = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
Xn = u4_2_pack(L ^ C_L, R ^ C_R)
E_L, E_R = u4_2_unpack(data[last_block_stop_i:] + Xn[extra_bytes:])
L, R = decrypt(
E_L, E_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
yield Xn[:extra_bytes]
def encrypt_pcbc(self, data, init_vector):
"""
Return an iterator that encrypts `data` using the Propagating Cipher-Block
Chaining (PCBC) mode of operation.
PCBC mode can only operate on `data` that is a multiple of the block-size
in length.
Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes)
containing the encrypted bytes of the corresponding block in `data`.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
`data` should be a :obj:`bytes`-like object that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc.).
If it is not, a :exc:`ValueError` exception is raised.
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
encrypt = self._encrypt
u4_2_pack = self._u4_2_pack
try:
init_L, init_R = self._u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
try:
LR_iter = self._u4_2_iter_unpack(data)
except struct_error:
raise ValueError("data is not a multiple of the block-size in length")
for plain_L, plain_R in LR_iter:
cipher_L, cipher_R = encrypt(
init_L ^ plain_L, init_R ^ plain_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(cipher_L, cipher_R)
init_L = plain_L ^ cipher_L
init_R = plain_R ^ cipher_R
def decrypt_pcbc(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Propagating Cipher-Block
Chaining (PCBC) mode of operation.
PCBC mode can only operate on `data` that is a multiple of the block-size
in length.
Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes)
containing the decrypted bytes of the corresponding block in `data`.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
`data` should be a :obj:`bytes`-like object that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc.).
If it is not, a :exc:`ValueError` exception is raised.
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
decrypt = self._decrypt
u4_2_pack = self._u4_2_pack
try:
init_L, init_R = self._u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
try:
LR_iter = self._u4_2_iter_unpack(data)
except struct_error:
raise ValueError("data is not a multiple of the block-size in length")
for cipher_L, cipher_R in LR_iter:
plain_L, plain_R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
plain_L ^= init_L
plain_R ^= init_R
yield u4_2_pack(plain_L, plain_R)
init_L = cipher_L ^ plain_L
init_R = cipher_R ^ plain_R
def encrypt_cfb(self, data, init_vector):
"""
Return an iterator that encrypts `data` using the Cipher Feedback (CFB)
mode of operation.
CFB mode can operate on `data` of any length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
`data` should be a :obj:`bytes`-like object (of any length).
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
encrypt = self._encrypt
u4_2_pack = self._u4_2_pack
data_len = len(data)
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
try:
prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
for plain_L, plain_R in self._u4_2_iter_unpack(
data[0:last_block_stop_i]
):
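            # CFB feedback: the previous ciphertext block is run through the
            # block cipher and XORed with the plaintext; the result is both
            # the output ciphertext and the feedback for the next block.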
prev_cipher_L, prev_cipher_R = encrypt(
prev_cipher_L, prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
prev_cipher_L ^= plain_L
prev_cipher_R ^= plain_R
yield u4_2_pack(prev_cipher_L, prev_cipher_R)
if extra_bytes:
yield bytes(
b ^ n for b, n in zip(
data[last_block_stop_i:],
u4_2_pack(
*encrypt(
prev_cipher_L, prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
)
)
)
def decrypt_cfb(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Cipher Feedback (CFB)
mode of operation.
CFB mode can operate on `data` of any length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
`data` should be a :obj:`bytes`-like object (of any length).
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
encrypt = self._encrypt
u4_2_pack = self._u4_2_pack
data_len = len(data)
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
try:
prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
for cipher_L, cipher_R in self._u4_2_iter_unpack(
data[0:last_block_stop_i]
):
prev_cipher_L, prev_cipher_R = encrypt(
prev_cipher_L, prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(prev_cipher_L ^ cipher_L, prev_cipher_R ^ cipher_R)
prev_cipher_L = cipher_L
prev_cipher_R = cipher_R
if extra_bytes:
yield bytes(
b ^ n for b, n in zip(
data[last_block_stop_i:],
u4_2_pack(
*encrypt(
prev_cipher_L, prev_cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
)
)
)
def encrypt_ofb(self, data, init_vector):
"""
Return an iterator that encrypts `data` using the Output Feedback (OFB)
mode of operation.
OFB mode can operate on `data` of any length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
`data` should be a :obj:`bytes`-like object (of any length).
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
encrypt = self._encrypt
u4_2_pack = self._u4_2_pack
data_len = len(data)
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
try:
prev_L, prev_R = self._u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
for plain_L, plain_R in self._u4_2_iter_unpack(
data[0:last_block_stop_i]
):
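            # OFB keystream: the feedback value depends only on the key and
            # IV (never on the data), so encryption and decryption are the
            # same operation.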
prev_L, prev_R = encrypt(
prev_L, prev_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(plain_L ^ prev_L, plain_R ^ prev_R)
if extra_bytes:
yield bytes(
b ^ n for b, n in zip(
data[last_block_stop_i:],
u4_2_pack(
*encrypt(
prev_L, prev_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
)
)
)
def decrypt_ofb(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Output Feedback (OFB)
mode of operation.
.. note::
In OFB mode, decrypting is the same as encrypting.
Therefore, calling this function is the same as calling
:meth:`encrypt_ofb`.
.. seealso::
:meth:`encrypt_ofb`
"""
return self.encrypt_ofb(data, init_vector)
def encrypt_ctr(self, data, counter):
"""
Return an iterator that encrypts `data` using the Counter (CTR) mode of
operation.
CTR mode can operate on `data` of any length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
        `counter` should be an iterator yielding 64-bit integers that are
        guaranteed not to repeat for a long time (the final partial block is
        handled with :func:`next`, so a plain sequence such as a :obj:`list`
        is not sufficient).
        If any integer it yields is not less than 2^64, a :exc:`ValueError`
        exception is raised.
        `counter` should yield at least ``ceil(len(data) / 8)`` integers;
        otherwise the returned iterator only encrypts `data` partially,
        stopping when `counter` is exhausted.
A good default is implemented by :func:`blowfish.ctr_counter`.
`data` should be a :obj:`bytes`-like object (of any length).
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
encrypt = self._encrypt
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
u8_1_pack = self._u8_1_pack
data_len = len(data)
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
for (plain_L, plain_R), counter_n in zip(
self._u4_2_iter_unpack(data[0:last_block_stop_i]),
counter
):
try:
counter_L, counter_R = u4_2_unpack(u8_1_pack(counter_n))
except struct_error:
raise ValueError("integer in counter is not less than 2^64")
counter_L, counter_R = encrypt(
counter_L, counter_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(plain_L ^ counter_L, plain_R ^ counter_R)
if extra_bytes:
try:
counter_L, counter_R = u4_2_unpack(u8_1_pack(next(counter)))
except struct_error:
raise ValueError("integer in counter is not less than 2^64")
counter_L, counter_R = encrypt(
counter_L, counter_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield bytes(
b ^ n for b, n in zip(
data[last_block_stop_i:],
u4_2_pack(counter_L, counter_R)
)
)
def decrypt_ctr(self, data, counter):
"""
Return an iterator that decrypts `data` using the Counter (CTR) mode of
operation.
.. note::
In CTR mode, decrypting is the same as encrypting.
Therefore, calling this function is the same as calling
:meth:`encrypt_ctr`.
.. seealso::
:meth:`encrypt_ctr`
"""
return self.encrypt_ctr(data, counter)
def ctr_counter(nonce, f, start = 0):
"""
Return an infinite iterator that starts at `start` and iterates by 1 over
integers between 0 and 2^64 - 1 cyclically, returning on each iteration the
result of combining each number with `nonce` using function `f`.
    `nonce` should be a random 64-bit integer that is used to make the counter
    unique.
    `f` should be a function that takes two 64-bit integers, the first being the
    `nonce`, and combines the two in a lossless manner (i.e. xor, addition, etc.).
    The returned value should be a 64-bit integer.
`start` should be a number less than 2^64.
"""
for n in range(start, 2**64):
yield f(nonce, n)
while True:
for n in range(0, 2**64):
yield f(nonce, n)
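# A minimal CTR-mode round-trip sketch. It assumes the enclosing cipher class
# is named ``Cipher`` and takes the key in its constructor (as in the published
# ``blowfish`` package); adjust the name if this module differs.
if __name__ == "__main__":
    import os
    demo_key = os.urandom(16)
    demo_nonce = int.from_bytes(os.urandom(8), "big")
    demo_cipher = Cipher(demo_key)
    demo_data = b"CTR mode handles data of any length, full blocks or not"
    # Each call needs a fresh counter iterator built from the same nonce.
    encrypted = b"".join(
        demo_cipher.encrypt_ctr(demo_data, ctr_counter(demo_nonce, f=lambda n, c: n ^ c))
    )
    decrypted = b"".join(
        demo_cipher.decrypt_ctr(encrypted, ctr_counter(demo_nonce, f=lambda n, c: n ^ c))
    )
    assert decrypted == demo_data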
|
gpl-3.0
|
mkennedy04/knodj
|
env/Lib/site-packages/django/db/migrations/topological_sort.py
|
538
|
1129
|
def topological_sort_as_sets(dependency_graph):
"""Variation of Kahn's algorithm (1962) that returns sets.
Takes a dependency graph as a dictionary of node => dependencies.
Yields sets of items in topological order, where the first set contains
all nodes without dependencies, and each following set contains all
nodes that depend on the nodes in the previously yielded sets.
"""
todo = dependency_graph.copy()
while todo:
current = {node for node, deps in todo.items() if len(deps) == 0}
if not current:
raise ValueError('Cyclic dependency in graph: {}'.format(
', '.join(repr(x) for x in todo.items())))
yield current
# remove current from todo's nodes & dependencies
todo = {node: (dependencies - current) for node, dependencies in
todo.items() if node not in current}
def stable_topological_sort(l, dependency_graph):
result = []
for layer in topological_sort_as_sets(dependency_graph):
for node in l:
if node in layer:
result.append(node)
return result
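# A small usage sketch with illustrative node names: the dependency graph maps
# each node to the set of nodes it depends on, and layers come out in order.
if __name__ == '__main__':
    demo_graph = {'a': set(), 'b': {'a'}, 'c': {'a'}, 'd': {'b', 'c'}}
    # -> [{'a'}, {'b', 'c'}, {'d'}] (element order inside each set may vary)
    print(list(topological_sort_as_sets(demo_graph)))
    # -> ['a', 'b', 'c', 'd']
    print(stable_topological_sort(['a', 'b', 'c', 'd'], demo_graph))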
|
mit
|
bkirui/odoo
|
addons/lunch/wizard/lunch_validation.py
|
440
|
1296
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_validation(osv.Model):
""" lunch validation """
_name = 'lunch.validation'
_description = 'lunch validation for order'
def confirm(self,cr,uid,ids,context=None):
return self.pool.get('lunch.order.line').confirm(cr, uid, ids, context=context)
|
agpl-3.0
|
giserh/SFrame
|
oss_src/unity/python/sframe/meta/asttools/mutators/remove_trivial.py
|
15
|
4065
|
'''
Created on Aug 3, 2011
@author: sean
'''
from __future__ import print_function
import _ast
import ast
from ...asttools.visitors.graph_visitor import GraphGen
from ...asttools import Visitor, dont_visit, visit_children
from ...asttools.mutators.replace_mutator import replace_nodes
from ...asttools.visitors.symbol_visitor import get_symbols
from ...asttools.visitors.cond_symbol_visitor import conditional_lhs
class Assignment(object):
def __init__(self, root, assignments):
self.root = root
self.assignments = assignments
def visit_conditional(self, node):
conditional, stable = conditional_lhs(node)
if not stable:
return
bgather = GatherAssignments()
for stmnt in node.body: bgather.visit(stmnt)
egather = GatherAssignments()
for stmnt in node.orelse: egather.visit(stmnt)
for symbol in stable:
node_list = self.assign_id_map.setdefault(symbol, [])
assignments = []
for asgn_list in bgather.assign_id_map[symbol]:
assignments.extend(asgn_list.assignments)
for asgn_list in egather.assign_id_map[symbol]:
assignments.extend(asgn_list.assignments)
node_list.append(Assignment(root=node, assignments=assignments))
class GatherAssignments(Visitor):
'''
Collect ast nodes that assign to the same variable.
'''
def __init__(self):
self.assign_id_map = {}
visitTryExcept = dont_visit
visitDefault = visit_children
visitIf = visit_conditional
visitFor = visit_conditional
visitWhile = visit_conditional
def visitAssign(self, node):
target_ids = [get_symbols(target, ast.Store) for target in node.targets]
target_ids = set.union(*target_ids)
for id in target_ids:
node_list = self.assign_id_map.setdefault(id, [])
node_list.append(Assignment(root=node, assignments=(node,)))
def remove_trivial(root):
'''
Remove redundant statements.
The statement `a = 1` will be removed::
a = 1
a = 2
The statement `a = 1` will not be removed because `b` depends on it::
a = 1
b = a + 2
a = 2
:param root: ast node
'''
gen = GatherAssignments()
gen.visit(root)
to_remove = []
for symbol, assignments in gen.assign_id_map.items():
if len(assignments) < 2:
continue
for j in range(len(assignments) - 1):
i1 = root.body.index(assignments[j].root)
i2 = root.body.index(assignments[j + 1].root)
body = root.body[i1 + 1:i2]
grapher = GraphGen()
for stmnt in body:
grapher.visit(stmnt)
if symbol not in grapher.used:
to_remove.extend(assignments[j].assignments)
Pass = lambda node: _ast.Pass(lineno=node.lineno, col_offset=node.col_offset)
for old in to_remove:
replace_nodes(root, old, Pass(old))
def remove_unused_assign(root, symbol):
    '''
    Remove redundant assignments to a single name.
    Only assignments to `symbol` are considered; an assignment is removed when
    a later assignment to `symbol` overwrites it without `symbol` being used
    in between::
        a = 1 # removed when symbol == 'a'
        a = 2
    :param root: ast node
    :param symbol: name whose dead assignments should be removed
    '''
gen = GatherAssignments()
gen.visit(root)
to_remove = []
if symbol not in gen.assign_id_map:
return
assignments = gen.assign_id_map[symbol]
if len(assignments) < 2:
return
for j in range(len(assignments) - 1):
i1 = root.body.index(assignments[j].root)
i2 = root.body.index(assignments[j + 1].root)
body = root.body[i1 + 1:i2]
grapher = GraphGen()
for stmnt in body:
grapher.visit(stmnt)
if symbol not in grapher.used:
to_remove.extend(assignments[j].assignments)
Pass = lambda node: _ast.Pass(lineno=node.lineno, col_offset=node.col_offset)
for old in to_remove:
replace_nodes(root, old, Pass(old))
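# A usage sketch (illustrative source only): both helpers mutate the parsed
# module in place, replacing dead assignments with ``pass`` nodes. Whether it
# runs end-to-end depends on the bundled asttools visitors supporting the
# current Python's ast module.
if __name__ == '__main__':
    demo_tree = ast.parse("a = 1\na = 2\nb = a + 2\n")
    remove_trivial(demo_tree)  # the first ``a = 1`` is dead and gets removed
    if hasattr(ast, 'unparse'):
        print(ast.unparse(demo_tree))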
|
bsd-3-clause
|
ahkscript/sjBot
|
commands/##monsterhunter/help.py
|
1
|
1542
|
#!/usr/bin/env python3
import inspect
owner = False
aliases = ['helpmeplz']
def help(con, sjBot, commands, trigger, host, channel, command=None):
"""Shows information about commands."""
if command is None:
output = []
output.append('Here is a list of commands: {}.'.format(', '.join(
sorted(commands))))
output.append('Use \x1F{}help command\x1F for more '
'info'.format(trigger))
else:
if command in commands:
func = getattr(commands[command], command)
docs = inspect.getdoc(func)
if docs is None:
                docs = 'There are no docs for this command.'
else:
docs = docs.replace('\n', ' ')
output = [docs]
params = inspect.signature(func).parameters
param_info = 'Usage: {}{}'.format(trigger, command)
for i, p in enumerate(params):
if i < 6:
continue
if (params[p].default == params[p].empty and
params[p].kind != params[p].VAR_POSITIONAL):
param_info += ' \x02<{}>\x02'.format(p)
elif params[p].kind == params[p].VAR_POSITIONAL:
param_info += ' \x1D*{}\x1D'.format(p)
else:
param_info += ' \x1F[{}]\x1F'.format(p)
output.append(param_info)
user = host.split('!')[0][1:]
for line in output:
con.privmsg(user, line)
return None
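# Illustrative note (hypothetical command, not part of this bot): for a handler
# defined as
#   def roll(con, sjBot, commands, trigger, host, channel, dice, *mods):
# the loop above skips the six framework parameters and builds roughly
#   "Usage: <trigger>roll <dice> *mods"
# with required arguments wrapped in bold and *args in italics via the IRC
# control codes used above.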
|
gpl-3.0
|
mccheung/kbengine
|
kbe/res/scripts/common/Lib/ctypes/test/test_keeprefs.py
|
105
|
4058
|
from ctypes import *
import unittest
class SimpleTestCase(unittest.TestCase):
def test_cint(self):
x = c_int()
self.assertEqual(x._objects, None)
x.value = 42
self.assertEqual(x._objects, None)
x = c_int(99)
self.assertEqual(x._objects, None)
def test_ccharp(self):
x = c_char_p()
self.assertEqual(x._objects, None)
x.value = b"abc"
self.assertEqual(x._objects, b"abc")
x = c_char_p(b"spam")
self.assertEqual(x._objects, b"spam")
class StructureTestCase(unittest.TestCase):
def test_cint_struct(self):
class X(Structure):
_fields_ = [("a", c_int),
("b", c_int)]
x = X()
self.assertEqual(x._objects, None)
x.a = 42
x.b = 99
self.assertEqual(x._objects, None)
def test_ccharp_struct(self):
class X(Structure):
_fields_ = [("a", c_char_p),
("b", c_char_p)]
x = X()
self.assertEqual(x._objects, None)
x.a = b"spam"
x.b = b"foo"
self.assertEqual(x._objects, {"0": b"spam", "1": b"foo"})
def test_struct_struct(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
_fields_ = [("ul", POINT), ("lr", POINT)]
r = RECT()
r.ul.x = 0
r.ul.y = 1
r.lr.x = 2
r.lr.y = 3
self.assertEqual(r._objects, None)
r = RECT()
pt = POINT(1, 2)
r.ul = pt
self.assertEqual(r._objects, {'0': {}})
r.ul.x = 22
r.ul.y = 44
self.assertEqual(r._objects, {'0': {}})
r.lr = POINT()
self.assertEqual(r._objects, {'0': {}, '1': {}})
class ArrayTestCase(unittest.TestCase):
def test_cint_array(self):
INTARR = c_int * 3
ia = INTARR()
self.assertEqual(ia._objects, None)
ia[0] = 1
ia[1] = 2
ia[2] = 3
self.assertEqual(ia._objects, None)
class X(Structure):
_fields_ = [("x", c_int),
("a", INTARR)]
x = X()
x.x = 1000
x.a[0] = 42
x.a[1] = 96
self.assertEqual(x._objects, None)
x.a = ia
self.assertEqual(x._objects, {'1': {}})
class PointerTestCase(unittest.TestCase):
def test_p_cint(self):
i = c_int(42)
x = pointer(i)
self.assertEqual(x._objects, {'1': i})
class DeletePointerTestCase(unittest.TestCase):
@unittest.skip('test disabled')
def test_X(self):
class X(Structure):
_fields_ = [("p", POINTER(c_char_p))]
x = X()
i = c_char_p("abc def")
from sys import getrefcount as grc
print("2?", grc(i))
x.p = pointer(i)
print("3?", grc(i))
for i in range(320):
c_int(99)
x.p[0]
print(x.p[0])
## del x
## print "2?", grc(i)
## del i
import gc
gc.collect()
for i in range(320):
c_int(99)
x.p[0]
print(x.p[0])
print(x.p.contents)
## print x._objects
x.p[0] = "spam spam"
## print x.p[0]
print("+" * 42)
print(x._objects)
class PointerToStructure(unittest.TestCase):
def test(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
_fields_ = [("a", POINTER(POINT)),
("b", POINTER(POINT))]
r = RECT()
p1 = POINT(1, 2)
r.a = pointer(p1)
r.b = pointer(p1)
## from pprint import pprint as pp
## pp(p1._objects)
## pp(r._objects)
r.a[0].x = 42
r.a[0].y = 99
# to avoid leaking when tests are run several times
# clean up the types left in the cache.
from ctypes import _pointer_type_cache
del _pointer_type_cache[POINT]
if __name__ == "__main__":
unittest.main()
|
lgpl-3.0
|
kangkot/arangodb
|
3rdParty/V8-4.3.61/build/gyp/test/errors/gyptest-errors.py
|
117
|
1722
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that two targets with the same name generates an error.
"""
import TestGyp
import TestCmd
# TODO(sbc): Remove the use of match_re below, done because scons
# error messages were not consistent with other generators.
# Also remove input.py:generator_wants_absolute_build_file_paths.
test = TestGyp.TestGyp()
stderr = ('gyp: Duplicate target definitions for '
'.*duplicate_targets.gyp:foo#target\n')
test.run_gyp('duplicate_targets.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
stderr = ('.*: Unable to find targets in build file .*missing_targets.gyp.*')
test.run_gyp('missing_targets.gyp', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = ('gyp: rule bar exists in duplicate, target '
'.*duplicate_rule.gyp:foo#target\n')
test.run_gyp('duplicate_rule.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
stderr = ("gyp: Key 'targets' repeated at level 1 with key path '' while "
"reading .*duplicate_node.gyp.*")
test.run_gyp('duplicate_node.gyp', '--check', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
test.run_gyp('duplicate_basenames.gyp', status=1, stderr=stderr)
stderr = ("gyp: Dependency '.*missing_dep.gyp:missing.gyp#target' not found "
"while trying to load target .*missing_dep.gyp:foo#target\n")
test.run_gyp('missing_dep.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
test.pass_test()
|
apache-2.0
|
cbenhagen/buildozer
|
buildozer/target.py
|
5
|
3157
|
from sys import exit
def no_config(f):
f.__no_config = True
return f
class Target(object):
def __init__(self, buildozer):
super(Target, self).__init__()
self.buildozer = buildozer
self.build_mode = 'debug'
self.platform_update = False
def check_requirements(self):
pass
def check_configuration_tokens(self, errors=None):
if errors:
self.buildozer.info('Check target configuration tokens')
self.buildozer.error(
'{0} error(s) found in the buildozer.spec'.format(
len(errors)))
for error in errors:
print(error)
exit(1)
def compile_platform(self):
pass
def install_platform(self):
pass
def get_custom_commands(self):
result = []
for x in dir(self):
if not x.startswith('cmd_'):
continue
if x[4:] in self.buildozer.standard_cmds:
continue
result.append((x[4:], getattr(self, x).__doc__))
return result
def get_available_packages(self):
return ['kivy']
def run_commands(self, args):
if not args:
self.buildozer.error('Missing target command')
self.buildozer.usage()
exit(1)
result = []
last_command = []
for arg in args:
if not arg.startswith('--'):
if last_command:
result.append(last_command)
last_command = []
last_command.append(arg)
else:
if not last_command:
self.buildozer.error('Argument passed without a command')
self.buildozer.usage()
exit(1)
last_command.append(arg)
if last_command:
result.append(last_command)
config_check = False
for item in result:
command, args = item[0], item[1:]
if not hasattr(self, 'cmd_{0}'.format(command)):
self.buildozer.error('Unknown command {0}'.format(command))
exit(1)
func = getattr(self, 'cmd_{0}'.format(command))
need_config_check = not hasattr(func, '__no_config')
if need_config_check and not config_check:
config_check = True
self.check_configuration_tokens()
func(args)
def cmd_clean(self, *args):
self.buildozer.clean_platform()
def cmd_update(self, *args):
self.platform_update = True
self.buildozer.prepare_for_build()
def cmd_debug(self, *args):
self.buildozer.prepare_for_build()
self.build_mode = 'debug'
self.buildozer.build()
def cmd_release(self, *args):
self.buildozer.prepare_for_build()
self.build_mode = 'release'
self.buildozer.build()
def cmd_deploy(self, *args):
self.buildozer.prepare_for_build()
def cmd_run(self, *args):
self.buildozer.prepare_for_build()
def cmd_serve(self, *args):
self.buildozer.cmd_serve()
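# A hypothetical backend sketch showing the cmd_* / @no_config protocol used
# by run_commands(); real targets (android, ios, ...) follow the same shape
# but implement the platform hooks.
class ExampleTarget(Target):
    '''Illustrative target; not part of buildozer itself.'''
    def check_requirements(self):
        # A real target would verify the required SDKs/toolchains here.
        pass
    @no_config
    def cmd_doctor(self, *args):
        '''Print a quick environment report (runs before spec validation).'''
        self.buildozer.info('environment looks sane')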
|
mit
|
benschmaus/catapult
|
third_party/gsutil/third_party/rsa/rsa/randnum.py
|
194
|
2414
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions for generating random numbers.'''
# Source inspired by code by Yesudeep Mangalapilly <[email protected]>
import os
from rsa import common, transform
from rsa._compat import byte
def read_random_bits(nbits):
'''Reads 'nbits' random bits.
If nbits isn't a whole number of bytes, an extra byte will be appended with
only the lower bits set.
'''
nbytes, rbits = divmod(nbits, 8)
# Get the random bytes
randomdata = os.urandom(nbytes)
# Add the remaining random bits
if rbits > 0:
randomvalue = ord(os.urandom(1))
randomvalue >>= (8 - rbits)
randomdata = byte(randomvalue) + randomdata
return randomdata
def read_random_int(nbits):
'''Reads a random integer of approximately nbits bits.
'''
randomdata = read_random_bits(nbits)
value = transform.bytes2int(randomdata)
# Ensure that the number is large enough to just fill out the required
# number of bits.
value |= 1 << (nbits - 1)
return value
def randint(maxvalue):
'''Returns a random integer x with 1 <= x <= maxvalue
May take a very long time in specific situations. If maxvalue needs N bits
to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
is.
'''
bit_size = common.bit_size(maxvalue)
tries = 0
while True:
value = read_random_int(bit_size)
if value <= maxvalue:
break
if tries and tries % 10 == 0:
# After a lot of tries to get the right number of bits but still
# smaller than maxvalue, decrease the number of bits by 1. That'll
# dramatically increase the chances to get a large enough number.
bit_size -= 1
tries += 1
return value
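# A small usage sketch: read_random_int(n) always sets the top bit, so the
# result is exactly n bits wide, and randint() rejection-samples until the
# value falls within the inclusive bound.
if __name__ == '__main__':
    print(common.bit_size(read_random_int(128))) # 128
    print(1 <= randint(10**6) <= 10**6) # True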
|
bsd-3-clause
|
zhanqxun/cv_fish
|
win32/lib/winnt.py
|
9
|
40681
|
# Generated by h2py from \mssdk\include\winnt.h
APPLICATION_ERROR_MASK = 536870912
ERROR_SEVERITY_SUCCESS = 0
ERROR_SEVERITY_INFORMATIONAL = 1073741824
ERROR_SEVERITY_WARNING = -2147483648
ERROR_SEVERITY_ERROR = -1073741824
MINCHAR = 128
MAXCHAR = 127
MINSHORT = 32768
MAXSHORT = 32767
MINLONG = -2147483648
MAXLONG = 2147483647
MAXBYTE = 255
MAXWORD = 65535
MAXDWORD = -1
LANG_NEUTRAL = 0
LANG_AFRIKAANS = 54
LANG_ALBANIAN = 28
LANG_ARABIC = 1
LANG_BASQUE = 45
LANG_BELARUSIAN = 35
LANG_BULGARIAN = 2
LANG_CATALAN = 3
LANG_CHINESE = 4
LANG_CROATIAN = 26
LANG_CZECH = 5
LANG_DANISH = 6
LANG_DUTCH = 19
LANG_ENGLISH = 9
LANG_ESTONIAN = 37
LANG_FAEROESE = 56
LANG_FARSI = 41
LANG_FINNISH = 11
LANG_FRENCH = 12
LANG_GERMAN = 7
LANG_GREEK = 8
LANG_HEBREW = 13
LANG_HINDI = 57
LANG_HUNGARIAN = 14
LANG_ICELANDIC = 15
LANG_INDONESIAN = 33
LANG_ITALIAN = 16
LANG_JAPANESE = 17
LANG_KOREAN = 18
LANG_LATVIAN = 38
LANG_LITHUANIAN = 39
LANG_MACEDONIAN = 47
LANG_MALAY = 62
LANG_NORWEGIAN = 20
LANG_POLISH = 21
LANG_PORTUGUESE = 22
LANG_ROMANIAN = 24
LANG_RUSSIAN = 25
LANG_SERBIAN = 26
LANG_SLOVAK = 27
LANG_SLOVENIAN = 36
LANG_SPANISH = 10
LANG_SWAHILI = 65
LANG_SWEDISH = 29
LANG_THAI = 30
LANG_TURKISH = 31
LANG_UKRAINIAN = 34
LANG_VIETNAMESE = 42
SUBLANG_NEUTRAL = 0
SUBLANG_DEFAULT = 1
SUBLANG_SYS_DEFAULT = 2
SUBLANG_ARABIC_SAUDI_ARABIA = 1
SUBLANG_ARABIC_IRAQ = 2
SUBLANG_ARABIC_EGYPT = 3
SUBLANG_ARABIC_LIBYA = 4
SUBLANG_ARABIC_ALGERIA = 5
SUBLANG_ARABIC_MOROCCO = 6
SUBLANG_ARABIC_TUNISIA = 7
SUBLANG_ARABIC_OMAN = 8
SUBLANG_ARABIC_YEMEN = 9
SUBLANG_ARABIC_SYRIA = 10
SUBLANG_ARABIC_JORDAN = 11
SUBLANG_ARABIC_LEBANON = 12
SUBLANG_ARABIC_KUWAIT = 13
SUBLANG_ARABIC_UAE = 14
SUBLANG_ARABIC_BAHRAIN = 15
SUBLANG_ARABIC_QATAR = 16
SUBLANG_CHINESE_TRADITIONAL = 1
SUBLANG_CHINESE_SIMPLIFIED = 2
SUBLANG_CHINESE_HONGKONG = 3
SUBLANG_CHINESE_SINGAPORE = 4
SUBLANG_CHINESE_MACAU = 5
SUBLANG_DUTCH = 1
SUBLANG_DUTCH_BELGIAN = 2
SUBLANG_ENGLISH_US = 1
SUBLANG_ENGLISH_UK = 2
SUBLANG_ENGLISH_AUS = 3
SUBLANG_ENGLISH_CAN = 4
SUBLANG_ENGLISH_NZ = 5
SUBLANG_ENGLISH_EIRE = 6
SUBLANG_ENGLISH_SOUTH_AFRICA = 7
SUBLANG_ENGLISH_JAMAICA = 8
SUBLANG_ENGLISH_CARIBBEAN = 9
SUBLANG_ENGLISH_BELIZE = 10
SUBLANG_ENGLISH_TRINIDAD = 11
SUBLANG_ENGLISH_ZIMBABWE = 12
SUBLANG_ENGLISH_PHILIPPINES = 13
SUBLANG_FRENCH = 1
SUBLANG_FRENCH_BELGIAN = 2
SUBLANG_FRENCH_CANADIAN = 3
SUBLANG_FRENCH_SWISS = 4
SUBLANG_FRENCH_LUXEMBOURG = 5
SUBLANG_FRENCH_MONACO = 6
SUBLANG_GERMAN = 1
SUBLANG_GERMAN_SWISS = 2
SUBLANG_GERMAN_AUSTRIAN = 3
SUBLANG_GERMAN_LUXEMBOURG = 4
SUBLANG_GERMAN_LIECHTENSTEIN = 5
SUBLANG_ITALIAN = 1
SUBLANG_ITALIAN_SWISS = 2
SUBLANG_KOREAN = 1
SUBLANG_KOREAN_JOHAB = 2
SUBLANG_LITHUANIAN = 1
SUBLANG_LITHUANIAN_CLASSIC = 2
SUBLANG_MALAY_MALAYSIA = 1
SUBLANG_MALAY_BRUNEI_DARUSSALAM = 2
SUBLANG_NORWEGIAN_BOKMAL = 1
SUBLANG_NORWEGIAN_NYNORSK = 2
SUBLANG_PORTUGUESE = 2
SUBLANG_PORTUGUESE_BRAZILIAN = 1
SUBLANG_SERBIAN_LATIN = 2
SUBLANG_SERBIAN_CYRILLIC = 3
SUBLANG_SPANISH = 1
SUBLANG_SPANISH_MEXICAN = 2
SUBLANG_SPANISH_MODERN = 3
SUBLANG_SPANISH_GUATEMALA = 4
SUBLANG_SPANISH_COSTA_RICA = 5
SUBLANG_SPANISH_PANAMA = 6
SUBLANG_SPANISH_DOMINICAN_REPUBLIC = 7
SUBLANG_SPANISH_VENEZUELA = 8
SUBLANG_SPANISH_COLOMBIA = 9
SUBLANG_SPANISH_PERU = 10
SUBLANG_SPANISH_ARGENTINA = 11
SUBLANG_SPANISH_ECUADOR = 12
SUBLANG_SPANISH_CHILE = 13
SUBLANG_SPANISH_URUGUAY = 14
SUBLANG_SPANISH_PARAGUAY = 15
SUBLANG_SPANISH_BOLIVIA = 16
SUBLANG_SPANISH_EL_SALVADOR = 17
SUBLANG_SPANISH_HONDURAS = 18
SUBLANG_SPANISH_NICARAGUA = 19
SUBLANG_SPANISH_PUERTO_RICO = 20
SUBLANG_SWEDISH = 1
SUBLANG_SWEDISH_FINLAND = 2
SORT_DEFAULT = 0
SORT_JAPANESE_XJIS = 0
SORT_JAPANESE_UNICODE = 1
SORT_CHINESE_BIG5 = 0
SORT_CHINESE_PRCP = 0
SORT_CHINESE_UNICODE = 1
SORT_CHINESE_PRC = 2
SORT_KOREAN_KSC = 0
SORT_KOREAN_UNICODE = 1
SORT_GERMAN_PHONE_BOOK = 1
def PRIMARYLANGID(lgid): return ((WORD )(lgid) & 1023)
def SUBLANGID(lgid): return ((WORD )(lgid) >> 10)
NLS_VALID_LOCALE_MASK = 1048575
def LANGIDFROMLCID(lcid): return ((WORD )(lcid))
def SORTIDFROMLCID(lcid): return ((WORD )((((DWORD)(lcid)) & NLS_VALID_LOCALE_MASK) >> 16))
def UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_LOCAL_VARIABLE(V): return (V)
# h2py emitted the following duplicates with a dangling line continuation;
# they are not valid Python and merely shadow the definitions above, so they
# are kept here only as comments:
# def UNREFERENCED_PARAMETER(P): return \
# def DBG_UNREFERENCED_PARAMETER(P): return \
# def DBG_UNREFERENCED_LOCAL_VARIABLE(V): return \
MAXIMUM_WAIT_OBJECTS = 64
MAXIMUM_SUSPEND_COUNT = MAXCHAR
EXCEPTION_NONCONTINUABLE = 1
EXCEPTION_MAXIMUM_PARAMETERS = 15
PROCESS_TERMINATE = (1)
PROCESS_CREATE_THREAD = (2)
PROCESS_VM_OPERATION = (8)
PROCESS_VM_READ = (16)
PROCESS_VM_WRITE = (32)
PROCESS_DUP_HANDLE = (64)
PROCESS_CREATE_PROCESS = (128)
PROCESS_SET_QUOTA = (256)
PROCESS_SET_INFORMATION = (512)
PROCESS_QUERY_INFORMATION = (1024)
MAXIMUM_PROCESSORS = 32
THREAD_TERMINATE = (1)
THREAD_SUSPEND_RESUME = (2)
THREAD_GET_CONTEXT = (8)
THREAD_SET_CONTEXT = (16)
THREAD_SET_INFORMATION = (32)
THREAD_QUERY_INFORMATION = (64)
THREAD_SET_THREAD_TOKEN = (128)
THREAD_IMPERSONATE = (256)
THREAD_DIRECT_IMPERSONATION = (512)
JOB_OBJECT_ASSIGN_PROCESS = (1)
JOB_OBJECT_SET_ATTRIBUTES = (2)
JOB_OBJECT_QUERY = (4)
JOB_OBJECT_TERMINATE = (8)
TLS_MINIMUM_AVAILABLE = 64
THREAD_BASE_PRIORITY_LOWRT = 15
THREAD_BASE_PRIORITY_MAX = 2
THREAD_BASE_PRIORITY_MIN = -2
THREAD_BASE_PRIORITY_IDLE = -15
JOB_OBJECT_LIMIT_WORKINGSET = 1
JOB_OBJECT_LIMIT_PROCESS_TIME = 2
JOB_OBJECT_LIMIT_JOB_TIME = 4
JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8
JOB_OBJECT_LIMIT_AFFINITY = 16
JOB_OBJECT_LIMIT_PRIORITY_CLASS = 32
JOB_OBJECT_LIMIT_VALID_FLAGS = 63
EVENT_MODIFY_STATE = 2
MUTANT_QUERY_STATE = 1
SEMAPHORE_MODIFY_STATE = 2
TIME_ZONE_ID_UNKNOWN = 0
TIME_ZONE_ID_STANDARD = 1
TIME_ZONE_ID_DAYLIGHT = 2
PROCESSOR_INTEL_386 = 386
PROCESSOR_INTEL_486 = 486
PROCESSOR_INTEL_PENTIUM = 586
PROCESSOR_MIPS_R4000 = 4000
PROCESSOR_ALPHA_21064 = 21064
PROCESSOR_HITACHI_SH3 = 10003
PROCESSOR_HITACHI_SH3E = 10004
PROCESSOR_HITACHI_SH4 = 10005
PROCESSOR_MOTOROLA_821 = 821
PROCESSOR_ARM_7TDMI = 70001
PROCESSOR_ARCHITECTURE_INTEL = 0
PROCESSOR_ARCHITECTURE_MIPS = 1
PROCESSOR_ARCHITECTURE_ALPHA = 2
PROCESSOR_ARCHITECTURE_PPC = 3
PROCESSOR_ARCHITECTURE_SH = 4
PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_IA64 = 6
PROCESSOR_ARCHITECTURE_ALPHA64 = 7
PROCESSOR_ARCHITECTURE_MSIL = 8
PROCESSOR_ARCHITECTURE_AMD64 = 9
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10
PROCESSOR_ARCHITECTURE_UNKNOWN = 65535
PF_FLOATING_POINT_PRECISION_ERRATA = 0
PF_FLOATING_POINT_EMULATED = 1
PF_COMPARE_EXCHANGE_DOUBLE = 2
PF_MMX_INSTRUCTIONS_AVAILABLE = 3
PF_PPC_MOVEMEM_64BIT_OK = 4
PF_ALPHA_BYTE_INSTRUCTIONS = 5
SECTION_QUERY = 1
SECTION_MAP_WRITE = 2
SECTION_MAP_READ = 4
SECTION_MAP_EXECUTE = 8
SECTION_EXTEND_SIZE = 16
PAGE_NOACCESS = 1
PAGE_READONLY = 2
PAGE_READWRITE = 4
PAGE_WRITECOPY = 8
PAGE_EXECUTE = 16
PAGE_EXECUTE_READ = 32
PAGE_EXECUTE_READWRITE = 64
PAGE_EXECUTE_WRITECOPY = 128
PAGE_GUARD = 256
PAGE_NOCACHE = 512
MEM_COMMIT = 4096
MEM_RESERVE = 8192
MEM_DECOMMIT = 16384
MEM_RELEASE = 32768
MEM_FREE = 65536
MEM_PRIVATE = 131072
MEM_MAPPED = 262144
MEM_RESET = 524288
MEM_TOP_DOWN = 1048576
MEM_4MB_PAGES = -2147483648
SEC_FILE = 8388608
SEC_IMAGE = 16777216
SEC_VLM = 33554432
SEC_RESERVE = 67108864
SEC_COMMIT = 134217728
SEC_NOCACHE = 268435456
MEM_IMAGE = SEC_IMAGE
FILE_READ_DATA = ( 1 )
FILE_LIST_DIRECTORY = ( 1 )
FILE_WRITE_DATA = ( 2 )
FILE_ADD_FILE = ( 2 )
FILE_APPEND_DATA = ( 4 )
FILE_ADD_SUBDIRECTORY = ( 4 )
FILE_CREATE_PIPE_INSTANCE = ( 4 )
FILE_READ_EA = ( 8 )
FILE_WRITE_EA = ( 16 )
FILE_EXECUTE = ( 32 )
FILE_TRAVERSE = ( 32 )
FILE_DELETE_CHILD = ( 64 )
FILE_READ_ATTRIBUTES = ( 128 )
FILE_WRITE_ATTRIBUTES = ( 256 )
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
FILE_SHARE_DELETE = 4
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_DEVICE = 64
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
FILE_ATTRIBUTE_ENCRYPTED = 16384
FILE_ATTRIBUTE_VIRTUAL = 65536
FILE_NOTIFY_CHANGE_FILE_NAME = 1
FILE_NOTIFY_CHANGE_DIR_NAME = 2
FILE_NOTIFY_CHANGE_ATTRIBUTES = 4
FILE_NOTIFY_CHANGE_SIZE = 8
FILE_NOTIFY_CHANGE_LAST_WRITE = 16
FILE_NOTIFY_CHANGE_LAST_ACCESS = 32
FILE_NOTIFY_CHANGE_CREATION = 64
FILE_NOTIFY_CHANGE_SECURITY = 256
FILE_ACTION_ADDED = 1
FILE_ACTION_REMOVED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
FILE_CASE_SENSITIVE_SEARCH = 1
FILE_CASE_PRESERVED_NAMES = 2
FILE_UNICODE_ON_DISK = 4
FILE_PERSISTENT_ACLS = 8
FILE_FILE_COMPRESSION = 16
FILE_VOLUME_QUOTAS = 32
FILE_SUPPORTS_SPARSE_FILES = 64
FILE_SUPPORTS_REPARSE_POINTS = 128
FILE_SUPPORTS_REMOTE_STORAGE = 256
FILE_VOLUME_IS_COMPRESSED = 32768
FILE_SUPPORTS_OBJECT_IDS = 65536
FILE_SUPPORTS_ENCRYPTION = 131072
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = ( 16 * 1024 )
IO_REPARSE_TAG_RESERVED_ZERO = (0)
IO_REPARSE_TAG_RESERVED_ONE = (1)
IO_REPARSE_TAG_SYMBOLIC_LINK = (2)
IO_REPARSE_TAG_NSS = (5)
IO_REPARSE_TAG_FILTER_MANAGER = -2147483637
IO_REPARSE_TAG_DFS = -2147483638
IO_REPARSE_TAG_SIS = -2147483641
IO_REPARSE_TAG_MOUNT_POINT = -1610612733
IO_REPARSE_TAG_HSM = -1073741820
IO_REPARSE_TAG_NSSRECOVER = (8)
IO_REPARSE_TAG_RESERVED_MS_RANGE = (256)
IO_REPARSE_TAG_RESERVED_RANGE = IO_REPARSE_TAG_RESERVED_ONE
IO_COMPLETION_MODIFY_STATE = 2
DUPLICATE_CLOSE_SOURCE = 1
DUPLICATE_SAME_ACCESS = 2
DELETE = (65536)
READ_CONTROL = (131072)
WRITE_DAC = (262144)
WRITE_OWNER = (524288)
SYNCHRONIZE = (1048576)
STANDARD_RIGHTS_REQUIRED = (983040)
STANDARD_RIGHTS_READ = (READ_CONTROL)
STANDARD_RIGHTS_WRITE = (READ_CONTROL)
STANDARD_RIGHTS_EXECUTE = (READ_CONTROL)
STANDARD_RIGHTS_ALL = (2031616)
SPECIFIC_RIGHTS_ALL = (65535)
IO_COMPLETION_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED|SYNCHRONIZE|0x3
ACCESS_SYSTEM_SECURITY = (16777216)
MAXIMUM_ALLOWED = (33554432)
GENERIC_READ = (-2147483648)
GENERIC_WRITE = (1073741824)
GENERIC_EXECUTE = (536870912)
GENERIC_ALL = (268435456)
# Included from pshpack4.h
# Included from poppack.h
SID_REVISION = (1)
SID_MAX_SUB_AUTHORITIES = (15)
SID_RECOMMENDED_SUB_AUTHORITIES = (1)
SidTypeUser = 1
SidTypeGroup = 2
SidTypeDomain = 3
SidTypeAlias = 4
SidTypeWellKnownGroup = 5
SidTypeDeletedAccount = 6
SidTypeInvalid = 7
SidTypeUnknown = 8
SECURITY_NULL_RID = (0)
SECURITY_WORLD_RID = (0)
SECURITY_LOCAL_RID = (0X00000000)
SECURITY_CREATOR_OWNER_RID = (0)
SECURITY_CREATOR_GROUP_RID = (1)
SECURITY_CREATOR_OWNER_SERVER_RID = (2)
SECURITY_CREATOR_GROUP_SERVER_RID = (3)
SECURITY_DIALUP_RID = (1)
SECURITY_NETWORK_RID = (2)
SECURITY_BATCH_RID = (3)
SECURITY_INTERACTIVE_RID = (4)
SECURITY_SERVICE_RID = (6)
SECURITY_ANONYMOUS_LOGON_RID = (7)
SECURITY_PROXY_RID = (8)
SECURITY_SERVER_LOGON_RID = (9)
SECURITY_PRINCIPAL_SELF_RID = (10)
SECURITY_AUTHENTICATED_USER_RID = (11)
SECURITY_LOGON_IDS_RID = (5)
SECURITY_LOGON_IDS_RID_COUNT = (3)
SECURITY_LOCAL_SYSTEM_RID = (18)
SECURITY_NT_NON_UNIQUE = (21)
SECURITY_BUILTIN_DOMAIN_RID = (32)
DOMAIN_USER_RID_ADMIN = (500)
DOMAIN_USER_RID_GUEST = (501)
DOMAIN_GROUP_RID_ADMINS = (512)
DOMAIN_GROUP_RID_USERS = (513)
DOMAIN_GROUP_RID_GUESTS = (514)
DOMAIN_ALIAS_RID_ADMINS = (544)
DOMAIN_ALIAS_RID_USERS = (545)
DOMAIN_ALIAS_RID_GUESTS = (546)
DOMAIN_ALIAS_RID_POWER_USERS = (547)
DOMAIN_ALIAS_RID_ACCOUNT_OPS = (548)
DOMAIN_ALIAS_RID_SYSTEM_OPS = (549)
DOMAIN_ALIAS_RID_PRINT_OPS = (550)
DOMAIN_ALIAS_RID_BACKUP_OPS = (551)
DOMAIN_ALIAS_RID_REPLICATOR = (552)
SE_GROUP_MANDATORY = (1)
SE_GROUP_ENABLED_BY_DEFAULT = (2)
SE_GROUP_ENABLED = (4)
SE_GROUP_OWNER = (8)
SE_GROUP_LOGON_ID = (-1073741824)
ACL_REVISION = (2)
ACL_REVISION_DS = (4)
ACL_REVISION1 = (1)
ACL_REVISION2 = (2)
ACL_REVISION3 = (3)
ACL_REVISION4 = (4)
MAX_ACL_REVISION = ACL_REVISION4
## ACE types
ACCESS_MIN_MS_ACE_TYPE = (0)
ACCESS_ALLOWED_ACE_TYPE = (0)
ACCESS_DENIED_ACE_TYPE = (1)
SYSTEM_AUDIT_ACE_TYPE = (2)
SYSTEM_ALARM_ACE_TYPE = (3)
ACCESS_MAX_MS_V2_ACE_TYPE = (3)
ACCESS_ALLOWED_COMPOUND_ACE_TYPE = (4)
ACCESS_MAX_MS_V3_ACE_TYPE = (4)
ACCESS_MIN_MS_OBJECT_ACE_TYPE = (5)
ACCESS_ALLOWED_OBJECT_ACE_TYPE = (5)
ACCESS_DENIED_OBJECT_ACE_TYPE = (6)
SYSTEM_AUDIT_OBJECT_ACE_TYPE = (7)
SYSTEM_ALARM_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_V4_ACE_TYPE = (8)
ACCESS_MAX_MS_ACE_TYPE = (8)
ACCESS_ALLOWED_CALLBACK_ACE_TYPE = 9
ACCESS_DENIED_CALLBACK_ACE_TYPE = 10
ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE = 11
ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE = 12
SYSTEM_AUDIT_CALLBACK_ACE_TYPE = 13
SYSTEM_ALARM_CALLBACK_ACE_TYPE = 14
SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE = 15
SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE = 16
SYSTEM_MANDATORY_LABEL_ACE_TYPE = 17
ACCESS_MAX_MS_V5_ACE_TYPE = 17
## ACE inheritance flags
OBJECT_INHERIT_ACE = (1)
CONTAINER_INHERIT_ACE = (2)
NO_PROPAGATE_INHERIT_ACE = (4)
INHERIT_ONLY_ACE = (8)
INHERITED_ACE = (16)
VALID_INHERIT_FLAGS = (31)
SUCCESSFUL_ACCESS_ACE_FLAG = (64)
FAILED_ACCESS_ACE_FLAG = (128)
ACE_OBJECT_TYPE_PRESENT = 1
ACE_INHERITED_OBJECT_TYPE_PRESENT = 2
SECURITY_DESCRIPTOR_REVISION = (1)
SECURITY_DESCRIPTOR_REVISION1 = (1)
SECURITY_DESCRIPTOR_MIN_LENGTH = (20)
SE_OWNER_DEFAULTED = (1)
SE_GROUP_DEFAULTED = (2)
SE_DACL_PRESENT = (4)
SE_DACL_DEFAULTED = (8)
SE_SACL_PRESENT = (16)
SE_SACL_DEFAULTED = (32)
SE_DACL_AUTO_INHERIT_REQ = (256)
SE_SACL_AUTO_INHERIT_REQ = (512)
SE_DACL_AUTO_INHERITED = (1024)
SE_SACL_AUTO_INHERITED = (2048)
SE_DACL_PROTECTED = (4096)
SE_SACL_PROTECTED = (8192)
SE_SELF_RELATIVE = (32768)
ACCESS_OBJECT_GUID = 0
ACCESS_PROPERTY_SET_GUID = 1
ACCESS_PROPERTY_GUID = 2
ACCESS_MAX_LEVEL = 4
AUDIT_ALLOW_NO_PRIVILEGE = 1
ACCESS_DS_SOURCE_A = "Directory Service"
ACCESS_DS_OBJECT_TYPE_NAME_A = "Directory Service Object"
SE_PRIVILEGE_ENABLED_BY_DEFAULT = (1)
SE_PRIVILEGE_ENABLED = (2)
SE_PRIVILEGE_USED_FOR_ACCESS = (-2147483648)
PRIVILEGE_SET_ALL_NECESSARY = (1)
SE_CREATE_TOKEN_NAME = "SeCreateTokenPrivilege"
SE_ASSIGNPRIMARYTOKEN_NAME = "SeAssignPrimaryTokenPrivilege"
SE_LOCK_MEMORY_NAME = "SeLockMemoryPrivilege"
SE_INCREASE_QUOTA_NAME = "SeIncreaseQuotaPrivilege"
SE_UNSOLICITED_INPUT_NAME = "SeUnsolicitedInputPrivilege"
SE_MACHINE_ACCOUNT_NAME = "SeMachineAccountPrivilege"
SE_TCB_NAME = "SeTcbPrivilege"
SE_SECURITY_NAME = "SeSecurityPrivilege"
SE_TAKE_OWNERSHIP_NAME = "SeTakeOwnershipPrivilege"
SE_LOAD_DRIVER_NAME = "SeLoadDriverPrivilege"
SE_SYSTEM_PROFILE_NAME = "SeSystemProfilePrivilege"
SE_SYSTEMTIME_NAME = "SeSystemtimePrivilege"
SE_PROF_SINGLE_PROCESS_NAME = "SeProfileSingleProcessPrivilege"
SE_INC_BASE_PRIORITY_NAME = "SeIncreaseBasePriorityPrivilege"
SE_CREATE_PAGEFILE_NAME = "SeCreatePagefilePrivilege"
SE_CREATE_PERMANENT_NAME = "SeCreatePermanentPrivilege"
SE_BACKUP_NAME = "SeBackupPrivilege"
SE_RESTORE_NAME = "SeRestorePrivilege"
SE_SHUTDOWN_NAME = "SeShutdownPrivilege"
SE_DEBUG_NAME = "SeDebugPrivilege"
SE_AUDIT_NAME = "SeAuditPrivilege"
SE_SYSTEM_ENVIRONMENT_NAME = "SeSystemEnvironmentPrivilege"
SE_CHANGE_NOTIFY_NAME = "SeChangeNotifyPrivilege"
SE_REMOTE_SHUTDOWN_NAME = "SeRemoteShutdownPrivilege"
TOKEN_ASSIGN_PRIMARY = (1)
TOKEN_DUPLICATE = (2)
TOKEN_IMPERSONATE = (4)
TOKEN_QUERY = (8)
TOKEN_QUERY_SOURCE = (16)
TOKEN_ADJUST_PRIVILEGES = (32)
TOKEN_ADJUST_GROUPS = (64)
TOKEN_ADJUST_DEFAULT = (128)
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |\
TOKEN_ASSIGN_PRIMARY |\
TOKEN_DUPLICATE |\
TOKEN_IMPERSONATE |\
TOKEN_QUERY |\
TOKEN_QUERY_SOURCE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
TOKEN_READ = (STANDARD_RIGHTS_READ |\
TOKEN_QUERY)
TOKEN_WRITE = (STANDARD_RIGHTS_WRITE |\
TOKEN_ADJUST_PRIVILEGES |\
TOKEN_ADJUST_GROUPS |\
TOKEN_ADJUST_DEFAULT)
TOKEN_EXECUTE = (STANDARD_RIGHTS_EXECUTE)
TOKEN_SOURCE_LENGTH = 8
# Token types
TokenPrimary = 1
TokenImpersonation = 2
# TOKEN_INFORMATION_CLASS, used with Get/SetTokenInformation
TokenUser = 1
TokenGroups = 2
TokenPrivileges = 3
TokenOwner = 4
TokenPrimaryGroup = 5
TokenDefaultDacl = 6
TokenSource = 7
TokenType = 8
TokenImpersonationLevel = 9
TokenStatistics = 10
TokenRestrictedSids = 11
TokenSessionId = 12
TokenGroupsAndPrivileges = 13
TokenSessionReference = 14
TokenSandBoxInert = 15
TokenAuditPolicy = 16
TokenOrigin = 17
TokenElevationType = 18
TokenLinkedToken = 19
TokenElevation = 20
TokenHasRestrictions = 21
TokenAccessInformation = 22
TokenVirtualizationAllowed = 23
TokenVirtualizationEnabled = 24
TokenIntegrityLevel = 25
TokenUIAccess = 26
TokenMandatoryPolicy = 27
TokenLogonSid = 28
OWNER_SECURITY_INFORMATION = (0X00000001)
GROUP_SECURITY_INFORMATION = (0X00000002)
DACL_SECURITY_INFORMATION = (0X00000004)
SACL_SECURITY_INFORMATION = (0X00000008)
LABEL_SECURITY_INFORMATION = 0x00000010
IMAGE_DOS_SIGNATURE = 23117
IMAGE_OS2_SIGNATURE = 17742
IMAGE_OS2_SIGNATURE_LE = 17740
IMAGE_VXD_SIGNATURE = 17740
IMAGE_NT_SIGNATURE = 17744
IMAGE_SIZEOF_FILE_HEADER = 20
IMAGE_FILE_RELOCS_STRIPPED = 1
IMAGE_FILE_EXECUTABLE_IMAGE = 2
IMAGE_FILE_LINE_NUMS_STRIPPED = 4
IMAGE_FILE_LOCAL_SYMS_STRIPPED = 8
IMAGE_FILE_AGGRESIVE_WS_TRIM = 16
IMAGE_FILE_LARGE_ADDRESS_AWARE = 32
IMAGE_FILE_BYTES_REVERSED_LO = 128
IMAGE_FILE_32BIT_MACHINE = 256
IMAGE_FILE_DEBUG_STRIPPED = 512
IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 1024
IMAGE_FILE_NET_RUN_FROM_SWAP = 2048
IMAGE_FILE_SYSTEM = 4096
IMAGE_FILE_DLL = 8192
IMAGE_FILE_UP_SYSTEM_ONLY = 16384
IMAGE_FILE_BYTES_REVERSED_HI = 32768
IMAGE_FILE_MACHINE_UNKNOWN = 0
IMAGE_FILE_MACHINE_I386 = 332
IMAGE_FILE_MACHINE_R3000 = 354
IMAGE_FILE_MACHINE_R4000 = 358
IMAGE_FILE_MACHINE_R10000 = 360
IMAGE_FILE_MACHINE_WCEMIPSV2 = 361
IMAGE_FILE_MACHINE_ALPHA = 388
IMAGE_FILE_MACHINE_POWERPC = 496
IMAGE_FILE_MACHINE_SH3 = 418
IMAGE_FILE_MACHINE_SH3E = 420
IMAGE_FILE_MACHINE_SH4 = 422
IMAGE_FILE_MACHINE_ARM = 448
IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16
IMAGE_SIZEOF_ROM_OPTIONAL_HEADER = 56
IMAGE_SIZEOF_STD_OPTIONAL_HEADER = 28
IMAGE_SIZEOF_NT_OPTIONAL_HEADER = 224
IMAGE_NT_OPTIONAL_HDR_MAGIC = 267
IMAGE_ROM_OPTIONAL_HDR_MAGIC = 263
IMAGE_SUBSYSTEM_UNKNOWN = 0
IMAGE_SUBSYSTEM_NATIVE = 1
IMAGE_SUBSYSTEM_WINDOWS_GUI = 2
IMAGE_SUBSYSTEM_WINDOWS_CUI = 3
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 4
IMAGE_SUBSYSTEM_OS2_CUI = 5
IMAGE_SUBSYSTEM_POSIX_CUI = 7
IMAGE_SUBSYSTEM_RESERVED8 = 8
IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 8192
IMAGE_DIRECTORY_ENTRY_EXPORT = 0
IMAGE_DIRECTORY_ENTRY_IMPORT = 1
IMAGE_DIRECTORY_ENTRY_RESOURCE = 2
IMAGE_DIRECTORY_ENTRY_EXCEPTION = 3
IMAGE_DIRECTORY_ENTRY_SECURITY = 4
IMAGE_DIRECTORY_ENTRY_BASERELOC = 5
IMAGE_DIRECTORY_ENTRY_DEBUG = 6
IMAGE_DIRECTORY_ENTRY_COPYRIGHT = 7
IMAGE_DIRECTORY_ENTRY_GLOBALPTR = 8
IMAGE_DIRECTORY_ENTRY_TLS = 9
IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG = 10
IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT = 11
IMAGE_DIRECTORY_ENTRY_IAT = 12
IMAGE_SIZEOF_SHORT_NAME = 8
IMAGE_SIZEOF_SECTION_HEADER = 40
IMAGE_SCN_TYPE_NO_PAD = 8
IMAGE_SCN_CNT_CODE = 32
IMAGE_SCN_CNT_INITIALIZED_DATA = 64
IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128
IMAGE_SCN_LNK_OTHER = 256
IMAGE_SCN_LNK_INFO = 512
IMAGE_SCN_LNK_REMOVE = 2048
IMAGE_SCN_LNK_COMDAT = 4096
IMAGE_SCN_MEM_FARDATA = 32768
IMAGE_SCN_MEM_PURGEABLE = 131072
IMAGE_SCN_MEM_16BIT = 131072
IMAGE_SCN_MEM_LOCKED = 262144
IMAGE_SCN_MEM_PRELOAD = 524288
IMAGE_SCN_ALIGN_1BYTES = 1048576
IMAGE_SCN_ALIGN_2BYTES = 2097152
IMAGE_SCN_ALIGN_4BYTES = 3145728
IMAGE_SCN_ALIGN_8BYTES = 4194304
IMAGE_SCN_ALIGN_16BYTES = 5242880
IMAGE_SCN_ALIGN_32BYTES = 6291456
IMAGE_SCN_ALIGN_64BYTES = 7340032
IMAGE_SCN_LNK_NRELOC_OVFL = 16777216
IMAGE_SCN_MEM_DISCARDABLE = 33554432
IMAGE_SCN_MEM_NOT_CACHED = 67108864
IMAGE_SCN_MEM_NOT_PAGED = 134217728
IMAGE_SCN_MEM_SHARED = 268435456
IMAGE_SCN_MEM_EXECUTE = 536870912
IMAGE_SCN_MEM_READ = 1073741824
IMAGE_SCN_MEM_WRITE = -2147483648
IMAGE_SCN_SCALE_INDEX = 1
IMAGE_SIZEOF_SYMBOL = 18
IMAGE_SYM_TYPE_NULL = 0
IMAGE_SYM_TYPE_VOID = 1
IMAGE_SYM_TYPE_CHAR = 2
IMAGE_SYM_TYPE_SHORT = 3
IMAGE_SYM_TYPE_INT = 4
IMAGE_SYM_TYPE_LONG = 5
IMAGE_SYM_TYPE_FLOAT = 6
IMAGE_SYM_TYPE_DOUBLE = 7
IMAGE_SYM_TYPE_STRUCT = 8
IMAGE_SYM_TYPE_UNION = 9
IMAGE_SYM_TYPE_ENUM = 10
IMAGE_SYM_TYPE_MOE = 11
IMAGE_SYM_TYPE_BYTE = 12
IMAGE_SYM_TYPE_WORD = 13
IMAGE_SYM_TYPE_UINT = 14
IMAGE_SYM_TYPE_DWORD = 15
IMAGE_SYM_TYPE_PCODE = 32768
IMAGE_SYM_DTYPE_NULL = 0
IMAGE_SYM_DTYPE_POINTER = 1
IMAGE_SYM_DTYPE_FUNCTION = 2
IMAGE_SYM_DTYPE_ARRAY = 3
IMAGE_SYM_CLASS_NULL = 0
IMAGE_SYM_CLASS_AUTOMATIC = 1
IMAGE_SYM_CLASS_EXTERNAL = 2
IMAGE_SYM_CLASS_STATIC = 3
IMAGE_SYM_CLASS_REGISTER = 4
IMAGE_SYM_CLASS_EXTERNAL_DEF = 5
IMAGE_SYM_CLASS_LABEL = 6
IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7
IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8
IMAGE_SYM_CLASS_ARGUMENT = 9
IMAGE_SYM_CLASS_STRUCT_TAG = 10
IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11
IMAGE_SYM_CLASS_UNION_TAG = 12
IMAGE_SYM_CLASS_TYPE_DEFINITION = 13
IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14
IMAGE_SYM_CLASS_ENUM_TAG = 15
IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16
IMAGE_SYM_CLASS_REGISTER_PARAM = 17
IMAGE_SYM_CLASS_BIT_FIELD = 18
IMAGE_SYM_CLASS_FAR_EXTERNAL = 68
IMAGE_SYM_CLASS_BLOCK = 100
IMAGE_SYM_CLASS_FUNCTION = 101
IMAGE_SYM_CLASS_END_OF_STRUCT = 102
IMAGE_SYM_CLASS_FILE = 103
IMAGE_SYM_CLASS_SECTION = 104
IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105
N_BTMASK = 15
N_TMASK = 48
N_TMASK1 = 192
N_TMASK2 = 240
N_BTSHFT = 4
N_TSHIFT = 2
def BTYPE(x): return ((x) & N_BTMASK)
def ISPTR(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_POINTER << N_BTSHFT))
def ISFCN(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_FUNCTION << N_BTSHFT))
def ISARY(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_ARRAY << N_BTSHFT))
def INCREF(x): return ((((x)&~N_BTMASK)<<N_TSHIFT)|(IMAGE_SYM_DTYPE_POINTER<<N_BTSHFT)|((x)&N_BTMASK))
def DECREF(x): return ((((x)>>N_TSHIFT)&~N_BTMASK)|((x)&N_BTMASK))
IMAGE_SIZEOF_AUX_SYMBOL = 18
IMAGE_COMDAT_SELECT_NODUPLICATES = 1
IMAGE_COMDAT_SELECT_ANY = 2
IMAGE_COMDAT_SELECT_SAME_SIZE = 3
IMAGE_COMDAT_SELECT_EXACT_MATCH = 4
IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5
IMAGE_COMDAT_SELECT_LARGEST = 6
IMAGE_COMDAT_SELECT_NEWEST = 7
IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1
IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2
IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3
IMAGE_SIZEOF_RELOCATION = 10
IMAGE_REL_I386_ABSOLUTE = 0
IMAGE_REL_I386_DIR16 = 1
IMAGE_REL_I386_REL16 = 2
IMAGE_REL_I386_DIR32 = 6
IMAGE_REL_I386_DIR32NB = 7
IMAGE_REL_I386_SEG12 = 9
IMAGE_REL_I386_SECTION = 10
IMAGE_REL_I386_SECREL = 11
IMAGE_REL_I386_REL32 = 20
IMAGE_REL_MIPS_ABSOLUTE = 0
IMAGE_REL_MIPS_REFHALF = 1
IMAGE_REL_MIPS_REFWORD = 2
IMAGE_REL_MIPS_JMPADDR = 3
IMAGE_REL_MIPS_REFHI = 4
IMAGE_REL_MIPS_REFLO = 5
IMAGE_REL_MIPS_GPREL = 6
IMAGE_REL_MIPS_LITERAL = 7
IMAGE_REL_MIPS_SECTION = 10
IMAGE_REL_MIPS_SECREL = 11
IMAGE_REL_MIPS_SECRELLO = 12
IMAGE_REL_MIPS_SECRELHI = 13
IMAGE_REL_MIPS_REFWORDNB = 34
IMAGE_REL_MIPS_PAIR = 37
IMAGE_REL_ALPHA_ABSOLUTE = 0
IMAGE_REL_ALPHA_REFLONG = 1
IMAGE_REL_ALPHA_REFQUAD = 2
IMAGE_REL_ALPHA_GPREL32 = 3
IMAGE_REL_ALPHA_LITERAL = 4
IMAGE_REL_ALPHA_LITUSE = 5
IMAGE_REL_ALPHA_GPDISP = 6
IMAGE_REL_ALPHA_BRADDR = 7
IMAGE_REL_ALPHA_HINT = 8
IMAGE_REL_ALPHA_INLINE_REFLONG = 9
IMAGE_REL_ALPHA_REFHI = 10
IMAGE_REL_ALPHA_REFLO = 11
IMAGE_REL_ALPHA_PAIR = 12
IMAGE_REL_ALPHA_MATCH = 13
IMAGE_REL_ALPHA_SECTION = 14
IMAGE_REL_ALPHA_SECREL = 15
IMAGE_REL_ALPHA_REFLONGNB = 16
IMAGE_REL_ALPHA_SECRELLO = 17
IMAGE_REL_ALPHA_SECRELHI = 18
IMAGE_REL_PPC_ABSOLUTE = 0
IMAGE_REL_PPC_ADDR64 = 1
IMAGE_REL_PPC_ADDR32 = 2
IMAGE_REL_PPC_ADDR24 = 3
IMAGE_REL_PPC_ADDR16 = 4
IMAGE_REL_PPC_ADDR14 = 5
IMAGE_REL_PPC_REL24 = 6
IMAGE_REL_PPC_REL14 = 7
IMAGE_REL_PPC_TOCREL16 = 8
IMAGE_REL_PPC_TOCREL14 = 9
IMAGE_REL_PPC_ADDR32NB = 10
IMAGE_REL_PPC_SECREL = 11
IMAGE_REL_PPC_SECTION = 12
IMAGE_REL_PPC_IFGLUE = 13
IMAGE_REL_PPC_IMGLUE = 14
IMAGE_REL_PPC_SECREL16 = 15
IMAGE_REL_PPC_REFHI = 16
IMAGE_REL_PPC_REFLO = 17
IMAGE_REL_PPC_PAIR = 18
IMAGE_REL_PPC_SECRELLO = 19
IMAGE_REL_PPC_SECRELHI = 20
IMAGE_REL_PPC_TYPEMASK = 255
IMAGE_REL_PPC_NEG = 256
IMAGE_REL_PPC_BRTAKEN = 512
IMAGE_REL_PPC_BRNTAKEN = 1024
IMAGE_REL_PPC_TOCDEFN = 2048
IMAGE_REL_SH3_ABSOLUTE = 0
IMAGE_REL_SH3_DIRECT16 = 1
IMAGE_REL_SH3_DIRECT32 = 2
IMAGE_REL_SH3_DIRECT8 = 3
IMAGE_REL_SH3_DIRECT8_WORD = 4
IMAGE_REL_SH3_DIRECT8_LONG = 5
IMAGE_REL_SH3_DIRECT4 = 6
IMAGE_REL_SH3_DIRECT4_WORD = 7
IMAGE_REL_SH3_DIRECT4_LONG = 8
IMAGE_REL_SH3_PCREL8_WORD = 9
IMAGE_REL_SH3_PCREL8_LONG = 10
IMAGE_REL_SH3_PCREL12_WORD = 11
IMAGE_REL_SH3_STARTOF_SECTION = 12
IMAGE_REL_SH3_SIZEOF_SECTION = 13
IMAGE_REL_SH3_SECTION = 14
IMAGE_REL_SH3_SECREL = 15
IMAGE_REL_SH3_DIRECT32_NB = 16
IMAGE_SIZEOF_LINENUMBER = 6
IMAGE_SIZEOF_BASE_RELOCATION = 8
IMAGE_REL_BASED_ABSOLUTE = 0
IMAGE_REL_BASED_HIGH = 1
IMAGE_REL_BASED_LOW = 2
IMAGE_REL_BASED_HIGHLOW = 3
IMAGE_REL_BASED_HIGHADJ = 4
IMAGE_REL_BASED_MIPS_JMPADDR = 5
IMAGE_REL_BASED_SECTION = 6
IMAGE_REL_BASED_REL32 = 7
IMAGE_ARCHIVE_START_SIZE = 8
IMAGE_ARCHIVE_START = "!<arch>\n"
IMAGE_ARCHIVE_END = "`\n"
IMAGE_ARCHIVE_PAD = "\n"
IMAGE_ARCHIVE_LINKER_MEMBER = "/ "
IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR = 60
IMAGE_ORDINAL_FLAG = -2147483648
def IMAGE_SNAP_BY_ORDINAL(Ordinal): return ((Ordinal & IMAGE_ORDINAL_FLAG) != 0)
def IMAGE_ORDINAL(Ordinal): return (Ordinal & 65535)
IMAGE_RESOURCE_NAME_IS_STRING = -2147483648
IMAGE_RESOURCE_DATA_IS_DIRECTORY = -2147483648
IMAGE_DEBUG_TYPE_UNKNOWN = 0
IMAGE_DEBUG_TYPE_COFF = 1
IMAGE_DEBUG_TYPE_CODEVIEW = 2
IMAGE_DEBUG_TYPE_FPO = 3
IMAGE_DEBUG_TYPE_MISC = 4
IMAGE_DEBUG_TYPE_EXCEPTION = 5
IMAGE_DEBUG_TYPE_FIXUP = 6
IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7
IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8
IMAGE_DEBUG_TYPE_BORLAND = 9
FRAME_FPO = 0
FRAME_TRAP = 1
FRAME_TSS = 2
FRAME_NONFPO = 3
SIZEOF_RFPO_DATA = 16
IMAGE_DEBUG_MISC_EXENAME = 1
IMAGE_SEPARATE_DEBUG_SIGNATURE = 18756
IMAGE_SEPARATE_DEBUG_FLAGS_MASK = 32768
IMAGE_SEPARATE_DEBUG_MISMATCH = 32768
# Included from string.h
_NLSCMPERROR = 2147483647
NULL = 0
HEAP_NO_SERIALIZE = 1
HEAP_GROWABLE = 2
HEAP_GENERATE_EXCEPTIONS = 4
HEAP_ZERO_MEMORY = 8
HEAP_REALLOC_IN_PLACE_ONLY = 16
HEAP_TAIL_CHECKING_ENABLED = 32
HEAP_FREE_CHECKING_ENABLED = 64
HEAP_DISABLE_COALESCE_ON_FREE = 128
HEAP_CREATE_ALIGN_16 = 65536
HEAP_CREATE_ENABLE_TRACING = 131072
HEAP_MAXIMUM_TAG = 4095
HEAP_PSEUDO_TAG_FLAG = 32768
HEAP_TAG_SHIFT = 16
IS_TEXT_UNICODE_ASCII16 = 1
IS_TEXT_UNICODE_REVERSE_ASCII16 = 16
IS_TEXT_UNICODE_STATISTICS = 2
IS_TEXT_UNICODE_REVERSE_STATISTICS = 32
IS_TEXT_UNICODE_CONTROLS = 4
IS_TEXT_UNICODE_REVERSE_CONTROLS = 64
IS_TEXT_UNICODE_SIGNATURE = 8
IS_TEXT_UNICODE_REVERSE_SIGNATURE = 128
IS_TEXT_UNICODE_ILLEGAL_CHARS = 256
IS_TEXT_UNICODE_ODD_LENGTH = 512
IS_TEXT_UNICODE_DBCS_LEADBYTE = 1024
IS_TEXT_UNICODE_NULL_BYTES = 4096
IS_TEXT_UNICODE_UNICODE_MASK = 15
IS_TEXT_UNICODE_REVERSE_MASK = 240
IS_TEXT_UNICODE_NOT_UNICODE_MASK = 3840
IS_TEXT_UNICODE_NOT_ASCII_MASK = 61440
COMPRESSION_FORMAT_NONE = (0)
COMPRESSION_FORMAT_DEFAULT = (1)
COMPRESSION_FORMAT_LZNT1 = (2)
COMPRESSION_ENGINE_STANDARD = (0)
COMPRESSION_ENGINE_MAXIMUM = (256)
MESSAGE_RESOURCE_UNICODE = 1
RTL_CRITSECT_TYPE = 0
RTL_RESOURCE_TYPE = 1
SEF_DACL_AUTO_INHERIT = 1
SEF_SACL_AUTO_INHERIT = 2
SEF_DEFAULT_DESCRIPTOR_FOR_OBJECT = 4
SEF_AVOID_PRIVILEGE_CHECK = 8
DLL_PROCESS_ATTACH = 1
DLL_THREAD_ATTACH = 2
DLL_THREAD_DETACH = 3
DLL_PROCESS_DETACH = 0
EVENTLOG_SEQUENTIAL_READ = 0X0001
EVENTLOG_SEEK_READ = 0X0002
EVENTLOG_FORWARDS_READ = 0X0004
EVENTLOG_BACKWARDS_READ = 0X0008
EVENTLOG_SUCCESS = 0X0000
EVENTLOG_ERROR_TYPE = 1
EVENTLOG_WARNING_TYPE = 2
EVENTLOG_INFORMATION_TYPE = 4
EVENTLOG_AUDIT_SUCCESS = 8
EVENTLOG_AUDIT_FAILURE = 16
EVENTLOG_START_PAIRED_EVENT = 1
EVENTLOG_END_PAIRED_EVENT = 2
EVENTLOG_END_ALL_PAIRED_EVENTS = 4
EVENTLOG_PAIRED_EVENT_ACTIVE = 8
EVENTLOG_PAIRED_EVENT_INACTIVE = 16
KEY_QUERY_VALUE = (1)
KEY_SET_VALUE = (2)
KEY_CREATE_SUB_KEY = (4)
KEY_ENUMERATE_SUB_KEYS = (8)
KEY_NOTIFY = (16)
KEY_CREATE_LINK = (32)
KEY_READ = ((STANDARD_RIGHTS_READ |\
KEY_QUERY_VALUE |\
KEY_ENUMERATE_SUB_KEYS |\
KEY_NOTIFY) \
& \
(~SYNCHRONIZE))
KEY_WRITE = ((STANDARD_RIGHTS_WRITE |\
KEY_SET_VALUE |\
KEY_CREATE_SUB_KEY) \
& \
(~SYNCHRONIZE))
KEY_EXECUTE = ((KEY_READ) \
& \
(~SYNCHRONIZE))
KEY_ALL_ACCESS = ((STANDARD_RIGHTS_ALL |\
KEY_QUERY_VALUE |\
KEY_SET_VALUE |\
KEY_CREATE_SUB_KEY |\
KEY_ENUMERATE_SUB_KEYS |\
KEY_NOTIFY |\
KEY_CREATE_LINK) \
& \
(~SYNCHRONIZE))
REG_OPTION_RESERVED = (0)
REG_OPTION_NON_VOLATILE = (0)
REG_OPTION_VOLATILE = (1)
REG_OPTION_CREATE_LINK = (2)
REG_OPTION_BACKUP_RESTORE = (4)
REG_OPTION_OPEN_LINK = (8)
REG_LEGAL_OPTION = \
(REG_OPTION_RESERVED |\
REG_OPTION_NON_VOLATILE |\
REG_OPTION_VOLATILE |\
REG_OPTION_CREATE_LINK |\
REG_OPTION_BACKUP_RESTORE |\
REG_OPTION_OPEN_LINK)
## dispositions returned from RegCreateKeyEx
REG_CREATED_NEW_KEY = 1
REG_OPENED_EXISTING_KEY = 2
## flags used with RegSaveKeyEx
REG_STANDARD_FORMAT = 1
REG_LATEST_FORMAT = 2
REG_NO_COMPRESSION = 4
## flags used with RegRestoreKey
REG_WHOLE_HIVE_VOLATILE = 1
REG_REFRESH_HIVE = 2
REG_NO_LAZY_FLUSH = 4
REG_FORCE_RESTORE = 8
REG_NOTIFY_CHANGE_NAME = (1)
REG_NOTIFY_CHANGE_ATTRIBUTES = (2)
REG_NOTIFY_CHANGE_LAST_SET = (4)
REG_NOTIFY_CHANGE_SECURITY = (8)
REG_LEGAL_CHANGE_FILTER = \
(REG_NOTIFY_CHANGE_NAME |\
REG_NOTIFY_CHANGE_ATTRIBUTES |\
REG_NOTIFY_CHANGE_LAST_SET |\
REG_NOTIFY_CHANGE_SECURITY)
REG_NONE = ( 0 )
REG_SZ = ( 1 )
REG_EXPAND_SZ = ( 2 )
REG_BINARY = ( 3 )
REG_DWORD = ( 4 )
REG_DWORD_LITTLE_ENDIAN = ( 4 )
REG_DWORD_BIG_ENDIAN = ( 5 )
REG_LINK = ( 6 )
REG_MULTI_SZ = ( 7 )
REG_RESOURCE_LIST = ( 8 )
REG_FULL_RESOURCE_DESCRIPTOR = ( 9 )
REG_RESOURCE_REQUIREMENTS_LIST = ( 10 )
SERVICE_KERNEL_DRIVER = 1
SERVICE_FILE_SYSTEM_DRIVER = 2
SERVICE_ADAPTER = 4
SERVICE_RECOGNIZER_DRIVER = 8
SERVICE_DRIVER = (SERVICE_KERNEL_DRIVER | \
SERVICE_FILE_SYSTEM_DRIVER | \
SERVICE_RECOGNIZER_DRIVER)
SERVICE_WIN32_OWN_PROCESS = 16
SERVICE_WIN32_SHARE_PROCESS = 32
SERVICE_WIN32 = (SERVICE_WIN32_OWN_PROCESS | \
SERVICE_WIN32_SHARE_PROCESS)
SERVICE_INTERACTIVE_PROCESS = 256
SERVICE_TYPE_ALL = (SERVICE_WIN32 | \
SERVICE_ADAPTER | \
SERVICE_DRIVER | \
SERVICE_INTERACTIVE_PROCESS)
SERVICE_BOOT_START = 0
SERVICE_SYSTEM_START = 1
SERVICE_AUTO_START = 2
SERVICE_DEMAND_START = 3
SERVICE_DISABLED = 4
SERVICE_ERROR_IGNORE = 0
SERVICE_ERROR_NORMAL = 1
SERVICE_ERROR_SEVERE = 2
SERVICE_ERROR_CRITICAL = 3
TAPE_ERASE_SHORT = 0
TAPE_ERASE_LONG = 1
TAPE_LOAD = 0
TAPE_UNLOAD = 1
TAPE_TENSION = 2
TAPE_LOCK = 3
TAPE_UNLOCK = 4
TAPE_FORMAT = 5
TAPE_SETMARKS = 0
TAPE_FILEMARKS = 1
TAPE_SHORT_FILEMARKS = 2
TAPE_LONG_FILEMARKS = 3
TAPE_ABSOLUTE_POSITION = 0
TAPE_LOGICAL_POSITION = 1
TAPE_PSEUDO_LOGICAL_POSITION = 2
TAPE_REWIND = 0
TAPE_ABSOLUTE_BLOCK = 1
TAPE_LOGICAL_BLOCK = 2
TAPE_PSEUDO_LOGICAL_BLOCK = 3
TAPE_SPACE_END_OF_DATA = 4
TAPE_SPACE_RELATIVE_BLOCKS = 5
TAPE_SPACE_FILEMARKS = 6
TAPE_SPACE_SEQUENTIAL_FMKS = 7
TAPE_SPACE_SETMARKS = 8
TAPE_SPACE_SEQUENTIAL_SMKS = 9
TAPE_DRIVE_FIXED = 1
TAPE_DRIVE_SELECT = 2
TAPE_DRIVE_INITIATOR = 4
TAPE_DRIVE_ERASE_SHORT = 16
TAPE_DRIVE_ERASE_LONG = 32
TAPE_DRIVE_ERASE_BOP_ONLY = 64
TAPE_DRIVE_ERASE_IMMEDIATE = 128
TAPE_DRIVE_TAPE_CAPACITY = 256
TAPE_DRIVE_TAPE_REMAINING = 512
TAPE_DRIVE_FIXED_BLOCK = 1024
TAPE_DRIVE_VARIABLE_BLOCK = 2048
TAPE_DRIVE_WRITE_PROTECT = 4096
TAPE_DRIVE_EOT_WZ_SIZE = 8192
TAPE_DRIVE_ECC = 65536
TAPE_DRIVE_COMPRESSION = 131072
TAPE_DRIVE_PADDING = 262144
TAPE_DRIVE_REPORT_SMKS = 524288
TAPE_DRIVE_GET_ABSOLUTE_BLK = 1048576
TAPE_DRIVE_GET_LOGICAL_BLK = 2097152
TAPE_DRIVE_SET_EOT_WZ_SIZE = 4194304
TAPE_DRIVE_EJECT_MEDIA = 16777216
TAPE_DRIVE_RESERVED_BIT = -2147483648
TAPE_DRIVE_LOAD_UNLOAD = -2147483647
TAPE_DRIVE_TENSION = -2147483646
TAPE_DRIVE_LOCK_UNLOCK = -2147483644
TAPE_DRIVE_REWIND_IMMEDIATE = -2147483640
TAPE_DRIVE_SET_BLOCK_SIZE = -2147483632
TAPE_DRIVE_LOAD_UNLD_IMMED = -2147483616
TAPE_DRIVE_TENSION_IMMED = -2147483584
TAPE_DRIVE_LOCK_UNLK_IMMED = -2147483520
TAPE_DRIVE_SET_ECC = -2147483392
TAPE_DRIVE_SET_COMPRESSION = -2147483136
TAPE_DRIVE_SET_PADDING = -2147482624
TAPE_DRIVE_SET_REPORT_SMKS = -2147481600
TAPE_DRIVE_ABSOLUTE_BLK = -2147479552
TAPE_DRIVE_ABS_BLK_IMMED = -2147475456
TAPE_DRIVE_LOGICAL_BLK = -2147467264
TAPE_DRIVE_LOG_BLK_IMMED = -2147450880
TAPE_DRIVE_END_OF_DATA = -2147418112
TAPE_DRIVE_RELATIVE_BLKS = -2147352576
TAPE_DRIVE_FILEMARKS = -2147221504
TAPE_DRIVE_SEQUENTIAL_FMKS = -2146959360
TAPE_DRIVE_SETMARKS = -2146435072
TAPE_DRIVE_SEQUENTIAL_SMKS = -2145386496
TAPE_DRIVE_REVERSE_POSITION = -2143289344
TAPE_DRIVE_SPACE_IMMEDIATE = -2139095040
TAPE_DRIVE_WRITE_SETMARKS = -2130706432
TAPE_DRIVE_WRITE_FILEMARKS = -2113929216
TAPE_DRIVE_WRITE_SHORT_FMKS = -2080374784
TAPE_DRIVE_WRITE_LONG_FMKS = -2013265920
TAPE_DRIVE_WRITE_MARK_IMMED = -1879048192
TAPE_DRIVE_FORMAT = -1610612736
TAPE_DRIVE_FORMAT_IMMEDIATE = -1073741824
TAPE_DRIVE_HIGH_FEATURES = -2147483648
TAPE_FIXED_PARTITIONS = 0
TAPE_SELECT_PARTITIONS = 1
TAPE_INITIATOR_PARTITIONS = 2
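## Kernel Transaction Manager (KTM) access rights for transaction manager,
## transaction, resource manager and enlistment objects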
TRANSACTIONMANAGER_QUERY_INFORMATION = 0x0001
TRANSACTIONMANAGER_SET_INFORMATION = 0x0002
TRANSACTIONMANAGER_RECOVER = 0x0004
TRANSACTIONMANAGER_RENAME = 0x0008
TRANSACTIONMANAGER_CREATE_RM = 0x0010
TRANSACTIONMANAGER_BIND_TRANSACTION = 0x0020
TRANSACTIONMANAGER_GENERIC_READ = STANDARD_RIGHTS_READ|TRANSACTIONMANAGER_QUERY_INFORMATION
TRANSACTIONMANAGER_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
TRANSACTIONMANAGER_SET_INFORMATION |\
TRANSACTIONMANAGER_RECOVER |\
TRANSACTIONMANAGER_RENAME |\
TRANSACTIONMANAGER_CREATE_RM
TRANSACTIONMANAGER_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE
TRANSACTIONMANAGER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
TRANSACTIONMANAGER_GENERIC_READ |\
TRANSACTIONMANAGER_GENERIC_WRITE |\
TRANSACTIONMANAGER_GENERIC_EXECUTE |\
TRANSACTIONMANAGER_BIND_TRANSACTION
TRANSACTION_QUERY_INFORMATION = 0x0001
TRANSACTION_SET_INFORMATION = 0x0002
TRANSACTION_ENLIST = 0x0004
TRANSACTION_COMMIT = 0x0008
TRANSACTION_ROLLBACK = 0x0010
TRANSACTION_PROPAGATE = 0x0020
TRANSACTION_SAVEPOINT = 0x0040
TRANSACTION_MARSHALL = TRANSACTION_QUERY_INFORMATION
TRANSACTION_GENERIC_READ = STANDARD_RIGHTS_READ |\
TRANSACTION_QUERY_INFORMATION |\
SYNCHRONIZE
TRANSACTION_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
TRANSACTION_SET_INFORMATION |\
TRANSACTION_COMMIT |\
TRANSACTION_ENLIST |\
TRANSACTION_ROLLBACK |\
TRANSACTION_PROPAGATE |\
TRANSACTION_SAVEPOINT |\
SYNCHRONIZE
TRANSACTION_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE |\
TRANSACTION_COMMIT |\
TRANSACTION_ROLLBACK |\
SYNCHRONIZE
TRANSACTION_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
TRANSACTION_GENERIC_READ |\
TRANSACTION_GENERIC_WRITE |\
TRANSACTION_GENERIC_EXECUTE
TRANSACTION_RESOURCE_MANAGER_RIGHTS = TRANSACTION_GENERIC_READ |\
STANDARD_RIGHTS_WRITE |\
TRANSACTION_SET_INFORMATION |\
TRANSACTION_ENLIST |\
TRANSACTION_ROLLBACK |\
TRANSACTION_PROPAGATE |\
SYNCHRONIZE
RESOURCEMANAGER_QUERY_INFORMATION = 0x0001
RESOURCEMANAGER_SET_INFORMATION = 0x0002
RESOURCEMANAGER_RECOVER = 0x0004
RESOURCEMANAGER_ENLIST = 0x0008
RESOURCEMANAGER_GET_NOTIFICATION = 0x0010
RESOURCEMANAGER_REGISTER_PROTOCOL = 0x0020
RESOURCEMANAGER_COMPLETE_PROPAGATION = 0x0040
RESOURCEMANAGER_GENERIC_READ = STANDARD_RIGHTS_READ |\
RESOURCEMANAGER_QUERY_INFORMATION |\
SYNCHRONIZE
RESOURCEMANAGER_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
RESOURCEMANAGER_SET_INFORMATION |\
RESOURCEMANAGER_RECOVER |\
RESOURCEMANAGER_ENLIST |\
RESOURCEMANAGER_GET_NOTIFICATION |\
RESOURCEMANAGER_REGISTER_PROTOCOL |\
RESOURCEMANAGER_COMPLETE_PROPAGATION |\
SYNCHRONIZE
RESOURCEMANAGER_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE |\
RESOURCEMANAGER_RECOVER |\
RESOURCEMANAGER_ENLIST |\
RESOURCEMANAGER_GET_NOTIFICATION |\
RESOURCEMANAGER_COMPLETE_PROPAGATION |\
SYNCHRONIZE
RESOURCEMANAGER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
RESOURCEMANAGER_GENERIC_READ |\
RESOURCEMANAGER_GENERIC_WRITE |\
RESOURCEMANAGER_GENERIC_EXECUTE
ENLISTMENT_QUERY_INFORMATION = 0x0001
ENLISTMENT_SET_INFORMATION = 0x0002
ENLISTMENT_RECOVER = 0x0004
ENLISTMENT_SUBORDINATE_RIGHTS = 0x0008
ENLISTMENT_SUPERIOR_RIGHTS = 0x0010
ENLISTMENT_GENERIC_READ = STANDARD_RIGHTS_READ | ENLISTMENT_QUERY_INFORMATION
ENLISTMENT_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
ENLISTMENT_SET_INFORMATION |\
ENLISTMENT_RECOVER |\
ENLISTMENT_SUBORDINATE_RIGHTS |\
ENLISTMENT_SUPERIOR_RIGHTS
ENLISTMENT_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE |\
ENLISTMENT_RECOVER |\
ENLISTMENT_SUBORDINATE_RIGHTS |\
ENLISTMENT_SUPERIOR_RIGHTS
ENLISTMENT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
ENLISTMENT_GENERIC_READ |\
ENLISTMENT_GENERIC_WRITE |\
ENLISTMENT_GENERIC_EXECUTE
## TRANSACTION_OUTCOME enum
TransactionOutcomeUndetermined = 1
TransactionOutcomeCommitted = 2
TransactionOutcomeAborted = 3
## TRANSACTION_STATE enum
TransactionStateNormal = 1
TransactionStateIndoubt = 2
TransactionStateCommittedNotify = 3
## TRANSACTION_INFORMATION_CLASS enum
TransactionBasicInformation = 0
TransactionPropertiesInformation = 1
TransactionEnlistmentInformation = 2
TransactionFullInformation = 3
## TRANSACTIONMANAGER_INFORMATION_CLASS enum
TransactionManagerBasicInformation = 0
TransactionManagerLogInformation = 1
TransactionManagerLogPathInformation = 2
TransactionManagerOnlineProbeInformation = 3
## RESOURCEMANAGER_INFORMATION_CLASS ENUM
ResourceManagerBasicInformation = 0
ResourceManagerCompletionInformation = 1
ResourceManagerFullInformation = 2
ResourceManagerNameInformation = 3
## ENLISTMENT_INFORMATION_CLASS enum
EnlistmentBasicInformation = 0
EnlistmentRecoveryInformation = 1
EnlistmentFullInformation = 2
EnlistmentNameInformation = 3
## KTMOBJECT_TYPE enum
KTMOBJECT_TRANSACTION = 0
KTMOBJECT_TRANSACTION_MANAGER = 1
KTMOBJECT_RESOURCE_MANAGER = 2
KTMOBJECT_ENLISTMENT = 3
KTMOBJECT_INVALID = 4
|
apache-2.0
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/IPython/parallel/apps/iploggerapp.py
|
14
|
2907
|
#!/usr/bin/env python
# encoding: utf-8
"""
A simple IPython logger application
Authors:
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import zmq
from IPython.core.profiledir import ProfileDir
from IPython.utils.traitlets import Bool, Dict, Unicode
from IPython.parallel.apps.baseapp import (
BaseParallelApplication,
base_aliases,
catch_config_error,
)
from IPython.parallel.apps.logwatcher import LogWatcher
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
#: The default config file name for this application
_description = """Start an IPython logger for parallel computing.
IPython controllers and engines (and your own processes) can broadcast log messages
by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The
logger can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
usually located in your ipython directory and named as "profile_name".
See the `profile` and `profile-dir` options for details.
"""
#-----------------------------------------------------------------------------
# Main application
#-----------------------------------------------------------------------------
aliases = {}
aliases.update(base_aliases)
aliases.update(dict(url='LogWatcher.url', topics='LogWatcher.topics'))
class IPLoggerApp(BaseParallelApplication):
name = u'iplogger'
description = _description
classes = [LogWatcher, ProfileDir]
aliases = Dict(aliases)
@catch_config_error
def initialize(self, argv=None):
super(IPLoggerApp, self).initialize(argv)
self.init_watcher()
def init_watcher(self):
try:
self.watcher = LogWatcher(parent=self, log=self.log)
        except Exception:
self.log.error("Couldn't start the LogWatcher", exc_info=True)
self.exit(1)
self.log.info("Listening for log messages on %r"%self.watcher.url)
def start(self):
self.watcher.start()
try:
self.watcher.loop.start()
except KeyboardInterrupt:
self.log.critical("Logging Interrupted, shutting down...\n")
launch_new_instance = IPLoggerApp.launch_instance
if __name__ == '__main__':
launch_new_instance()
|
gpl-3.0
|
amondnet/gitinspector
|
gitinspector/timeline.py
|
47
|
8918
|
# coding: utf-8
#
# Copyright © 2012-2013 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
from localization import N_
from outputable import Outputable
import datetime
import format
import gravatar
import terminal
import textwrap
class TimelineData:
def __init__(self, changes, useweeks):
authordateinfo_list = sorted(changes.get_authordateinfo_list().items())
self.changes = changes
self.entries = {}
self.total_changes_by_period = {}
self.useweeks = useweeks
for i in authordateinfo_list:
key = None
if useweeks:
yearweek = datetime.date(int(i[0][0][0:4]), int(i[0][0][5:7]), int(i[0][0][8:10])).isocalendar()
key = (i[0][1], str(yearweek[0]) + "W" + "{0:02d}".format(yearweek[1]))
else:
key = (i[0][1], i[0][0][0:7])
if self.entries.get(key, None) == None:
self.entries[key] = i[1]
else:
self.entries[key].insertions += i[1].insertions
self.entries[key].deletions += i[1].deletions
for period in self.get_periods():
total_insertions = 0
total_deletions = 0
for author in self.get_authors():
entry = self.entries.get((author[0], period), None)
if entry != None:
total_insertions += entry.insertions
total_deletions += entry.deletions
self.total_changes_by_period[period] = (total_insertions, total_deletions,
total_insertions + total_deletions)
def get_periods(self):
return sorted(set([i[1] for i in self.entries]))
def get_total_changes_in_period(self, period):
return self.total_changes_by_period[period]
def get_authors(self):
return sorted(set([(i[0][0], self.changes.get_latest_email_by_author(i[0][0])) for i in self.entries.items()]))
def get_author_signs_in_period(self, author, period, multiplier):
authorinfo = self.entries.get((author, period), None)
total = float(self.total_changes_by_period[period][2])
if authorinfo:
i = multiplier * (self.entries[(author, period)].insertions / total)
j = multiplier * (self.entries[(author, period)].deletions / total)
return (int(i), int(j))
else:
return (0, 0)
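	# Added note: get_multiplier() below searches in 0.25 steps for the smallest
	# multiplier at which the largest author's share of the period's changes would
	# exceed max_width; it is the scale factor used when drawing the +/- bars.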
def get_multiplier(self, period, max_width):
multiplier = 0
while True:
for i in self.entries:
entry = self.entries.get(i)
if period == i[1]:
changes_in_period = float(self.total_changes_by_period[i[1]][2])
if multiplier * (entry.insertions + entry.deletions) / changes_in_period > max_width:
return multiplier
multiplier += 0.25
def is_author_in_period(self, period, author):
return self.entries.get((author, period), None) != None
def is_author_in_periods(self, periods, author):
for period in periods:
if self.is_author_in_period(period, author):
return True
return False
TIMELINE_INFO_TEXT = N_("The following history timeline has been gathered from the repository")
MODIFIED_ROWS_TEXT = N_("Modified Rows:")
def __output_row__text__(timeline_data, periods, names):
print("\n" + terminal.__bold__ + terminal.ljust(_("Author"), 20), end=" ")
for period in periods:
print(terminal.rjust(period, 10), end=" ")
print(terminal.__normal__)
for name in names:
if timeline_data.is_author_in_periods(periods, name[0]):
print(terminal.ljust(name[0], 20)[0:20 - terminal.get_excess_column_count(name[0])], end=" ")
for period in periods:
multiplier = timeline_data.get_multiplier(period, 9)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+")
print (("." if timeline_data.is_author_in_period(period, name[0]) and
len(signs_str) == 0 else signs_str).rjust(10), end=" ")
print("")
print(terminal.__bold__ + terminal.ljust(_(MODIFIED_ROWS_TEXT), 20) + terminal.__normal__, end=" ")
for period in periods:
total_changes = str(timeline_data.get_total_changes_in_period(period)[2])
if hasattr(total_changes, 'decode'):
total_changes = total_changes.decode("utf-8", "replace")
print(terminal.rjust(total_changes, 10), end=" ")
print("")
def __output_row__html__(timeline_data, periods, names):
timeline_xml = "<table class=\"git full\"><thead><tr><th>" + _("Author") + "</th>"
for period in periods:
timeline_xml += "<th>" + str(period) + "</th>"
timeline_xml += "</tr></thead><tbody>"
i = 0
for name in names:
if timeline_data.is_author_in_periods(periods, name[0]):
timeline_xml += "<tr" + (" class=\"odd\">" if i % 2 == 1 else ">")
if format.get_selected() == "html":
timeline_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(name[1]), name[0])
else:
timeline_xml += "<td>" + name[0] + "</td>"
for period in periods:
multiplier = timeline_data.get_multiplier(period, 18)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "<div class=\"remove\"> </div>" + signs[0] * "<div class=\"insert\"> </div>")
timeline_xml += "<td>" + ("." if timeline_data.is_author_in_period(period, name[0]) and len(signs_str) == 0 else signs_str)
timeline_xml += "</td>"
timeline_xml += "</tr>"
i = i + 1
timeline_xml += "<tfoot><tr><td><strong>" + _(MODIFIED_ROWS_TEXT) + "</strong></td>"
for period in periods:
total_changes = timeline_data.get_total_changes_in_period(period)
timeline_xml += "<td>" + str(total_changes[2]) + "</td>"
timeline_xml += "</tr></tfoot></tbody></table>"
print(timeline_xml)
class Timeline(Outputable):
def __init__(self, changes, useweeks):
self.changes = changes
self.useweeks = useweeks
Outputable.__init__(self)
def output_text(self):
if self.changes.get_commits():
print("\n" + textwrap.fill(_(TIMELINE_INFO_TEXT) + ":", width=terminal.get_size()[0]))
timeline_data = TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
(width, _unused) = terminal.get_size()
max_periods_per_row = int((width - 21) / 11)
for i in range(0, len(periods), max_periods_per_row):
__output_row__text__(timeline_data, periods[i:i+max_periods_per_row], names)
def output_html(self):
if self.changes.get_commits():
timeline_data = TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
max_periods_per_row = 8
timeline_xml = "<div><div id=\"timeline\" class=\"box\">"
timeline_xml += "<p>" + _(TIMELINE_INFO_TEXT) + ".</p>"
print(timeline_xml)
for i in range(0, len(periods), max_periods_per_row):
__output_row__html__(timeline_data, periods[i:i+max_periods_per_row], names)
timeline_xml = "</div></div>"
print(timeline_xml)
def output_xml(self):
if self.changes.get_commits():
message_xml = "\t\t<message>" + _(TIMELINE_INFO_TEXT) + "</message>\n"
timeline_xml = ""
periods_xml = "\t\t<periods length=\"{0}\">\n".format("week" if self.useweeks else "month")
timeline_data = TimelineData(self.changes, self.useweeks)
periods = timeline_data.get_periods()
names = timeline_data.get_authors()
for period in periods:
name_xml = "\t\t\t\t<name>" + str(period) + "</name>\n"
authors_xml = "\t\t\t\t<authors>\n"
for name in names:
if timeline_data.is_author_in_period(period, name[0]):
multiplier = timeline_data.get_multiplier(period, 24)
signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
signs_str = (signs[1] * "-" + signs[0] * "+")
if len(signs_str) == 0:
signs_str = "."
authors_xml += "\t\t\t\t\t<author>\n\t\t\t\t\t\t<name>" + name[0] + "</name>\n"
authors_xml += "\t\t\t\t\t\t<gravatar>" + gravatar.get_url(name[1]) + "</gravatar>\n"
authors_xml += "\t\t\t\t\t\t<work>" + signs_str + "</work>\n\t\t\t\t\t</author>\n"
authors_xml += "\t\t\t\t</authors>\n"
modified_rows_xml = "\t\t\t\t<modified_rows>" + \
str(timeline_data.get_total_changes_in_period(period)[2]) + "</modified_rows>\n"
timeline_xml += "\t\t\t<period>\n" + name_xml + authors_xml + modified_rows_xml + "\t\t\t</period>\n"
print("\t<timeline>\n" + message_xml + periods_xml + timeline_xml + "\t\t</periods>\n\t</timeline>")
|
gpl-3.0
|
civisanalytics/ansible
|
lib/ansible/modules/cloud/docker/docker_login.py
|
26
|
10928
|
#!/usr/bin/python
#
# (c) 2016 Olaf Kilian <[email protected]>
# Chris Houseknecht, <[email protected]>
# James Tanner, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: docker_login
short_description: Log into a Docker registry.
version_added: "2.0"
description:
- Provides functionality similar to the "docker login" command.
- Authenticate with a docker registry and add the credentials to your local Docker config file. Adding the
credentials to the config files allows future connections to the registry using tools such as Ansible's Docker
modules, the Docker CLI and docker-py without needing to provide credentials.
- Running in check mode will perform the authentication without updating the config file.
options:
registry_url:
required: False
description:
- The registry URL.
default: "https://index.docker.io/v1/"
aliases:
- registry
- url
username:
description:
- The username for the registry account
required: True
password:
description:
- The plaintext password for the registry account
required: True
email:
required: False
description:
- "The email address for the registry account. NOTE: private registries may not require this,
but Docker Hub requires it."
default: None
reauthorize:
required: False
description:
      - Refresh existing authentication found in the configuration file.
default: no
choices: ['yes', 'no']
aliases:
- reauth
config_path:
description:
- Custom path to the Docker CLI configuration file.
default: ~/.docker/config.json
required: False
aliases:
- self.config_path
- dockercfg_path
state:
version_added: '2.3'
description:
      - This controls the current state of the user. C(present) will log the user in, C(absent) will log them out.
      - To log out you only need the registry server, which defaults to DockerHub.
- Before 2.1 you could ONLY log in.
- docker does not support 'logout' with a custom config file.
choices: ['present', 'absent']
default: 'present'
required: False
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
- 'Only to be able to logout (state=absent): the docker command line utility'
authors:
- "Olaf Kilian <[email protected]>"
- "Chris Houseknecht (@chouseknecht)"
- "James Tanner (@jctanner)"
'''
EXAMPLES = '''
- name: Log into DockerHub
docker_login:
username: docker
password: rekcod
email: [email protected]
- name: Log into private registry and force re-authorization
docker_login:
registry: your.private.registry.io
username: yourself
password: secrets3
reauthorize: yes
- name: Log into DockerHub using a custom config file
docker_login:
username: docker
password: rekcod
email: [email protected]
config_path: /tmp/.mydockercfg
- name: Log out of DockerHub
docker_login:
state: absent
email: [email protected]
'''
RETURN = '''
login_results:
description: Results from the login.
returned: when state='present'
type: dict
sample: {
"email": "[email protected]",
"password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"serveraddress": "localhost:5000",
"username": "testuser"
}
'''
import base64
from ansible.module_utils.docker_common import *
class LoginManager(DockerBaseClass):
def __init__(self, client, results):
super(LoginManager, self).__init__()
self.client = client
self.results = results
parameters = self.client.module.params
self.check_mode = self.client.check_mode
self.registry_url = parameters.get('registry_url')
self.username = parameters.get('username')
self.password = parameters.get('password')
self.email = parameters.get('email')
self.reauthorize = parameters.get('reauthorize')
self.config_path = parameters.get('config_path')
if parameters['state'] == 'present':
self.login()
else:
self.logout()
def fail(self, msg):
self.client.fail(msg)
def login(self):
'''
Log into the registry with provided username/password. On success update the config
file with the new authorization.
:return: None
'''
if self.email and not re.match(EMAIL_REGEX, self.email):
self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
"/%s/" % (EMAIL_REGEX))
self.results['actions'].append("Logged into %s" % (self.registry_url))
self.log("Log into %s with username %s" % (self.registry_url, self.username))
try:
response = self.client.login(
self.username,
password=self.password,
email=self.email,
registry=self.registry_url,
reauth=self.reauthorize,
dockercfg_path=self.config_path
)
except Exception as exc:
self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
self.results['login_result'] = response
if not self.check_mode:
self.update_config_file()
def logout(self):
'''
Log out of the registry. On success update the config file.
TODO: port to API once docker.py supports this.
:return: None
'''
cmd = "%s logout " % self.client.module.get_bin_path('docker', True)
#TODO: docker does not support config file in logout, restore this when they do
#if self.config_path and self.config_file_exists(self.config_path):
# cmd += "--config '%s' " % self.config_path
cmd += "'%s'" % self.registry_url
(rc, out, err) = self.client.module.run_command(cmd)
if rc != 0:
self.fail("Could not log out: %s" % err)
def config_file_exists(self, path):
if os.path.exists(path):
self.log("Configuration file %s exists" % (path))
return True
self.log("Configuration file %s not found." % (path))
return False
def create_config_file(self, path):
'''
Create a config file with a JSON blob containing an auths key.
:return: None
'''
self.log("Creating docker config file %s" % (path))
config_path_dir = os.path.dirname(path)
if not os.path.exists(config_path_dir):
try:
os.makedirs(config_path_dir)
except Exception as exc:
self.fail("Error: failed to create %s - %s" % (config_path_dir, str(exc)))
self.write_config(path, dict(auths=dict()))
def write_config(self, path, config):
try:
            with open(path, "w") as config_file:
                json.dump(config, config_file, indent=5, sort_keys=True)
except Exception as exc:
self.fail("Error: failed to write config to %s - %s" % (path, str(exc)))
def update_config_file(self):
'''
If the authorization not stored in the config file or reauthorize is True,
update the config file with the new authorization.
:return: None
'''
path = os.path.expanduser(self.config_path)
if not self.config_file_exists(path):
self.create_config_file(path)
try:
# read the existing config
            with open(path, "r") as config_file:
                config = json.load(config_file)
except ValueError:
self.log("Error reading config from %s" % (path))
config = dict()
if not config.get('auths'):
self.log("Adding auths dict to config.")
config['auths'] = dict()
if not config['auths'].get(self.registry_url):
self.log("Adding registry_url %s to auths." % (self.registry_url))
config['auths'][self.registry_url] = dict()
encoded_credentials = dict(
auth=base64.b64encode(self.username + b':' + self.password),
email=self.email
)
if config['auths'][self.registry_url] != encoded_credentials or self.reauthorize:
# Update the config file with the new authorization
config['auths'][self.registry_url] = encoded_credentials
self.log("Updating config file %s with new authorization for %s" % (path, self.registry_url))
self.results['actions'].append("Updated config file %s with new authorization for %s" % (
path, self.registry_url))
self.results['changed'] = True
self.write_config(path, config)
def main():
argument_spec=dict(
registry_url=dict(type='str', required=False, default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
username=dict(type='str', required=False),
password=dict(type='str', required=False, no_log=True),
email=dict(type='str'),
reauthorize=dict(type='bool', default=False, aliases=['reauth']),
state=dict(type='str', default='present', choices=['present', 'absent']),
config_path=dict(type='str', default='~/.docker/config.json', aliases=['self.config_path', 'dockercfg_path']),
)
required_if = [
('state', 'present', ['username', 'password']),
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if
)
results = dict(
changed=False,
actions=[],
login_result={}
)
if client.module.params['state'] == 'present' and client.module.params['registry_url'] == DEFAULT_DOCKER_REGISTRY and not client.module.params['email']:
client.module.fail_json(msg="'email' is required when logging into DockerHub")
LoginManager(client, results)
if 'actions' in results:
del results['actions']
client.module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
varunarya10/nova_test_latest
|
nova/tests/unit/objects/test_pci_device.py
|
22
|
11196
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_utils import timeutils
from nova import context
from nova import db
from nova.objects import instance
from nova.objects import pci_device
from nova.tests.unit.objects import test_objects
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 0,
'status': 'available'}
fake_db_dev = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': 'a',
'vendor_id': 'v',
'product_id': 'p',
'numa_node': 0,
'dev_type': 't',
'status': 'available',
'dev_id': 'i',
'label': 'l',
'instance_uuid': None,
'extra_info': '{}',
'request_id': None,
}
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': 'a1',
'vendor_id': 'v1',
'product_id': 'p1',
'numa_node': 1,
'dev_type': 't',
'status': 'available',
'dev_id': 'i',
'label': 'l',
'instance_uuid': None,
'extra_info': '{}',
'request_id': None,
}
class _TestPciDeviceObject(object):
def _create_fake_instance(self):
self.inst = instance.Instance()
self.inst.uuid = 'fake-inst-uuid'
self.inst.pci_devices = pci_device.PciDeviceList()
def _create_fake_pci_device(self, ctxt=None):
if not ctxt:
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
def test_create_pci_device(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'product_id', 'vendor_id',
'numa_node', 'status', 'address', 'extra_info']))
def test_pci_device_extra_info(self):
self.dev_dict = copy.copy(dev_dict)
self.dev_dict['k1'] = 'v1'
self.dev_dict['k2'] = 'v2'
self.pci_device = pci_device.PciDevice.create(self.dev_dict)
extra_value = self.pci_device.extra_info
self.assertEqual(extra_value.get('k1'), 'v1')
self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'address', 'product_id',
'vendor_id', 'numa_node', 'status',
'extra_info']))
def test_update_device(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p2', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['vendor_id', 'product_id']))
def test_update_device_same_value(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['vendor_id', 'product_id']))
def test_get_by_dev_addr(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(), set())
def test_get_by_dev_id(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_id')
db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(), set())
def test_save(self):
ctxt = context.get_admin_context()
self._create_fake_pci_device(ctxt=ctxt)
return_dev = dict(fake_db_dev, status='available',
instance_uuid='fake-uuid-3')
self.pci_device.status = 'allocated'
self.pci_device.instance_uuid = 'fake-uuid-2'
expected_updates = dict(status='allocated',
instance_uuid='fake-uuid-2')
self.mox.StubOutWithMock(db, 'pci_device_update')
db.pci_device_update(ctxt, 1, 'a',
expected_updates).AndReturn(return_dev)
self.mox.ReplayAll()
self.pci_device.save()
self.assertEqual(self.pci_device.status, 'available')
self.assertEqual(self.pci_device.instance_uuid,
'fake-uuid-3')
def test_save_no_extra_info(self):
return_dev = dict(fake_db_dev, status='available',
instance_uuid='fake-uuid-3')
def _fake_update(ctxt, node_id, addr, updates):
self.extra_info = updates.get('extra_info')
return return_dev
ctxt = context.get_admin_context()
self.stubs.Set(db, 'pci_device_update', _fake_update)
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device._context = ctxt
self.pci_device.save()
self.assertEqual(self.extra_info, '{}')
def test_save_removed(self):
ctxt = context.get_admin_context()
self._create_fake_pci_device(ctxt=ctxt)
self.pci_device.status = 'removed'
self.mox.StubOutWithMock(db, 'pci_device_destroy')
db.pci_device_destroy(ctxt, 1, 'a')
self.mox.ReplayAll()
self.pci_device.save()
self.assertEqual(self.pci_device.status, 'deleted')
def test_save_deleted(self):
def _fake_destroy(ctxt, node_id, addr):
self.called = True
def _fake_update(ctxt, node_id, addr, updates):
self.called = True
self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)
self.stubs.Set(db, 'pci_device_update', _fake_update)
self._create_fake_pci_device()
self.pci_device.status = 'deleted'
self.called = False
self.pci_device.save()
self.assertEqual(self.called, False)
def test_update_numa_node(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.assertEqual(0, self.pci_device.numa_node)
self.dev_dict = copy.copy(dev_dict)
self.dev_dict['numa_node'] = '1'
self.pci_device = pci_device.PciDevice.create(self.dev_dict)
self.assertEqual(1, self.pci_device.numa_node)
def test_pci_device_equivalent(self):
pci_device1 = pci_device.PciDevice.create(dev_dict)
pci_device2 = pci_device.PciDevice.create(dev_dict)
self.assertEqual(pci_device1, pci_device2)
def test_pci_device_equivalent_with_ignore_field(self):
pci_device1 = pci_device.PciDevice.create(dev_dict)
pci_device2 = pci_device.PciDevice.create(dev_dict)
pci_device2.updated_at = timeutils.utcnow()
self.assertEqual(pci_device1, pci_device2)
def test_pci_device_not_equivalent1(self):
pci_device1 = pci_device.PciDevice.create(dev_dict)
dev_dict2 = copy.copy(dev_dict)
dev_dict2['address'] = 'b'
pci_device2 = pci_device.PciDevice.create(dev_dict2)
self.assertNotEqual(pci_device1, pci_device2)
def test_pci_device_not_equivalent2(self):
pci_device1 = pci_device.PciDevice.create(dev_dict)
pci_device2 = pci_device.PciDevice.create(dev_dict)
delattr(pci_device2, 'address')
self.assertNotEqual(pci_device1, pci_device2)
def test_pci_device_not_equivalent_with_none(self):
pci_device1 = pci_device.PciDevice.create(dev_dict)
pci_device2 = pci_device.PciDevice.create(dev_dict)
pci_device1.instance_uuid = 'aaa'
pci_device2.instance_uuid = None
self.assertNotEqual(pci_device1, pci_device2)
class TestPciDeviceObject(test_objects._LocalTest,
_TestPciDeviceObject):
pass
class TestPciDeviceObjectRemote(test_objects._RemoteTest,
_TestPciDeviceObject):
pass
fake_pci_devs = [fake_db_dev, fake_db_dev_1]
class _TestPciDeviceListObject(object):
def test_get_by_compute_node(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node')
db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs)
self.mox.ReplayAll()
devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1)
for i in range(len(fake_pci_devs)):
self.assertIsInstance(devs[i], pci_device.PciDevice)
self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id)
def test_get_by_instance_uuid(self):
ctxt = context.get_admin_context()
fake_db_1 = dict(fake_db_dev, address='a1',
status='allocated', instance_uuid='1')
fake_db_2 = dict(fake_db_dev, address='a2',
status='allocated', instance_uuid='1')
self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid')
db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn(
[fake_db_1, fake_db_2])
self.mox.ReplayAll()
devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1')
self.assertEqual(len(devs), 2)
for i in range(len(fake_pci_devs)):
self.assertIsInstance(devs[i], pci_device.PciDevice)
self.assertEqual(devs[0].vendor_id, 'v')
self.assertEqual(devs[1].vendor_id, 'v')
class TestPciDeviceListObject(test_objects._LocalTest,
_TestPciDeviceListObject):
pass
class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
_TestPciDeviceListObject):
pass
|
apache-2.0
|
lehinevych/cfme_tests
|
utils/tests/test_wait.py
|
2
|
2434
|
# -*- coding: utf-8 -*-
# pylint: disable=W0621
import pytest
import time
from functools import partial
from utils.wait import wait_for, TimedOutError
pytestmark = [
pytest.mark.nondestructive,
pytest.mark.skip_selenium,
]
class Incrementor():
value = 0
def i_sleep_a_lot(self):
time.sleep(.1)
self.value += 1
return self.value
def test_simple_wait():
incman = Incrementor()
ec, tc = wait_for(incman.i_sleep_a_lot,
fail_condition=0,
delay=.05)
print("Function output {} in time {} ".format(ec, tc))
assert tc < 1, "Should take less than 1 seconds"
def test_lambda_wait():
incman = Incrementor()
ec, tc = wait_for(lambda self: self.i_sleep_a_lot() > 10,
[incman],
delay=.05)
print("Function output {} in time {} ".format(ec, tc))
assert tc < 2, "Should take less than 2 seconds"
def test_lambda_long_wait():
incman = Incrementor()
with pytest.raises(TimedOutError):
wait_for(lambda self: self.i_sleep_a_lot() > 10, [incman],
num_sec=1, message="lambda_long_wait")
def test_partial():
incman = Incrementor()
func = partial(lambda: incman.i_sleep_a_lot() > 10)
with pytest.raises(TimedOutError):
wait_for(func,
num_sec=2, delay=1)
def test_callable_fail_condition():
incman = Incrementor()
with pytest.raises(TimedOutError):
wait_for(
incman.i_sleep_a_lot,
fail_condition=lambda value: value <= 10, num_sec=2, delay=1)
def test_wait_decorator():
incman = Incrementor()
@pytest.wait_for(fail_condition=0, delay=.05)
def a_test():
incman.i_sleep_a_lot()
print("Function output {} in time {} ".format(a_test.out, a_test.duration))
assert a_test.duration < 1, "Should take less than 1 seconds"
def test_wait_decorator_noparams():
incman = Incrementor()
@pytest.wait_for
def a_test():
return incman.i_sleep_a_lot() != 0
print("Function output {} in time {} ".format(a_test.out, a_test.duration))
assert a_test.duration < 1, "Should take less than 1 seconds"
def test_nonnumeric_numsec_timedelta_via_string():
incman = Incrementor()
func = partial(lambda: incman.i_sleep_a_lot() > 10)
with pytest.raises(TimedOutError):
wait_for(func,
timeout="2s", delay=1)
|
gpl-2.0
|
mszewczy/odoo
|
addons/l10n_lu/scripts/tax2csv.py
|
257
|
7763
|
from collections import OrderedDict
import csv
import xlrd
def _e(s):
if type(s) is unicode:
return s.encode('utf8')
elif s is None:
return ''
else:
return str(s)
def _is_true(s):
return s not in ('F', 'False', 0, '', None, False)
class LuxTaxGenerator:
def __init__(self, filename):
        self.workbook = xlrd.open_workbook(filename)
self.sheet_info = \
self.workbook.sheet_by_name('INFO')
self.sheet_taxes = \
self.workbook.sheet_by_name('TAXES')
self.sheet_tax_codes = \
self.workbook.sheet_by_name('TAX.CODES')
self.sheet_fiscal_pos_map = \
self.workbook.sheet_by_name('FISCAL.POSITION.MAPPINGS')
self.suffix = self.sheet_info.cell_value(4, 2)
def iter_tax_codes(self):
keys = map(lambda c: c.value, self.sheet_tax_codes.row(0))
yield keys
for i in range(1, self.sheet_tax_codes.nrows):
row = map(lambda c: c.value, self.sheet_tax_codes.row(i))
d = OrderedDict(zip(keys, row))
d['sign'] = int(d['sign'])
d['sequence'] = int(d['sequence'])
yield d
def iter_taxes(self):
keys = map(lambda c: c.value, self.sheet_taxes.row(0))
yield keys
for i in range(1, self.sheet_taxes.nrows):
row = map(lambda c: c.value, self.sheet_taxes.row(i))
yield OrderedDict(zip(keys, row))
def iter_fiscal_pos_map(self):
keys = map(lambda c: c.value, self.sheet_fiscal_pos_map.row(0))
yield keys
for i in range(1, self.sheet_fiscal_pos_map.nrows):
row = map(lambda c: c.value, self.sheet_fiscal_pos_map.row(i))
yield OrderedDict(zip(keys, row))
def tax_codes_to_csv(self):
writer = csv.writer(open('account.tax.code.template-%s.csv' %
self.suffix, 'wb'))
tax_codes_iterator = self.iter_tax_codes()
keys = tax_codes_iterator.next()
writer.writerow(keys)
# write structure tax codes
tax_codes = {} # code: id
for row in tax_codes_iterator:
tax_code = row['code']
if tax_code in tax_codes:
raise RuntimeError('duplicate tax code %s' % tax_code)
tax_codes[tax_code] = row['id']
writer.writerow(map(_e, row.values()))
# read taxes and add leaf tax codes
new_tax_codes = {} # id: parent_code
def add_new_tax_code(tax_code_id, new_name, new_parent_code):
if not tax_code_id:
return
name, parent_code = new_tax_codes.get(tax_code_id, (None, None))
if parent_code and parent_code != new_parent_code:
raise RuntimeError('tax code "%s" already exist with '
'parent %s while trying to add it with '
'parent %s' %
(tax_code_id, parent_code, new_parent_code))
else:
new_tax_codes[tax_code_id] = (new_name, new_parent_code)
taxes_iterator = self.iter_taxes()
keys = taxes_iterator.next()
for row in taxes_iterator:
if not _is_true(row['active']):
continue
if row['child_depend'] and row['amount'] != 1:
raise RuntimeError('amount must be one if child_depend '
'for %s' % row['id'])
# base parent
base_code = row['BASE_CODE']
if not base_code or base_code == '/':
base_code = 'NA'
if base_code not in tax_codes:
raise RuntimeError('undefined tax code %s' % base_code)
if base_code != 'NA':
if row['child_depend']:
raise RuntimeError('base code specified '
'with child_depend for %s' % row['id'])
if not row['child_depend']:
# ... in lux, we have the same code for invoice and refund
if base_code != 'NA':
assert row['base_code_id:id'], 'missing base_code_id for %s' % row['id']
assert row['ref_base_code_id:id'] == row['base_code_id:id']
add_new_tax_code(row['base_code_id:id'],
'Base - ' + row['name'],
base_code)
# tax parent
tax_code = row['TAX_CODE']
if not tax_code or tax_code == '/':
tax_code = 'NA'
if tax_code not in tax_codes:
raise RuntimeError('undefined tax code %s' % tax_code)
if tax_code == 'NA':
if row['amount'] and not row['child_depend']:
raise RuntimeError('TAX_CODE not specified '
'for non-zero tax %s' % row['id'])
if row['tax_code_id:id']:
raise RuntimeError('tax_code_id specified '
'for tax %s' % row['id'])
else:
if row['child_depend']:
raise RuntimeError('TAX_CODE specified '
'with child_depend for %s' % row['id'])
if not row['amount']:
raise RuntimeError('TAX_CODE specified '
'for zero tax %s' % row['id'])
if not row['tax_code_id:id']:
raise RuntimeError('tax_code_id not specified '
'for tax %s' % row['id'])
if not row['child_depend'] and row['amount']:
# ... in lux, we have the same code for invoice and refund
assert row['tax_code_id:id'], 'missing tax_code_id for %s' % row['id']
assert row['ref_tax_code_id:id'] == row['tax_code_id:id']
add_new_tax_code(row['tax_code_id:id'],
'Taxe - ' + row['name'],
tax_code)
for tax_code_id in sorted(new_tax_codes):
name, parent_code = new_tax_codes[tax_code_id]
writer.writerow((tax_code_id,
'lu_tct_m' + parent_code,
tax_code_id.replace('lu_tax_code_template_', ''),
'1',
'',
_e(name),
''))
def taxes_to_csv(self):
writer = csv.writer(open('account.tax.template-%s.csv' %
self.suffix, 'wb'))
taxes_iterator = self.iter_taxes()
keys = taxes_iterator.next()
writer.writerow(keys[3:] + ['sequence'])
seq = 100
for row in sorted(taxes_iterator, key=lambda r: r['description']):
if not _is_true(row['active']):
continue
seq += 1
if row['parent_id:id']:
cur_seq = seq + 1000
else:
cur_seq = seq
writer.writerow(map(_e, row.values()[3:]) + [cur_seq])
def fiscal_pos_map_to_csv(self):
writer = csv.writer(open('account.fiscal.'
'position.tax.template-%s.csv' %
self.suffix, 'wb'))
fiscal_pos_map_iterator = self.iter_fiscal_pos_map()
keys = fiscal_pos_map_iterator.next()
writer.writerow(keys)
for row in fiscal_pos_map_iterator:
writer.writerow(map(_e, row.values()))
if __name__ == '__main__':
o = LuxTaxGenerator('tax.xls')
o.tax_codes_to_csv()
o.taxes_to_csv()
o.fiscal_pos_map_to_csv()
|
agpl-3.0
|
lduarte1991/edx-platform
|
openedx/core/djangoapps/content/block_structure/api.py
|
25
|
1921
|
"""
Higher order functions built on the BlockStructureManager to interact with a django cache.
"""
from django.core.cache import cache
from xmodule.modulestore.django import modulestore
from .manager import BlockStructureManager
def get_course_in_cache(course_key):
"""
A higher order function implemented on top of the
block_structure.get_collected function that returns the block
structure in the cache for the given course_key.
Returns:
BlockStructureBlockData - The collected block structure,
starting at root_block_usage_key.
"""
return get_block_structure_manager(course_key).get_collected()
def update_course_in_cache(course_key):
"""
A higher order function implemented on top of the
block_structure.updated_collected function that updates the block
structure in the cache for the given course_key.
"""
return get_block_structure_manager(course_key).update_collected_if_needed()
def clear_course_from_cache(course_key):
"""
A higher order function implemented on top of the
block_structure.clear_block_cache function that clears the block
structure from the cache for the given course_key.
Note: See Note in get_course_blocks. Even after MA-1604 is
implemented, this implementation should still be valid since the
entire block structure of the course is cached, even though
arbitrary access to an intermediate block will be supported.
"""
get_block_structure_manager(course_key).clear()
def get_block_structure_manager(course_key):
"""
Returns the manager for managing Block Structures for the given course.
"""
store = modulestore()
course_usage_key = store.make_course_usage_key(course_key)
return BlockStructureManager(course_usage_key, store, get_cache())
def get_cache():
"""
Returns the storage for caching Block Structures.
"""
return cache
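# Added usage sketch (not part of the original module; the course id is illustrative):
#
#   from opaque_keys.edx.keys import CourseKey
#   course_key = CourseKey.from_string("course-v1:edX+DemoX+Demo_Course")
#   update_course_in_cache(course_key)        # collect and cache if needed
#   blocks = get_course_in_cache(course_key)  # read the cached block structure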
|
agpl-3.0
|
chanceraine/nupic
|
tests/swarming/nupic/swarming/experiments/field_threshold_temporal/permutations.py
|
38
|
5399
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'attendance'
permutations = {
'modelParams': {
'sensorParams': {
'encoders': {
'attendance': PermuteEncoder(fieldName='attendance', encoderClass='AdaptiveScalarEncoder', maxval=36067, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
'visitor_winloss': PermuteEncoder(fieldName='visitor_winloss', encoderClass='AdaptiveScalarEncoder', maxval=0.786, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
'timestamp_dayOfWeek': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteChoices([1, 3]), w=7),
'precip': PermuteEncoder(fieldName='precip', encoderClass='SDRCategoryEncoder', w=7, n=100),
'timestamp_timeOfDay': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteChoices([1, 8]), w=7),
'daynight': PermuteEncoder(fieldName='daynight', encoderClass='SDRCategoryEncoder', w=7, n=100),
'home_winloss': PermuteEncoder(fieldName='home_winloss', encoderClass='AdaptiveScalarEncoder', maxval=0.7, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
},
},
'tpParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*attendance.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'nonprediction:aae:window=1000:field=attendance')
minimize = 'prediction:aae:window=1000:field=attendance'
def dummyModelParams(perm):
""" This function can be used for Hypersearch algorithm development. When
present, we don't actually run the CLA model in the OPF, but instead run
a dummy model. This function returns the dummy model params that will be
used. See the OPFDummyModelRunner class source code (in
nupic.swarming.ModelRunner) for a description of the schema for
the dummy model params.
"""
#By contribution order from most to least A,B,C,D,E,F
#Want A,E combo to give the most contribution followed by A,D then A,C
errScore = 100
#A
if not perm['modelParams']['sensorParams']['encoders']['visitor_winloss'] \
is None:
errScore -= 25
#B
if not perm['modelParams']['sensorParams']['encoders']['home_winloss'] \
is None:
errScore -= 20
#C
if not perm['modelParams']['sensorParams']['encoders']\
['timestamp_timeOfDay'] is None:
errScore -= 15
#D
if not perm['modelParams']['sensorParams']['encoders']\
['timestamp_dayOfWeek'] is None:
errScore -= 10
#E
if not perm['modelParams']['sensorParams']['encoders']['precip'] is None:
errScore -= 5
#F
if not perm['modelParams']['sensorParams']['encoders']['daynight'] is None:
errScore += 10
dummyModelParams = dict(
metricValue = errScore,
metricFunctions = None,
)
return dummyModelParams
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
  of the variables in the permutations dict. It should return True for a valid
  combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
|
agpl-3.0
|
40223114/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/numbers.py
|
883
|
10398
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
"""All numbers inherit from this class.
If you just want to check if an argument x is a number, without
caring what kind, use isinstance(x, Number).
"""
__slots__ = ()
# Concrete numeric types must provide their own hash implementation
__hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
"""Complex defines the operations that work on the builtin complex type.
In short, those are: a conversion to complex, .real, .imag, +, -,
*, /, abs(), .conjugate, ==, and !=.
    If it is given heterogeneous arguments, and doesn't have special
knowledge about them, it should fall back to the builtin complex
type as described below.
"""
__slots__ = ()
@abstractmethod
def __complex__(self):
"""Return a builtin complex instance. Called for complex(self)."""
def __bool__(self):
"""True if self != 0. Called for bool(self)."""
return self != 0
@property
@abstractmethod
def real(self):
"""Retrieve the real component of this number.
This should subclass Real.
"""
raise NotImplementedError
@property
@abstractmethod
def imag(self):
"""Retrieve the imaginary component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractmethod
def __add__(self, other):
"""self + other"""
raise NotImplementedError
@abstractmethod
def __radd__(self, other):
"""other + self"""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""-self"""
raise NotImplementedError
@abstractmethod
def __pos__(self):
"""+self"""
raise NotImplementedError
def __sub__(self, other):
"""self - other"""
return self + -other
def __rsub__(self, other):
"""other - self"""
return -self + other
@abstractmethod
def __mul__(self, other):
"""self * other"""
raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
"""other * self"""
raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
"""self / other: Should promote to float when necessary."""
raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
"""other / self"""
raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
"""self**exponent; should promote to float or complex when necessary."""
raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
"""base ** self"""
raise NotImplementedError
@abstractmethod
def __abs__(self):
"""Returns the Real distance from 0. Called for abs(self)."""
raise NotImplementedError
@abstractmethod
def conjugate(self):
"""(x+y*i).conjugate() returns (x-y*i)."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""self == other"""
raise NotImplementedError
def __ne__(self, other):
"""self != other"""
# The default __ne__ doesn't negate __eq__ until 3.0.
return not (self == other)
Complex.register(complex)
class Real(Complex):
"""To Complex, Real adds the operations that work on real numbers.
In short, those are: a conversion to float, trunc(), divmod,
%, <, <=, >, and >=.
Real also provides defaults for the derived operations.
"""
__slots__ = ()
@abstractmethod
def __float__(self):
"""Any Real can be converted to a native float object.
Called for float(self)."""
raise NotImplementedError
@abstractmethod
def __trunc__(self):
"""trunc(self): Truncates self to an Integral.
Returns an Integral i such that:
* i>0 iff self>0;
* abs(i) <= abs(self);
* for any Integral j satisfying the first two conditions,
abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
i.e. "truncate towards 0".
"""
raise NotImplementedError
@abstractmethod
def __floor__(self):
"""Finds the greatest Integral <= self."""
raise NotImplementedError
@abstractmethod
def __ceil__(self):
"""Finds the least Integral >= self."""
raise NotImplementedError
@abstractmethod
def __round__(self, ndigits=None):
"""Rounds self to ndigits decimal places, defaulting to 0.
If ndigits is omitted or None, returns an Integral, otherwise
returns a Real. Rounds half toward even.
"""
raise NotImplementedError
def __divmod__(self, other):
"""divmod(self, other): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (self // other, self % other)
def __rdivmod__(self, other):
"""divmod(other, self): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
"""self // other: The floor() of self/other."""
raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
"""other // self: The floor() of other/self."""
raise NotImplementedError
@abstractmethod
def __mod__(self, other):
"""self % other"""
raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
"""other % self"""
raise NotImplementedError
@abstractmethod
def __lt__(self, other):
"""self < other
< on Reals defines a total ordering, except perhaps for NaN."""
raise NotImplementedError
@abstractmethod
def __le__(self, other):
"""self <= other"""
raise NotImplementedError
# Concrete implementations of Complex abstract methods.
def __complex__(self):
"""complex(self) == complex(float(self), 0)"""
return complex(float(self))
@property
def real(self):
"""Real numbers are their real component."""
return +self
@property
def imag(self):
"""Real numbers have no imaginary component."""
return 0
def conjugate(self):
"""Conjugate is a no-op for Reals."""
return +self
Real.register(float)
class Rational(Real):
""".numerator and .denominator should be in lowest terms."""
__slots__ = ()
@property
@abstractmethod
def numerator(self):
raise NotImplementedError
@property
@abstractmethod
def denominator(self):
raise NotImplementedError
# Concrete implementation of Real's conversion to float.
def __float__(self):
"""float(self) = self.numerator / self.denominator
It's important that this conversion use the integer's "true"
division rather than casting one side to float before dividing
so that ratios of huge integers convert without overflowing.
"""
return self.numerator / self.denominator
class Integral(Rational):
"""Integral adds a conversion to int and the bit-string operations."""
__slots__ = ()
@abstractmethod
def __int__(self):
"""int(self)"""
raise NotImplementedError
def __index__(self):
"""Called whenever an index is needed, such as in slicing"""
return int(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
"""self ** exponent % modulus, but maybe faster.
Accept the modulus argument if you want to support the
3-argument version of pow(). Raise a TypeError if exponent < 0
or any argument isn't Integral. Otherwise, just implement the
2-argument version described in Complex.
"""
raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
"""self << other"""
raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
"""other << self"""
raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
"""self >> other"""
raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
"""other >> self"""
raise NotImplementedError
@abstractmethod
def __and__(self, other):
"""self & other"""
raise NotImplementedError
@abstractmethod
def __rand__(self, other):
"""other & self"""
raise NotImplementedError
@abstractmethod
def __xor__(self, other):
"""self ^ other"""
raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
"""other ^ self"""
raise NotImplementedError
@abstractmethod
def __or__(self, other):
"""self | other"""
raise NotImplementedError
@abstractmethod
def __ror__(self, other):
"""other | self"""
raise NotImplementedError
@abstractmethod
def __invert__(self):
"""~self"""
raise NotImplementedError
# Concrete implementations of Rational and Real abstract methods.
def __float__(self):
"""float(self) == float(int(self))"""
return float(int(self))
@property
def numerator(self):
"""Integers are their own numerators."""
return +self
@property
def denominator(self):
"""Integers have a denominator of 1."""
return 1
Integral.register(int)
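# --- Usage sketch (not part of the original module; illustrative only) ---
# The ABCs above are meant for isinstance() checks across the numeric tower.
# Because int, float and complex were registered above, built-in values already
# satisfy the appropriate levels of the hierarchy. `_describe` is a hypothetical
# helper added here purely for demonstration.
def _describe(value):
    """Classify a value against the numeric tower defined in this module."""
    if isinstance(value, Integral):
        return "Integral"
    if isinstance(value, Real):
        return "Real"
    if isinstance(value, Complex):
        return "Complex"
    return "not a number"
if __name__ == "__main__":
    print(_describe(3))        # Integral
    print(_describe(0.5))      # Real (but not Integral)
    print(_describe(2 + 3j))   # Complex (but not Real)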
|
gpl-3.0
|
rwl/PyCIM
|
CIM15/IEC61970/Informative/InfWork/TypeMaterial.py
|
1
|
7646
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Document import Document
class TypeMaterial(Document):
"""Documentation for a generic material item that may be used for design, work and other purposes. Any number of MaterialItems manufactured by various vendors may be used to perform this TypeMaterial. Note that class analagous to 'AssetModel' is not used for material items. This is because in some cases, for example, a utility sets up a Master material record for a 3 inch long half inch diameter steel bolt and they do not necessarily care what specific supplier is providing the material item. As different vendors are used to supply the part, the Stock Code of the material item can stay the same. In other cases, each time the vendor changes, a new stock code is set up so they can track material used by vendor. Therefore a Material Item 'Model' is not typically needed.Documentation for a generic material item that may be used for design, work and other purposes. Any number of MaterialItems manufactured by various vendors may be used to perform this TypeMaterial. Note that class analagous to 'AssetModel' is not used for material items. This is because in some cases, for example, a utility sets up a Master material record for a 3 inch long half inch diameter steel bolt and they do not necessarily care what specific supplier is providing the material item. As different vendors are used to supply the part, the Stock Code of the material item can stay the same. In other cases, each time the vendor changes, a new stock code is set up so they can track material used by vendor. Therefore a Material Item 'Model' is not typically needed.
"""
def __init__(self, stockItem=False, costType='', quantity='', estUnitCost=0.0, ErpIssueInventories=None, MaterialItems=None, CUMaterialItems=None, ErpReqLineItems=None, *args, **kw_args):
"""Initialises a new 'TypeMaterial' instance.
@param stockItem: True if item is a stock item (default).
@param costType: The category of cost to which this Material Item belongs.
@param quantity: The value, unit of measure, and multiplier for the quantity.
@param estUnitCost: The estimated unit cost of this type of material, either for a unit cost or cost per unit length. Cost is for material or asset only and does not include labor to install/construct or configure it.
@param ErpIssueInventories:
@param MaterialItems:
@param CUMaterialItems:
@param ErpReqLineItems:
"""
#: True if item is a stock item (default).
self.stockItem = stockItem
#: The category of cost to which this Material Item belongs.
self.costType = costType
#: The value, unit of measure, and multiplier for the quantity.
self.quantity = quantity
#: The estimated unit cost of this type of material, either for a unit cost or cost per unit length. Cost is for material or asset only and does not include labor to install/construct or configure it.
self.estUnitCost = estUnitCost
self._ErpIssueInventories = []
self.ErpIssueInventories = [] if ErpIssueInventories is None else ErpIssueInventories
self._MaterialItems = []
self.MaterialItems = [] if MaterialItems is None else MaterialItems
self._CUMaterialItems = []
self.CUMaterialItems = [] if CUMaterialItems is None else CUMaterialItems
self._ErpReqLineItems = []
self.ErpReqLineItems = [] if ErpReqLineItems is None else ErpReqLineItems
super(TypeMaterial, self).__init__(*args, **kw_args)
_attrs = ["stockItem", "costType", "quantity", "estUnitCost"]
_attr_types = {"stockItem": bool, "costType": str, "quantity": str, "estUnitCost": float}
_defaults = {"stockItem": False, "costType": '', "quantity": '', "estUnitCost": 0.0}
_enums = {}
_refs = ["ErpIssueInventories", "MaterialItems", "CUMaterialItems", "ErpReqLineItems"]
_many_refs = ["ErpIssueInventories", "MaterialItems", "CUMaterialItems", "ErpReqLineItems"]
def getErpIssueInventories(self):
return self._ErpIssueInventories
def setErpIssueInventories(self, value):
for x in self._ErpIssueInventories:
x.TypeMaterial = None
for y in value:
y._TypeMaterial = self
self._ErpIssueInventories = value
ErpIssueInventories = property(getErpIssueInventories, setErpIssueInventories)
def addErpIssueInventories(self, *ErpIssueInventories):
for obj in ErpIssueInventories:
obj.TypeMaterial = self
def removeErpIssueInventories(self, *ErpIssueInventories):
for obj in ErpIssueInventories:
obj.TypeMaterial = None
def getMaterialItems(self):
return self._MaterialItems
def setMaterialItems(self, value):
for x in self._MaterialItems:
x.TypeMaterial = None
for y in value:
y._TypeMaterial = self
self._MaterialItems = value
MaterialItems = property(getMaterialItems, setMaterialItems)
def addMaterialItems(self, *MaterialItems):
for obj in MaterialItems:
obj.TypeMaterial = self
def removeMaterialItems(self, *MaterialItems):
for obj in MaterialItems:
obj.TypeMaterial = None
def getCUMaterialItems(self):
return self._CUMaterialItems
def setCUMaterialItems(self, value):
for x in self._CUMaterialItems:
x.TypeMaterial = None
for y in value:
y._TypeMaterial = self
self._CUMaterialItems = value
CUMaterialItems = property(getCUMaterialItems, setCUMaterialItems)
def addCUMaterialItems(self, *CUMaterialItems):
for obj in CUMaterialItems:
obj.TypeMaterial = self
def removeCUMaterialItems(self, *CUMaterialItems):
for obj in CUMaterialItems:
obj.TypeMaterial = None
def getErpReqLineItems(self):
return self._ErpReqLineItems
def setErpReqLineItems(self, value):
for x in self._ErpReqLineItems:
x.TypeMaterial = None
for y in value:
y._TypeMaterial = self
self._ErpReqLineItems = value
ErpReqLineItems = property(getErpReqLineItems, setErpReqLineItems)
def addErpReqLineItems(self, *ErpReqLineItems):
for obj in ErpReqLineItems:
obj.TypeMaterial = self
def removeErpReqLineItems(self, *ErpReqLineItems):
for obj in ErpReqLineItems:
obj.TypeMaterial = None
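# --- Usage sketch (illustrative only; assumes the CIM15 package is importable;
# _FakeMaterialItem is a stand-in for the real MaterialItem class) ---
# The add/remove helpers above maintain the back-reference from each material
# item to this TypeMaterial; the real MaterialItem class keeps the reverse list
# in sync through its own TypeMaterial property.
if __name__ == "__main__":
    class _FakeMaterialItem(object):
        TypeMaterial = None
    tm = TypeMaterial(costType="fasteners", quantity="100 each", estUnitCost=0.15)
    item = _FakeMaterialItem()
    tm.addMaterialItems(item)        # sets item.TypeMaterial = tm
    print(item.TypeMaterial is tm)   # True
    tm.removeMaterialItems(item)     # clears the back-reference
    print(item.TypeMaterial)         # None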
|
mit
|
teslaji/homebase
|
venv/HomeBase/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py
|
1004
|
9544
|
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect() and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
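# --- Usage sketch (illustrative only; mirrors the class docstring above) ---
# Shows how `total` caps both the connect and the read budget: once the connect
# clock has started, the read timeout shrinks by however long connecting took.
if __name__ == "__main__":
    t = Timeout(total=5.0, connect=2.0, read=7.0)
    print(t.connect_timeout)        # 2.0 -> min(connect, total)
    t.start_connect()               # start the clock before the first read
    print(t.read_timeout)           # ~5.0 -> min(total - elapsed, read)
    print(t.clone().read_timeout)   # fresh copy, timer not started -> 7.0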
|
gpl-3.0
|
avanov/django
|
tests/gis_tests/test_measure.py
|
325
|
7363
|
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions. Here are some tests.
"""
import unittest
from django.contrib.gis.measure import A, D, Area, Distance
class DistanceTest(unittest.TestCase):
"Testing the Distance object"
def testInit(self):
"Testing initialization from valid units"
d = Distance(m=100)
self.assertEqual(d.m, 100)
d1, d2, d3 = D(m=100), D(meter=100), D(metre=100)
for d in (d1, d2, d3):
self.assertEqual(d.m, 100)
d = D(nm=100)
self.assertEqual(d.m, 185200)
y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100)
for d in (y1, y2, y3):
self.assertEqual(d.yd, 100)
mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000)
for d in (mm1, mm2):
self.assertEqual(d.m, 1.0)
self.assertEqual(d.mm, 1000.0)
def testInitInvalid(self):
"Testing initialization from invalid units"
self.assertRaises(AttributeError, D, banana=100)
def testAccess(self):
"Testing access in different units"
d = D(m=100)
self.assertEqual(d.km, 0.1)
self.assertAlmostEqual(d.ft, 328.084, 3)
def testAccessInvalid(self):
"Testing access in invalid units"
d = D(m=100)
self.assertFalse(hasattr(d, 'banana'))
def testAddition(self):
"Test addition & subtraction"
d1 = D(m=100)
d2 = D(m=200)
d3 = d1 + d2
self.assertEqual(d3.m, 300)
d3 += d1
self.assertEqual(d3.m, 400)
d4 = d1 - d2
self.assertEqual(d4.m, -100)
d4 -= d1
self.assertEqual(d4.m, -200)
with self.assertRaises(TypeError):
d1 + 1
with self.assertRaises(TypeError):
d1 - 1
with self.assertRaises(TypeError):
d1 += 1
with self.assertRaises(TypeError):
d1 -= 1
def testMultiplication(self):
"Test multiplication & division"
d1 = D(m=100)
d3 = d1 * 2
self.assertEqual(d3.m, 200)
d3 = 2 * d1
self.assertEqual(d3.m, 200)
d3 *= 5
self.assertEqual(d3.m, 1000)
d4 = d1 / 2
self.assertEqual(d4.m, 50)
d4 /= 5
self.assertEqual(d4.m, 10)
d5 = d1 / D(m=2)
self.assertEqual(d5, 50)
a5 = d1 * D(m=10)
self.assertIsInstance(a5, Area)
self.assertEqual(a5.sq_m, 100 * 10)
with self.assertRaises(TypeError):
d1 *= D(m=1)
with self.assertRaises(TypeError):
d1 /= D(m=1)
def testUnitConversions(self):
"Testing default units during maths"
d1 = D(m=100)
d2 = D(km=1)
d3 = d1 + d2
self.assertEqual(d3._default_unit, 'm')
d4 = d2 + d1
self.assertEqual(d4._default_unit, 'km')
d5 = d1 * 2
self.assertEqual(d5._default_unit, 'm')
d6 = d1 / 2
self.assertEqual(d6._default_unit, 'm')
def testComparisons(self):
"Testing comparisons"
d1 = D(m=100)
d2 = D(km=1)
d3 = D(km=0)
self.assertGreater(d2, d1)
self.assertEqual(d1, d1)
self.assertLess(d1, d2)
self.assertFalse(d3)
def testUnitsStr(self):
"Testing conversion to strings"
d1 = D(m=100)
d2 = D(km=3.5)
self.assertEqual(str(d1), '100.0 m')
self.assertEqual(str(d2), '3.5 km')
self.assertEqual(repr(d1), 'Distance(m=100.0)')
self.assertEqual(repr(d2), 'Distance(km=3.5)')
def testUnitAttName(self):
"Testing the `unit_attname` class method"
unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'),
('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
for nm, att in unit_tuple:
self.assertEqual(att, D.unit_attname(nm))
class AreaTest(unittest.TestCase):
"Testing the Area object"
def testInit(self):
"Testing initialization from valid units"
a = Area(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_mi=100)
self.assertEqual(a.sq_m, 258998811.0336)
def testInitInvaliA(self):
"Testing initialization from invalid units"
self.assertRaises(AttributeError, A, banana=100)
def testAccess(self):
"Testing access in different units"
a = A(sq_m=100)
self.assertEqual(a.sq_km, 0.0001)
self.assertAlmostEqual(a.sq_ft, 1076.391, 3)
def testAccessInvaliA(self):
"Testing access in invalid units"
a = A(sq_m=100)
self.assertFalse(hasattr(a, 'banana'))
def testAddition(self):
"Test addition & subtraction"
a1 = A(sq_m=100)
a2 = A(sq_m=200)
a3 = a1 + a2
self.assertEqual(a3.sq_m, 300)
a3 += a1
self.assertEqual(a3.sq_m, 400)
a4 = a1 - a2
self.assertEqual(a4.sq_m, -100)
a4 -= a1
self.assertEqual(a4.sq_m, -200)
with self.assertRaises(TypeError):
a1 + 1
with self.assertRaises(TypeError):
a1 - 1
with self.assertRaises(TypeError):
a1 += 1
with self.assertRaises(TypeError):
a1 -= 1
def testMultiplication(self):
"Test multiplication & division"
a1 = A(sq_m=100)
a3 = a1 * 2
self.assertEqual(a3.sq_m, 200)
a3 = 2 * a1
self.assertEqual(a3.sq_m, 200)
a3 *= 5
self.assertEqual(a3.sq_m, 1000)
a4 = a1 / 2
self.assertEqual(a4.sq_m, 50)
a4 /= 5
self.assertEqual(a4.sq_m, 10)
with self.assertRaises(TypeError):
a1 * A(sq_m=1)
with self.assertRaises(TypeError):
a1 *= A(sq_m=1)
with self.assertRaises(TypeError):
a1 / A(sq_m=1)
with self.assertRaises(TypeError):
a1 /= A(sq_m=1)
def testUnitConversions(self):
"Testing default units during maths"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = a1 + a2
self.assertEqual(a3._default_unit, 'sq_m')
a4 = a2 + a1
self.assertEqual(a4._default_unit, 'sq_km')
a5 = a1 * 2
self.assertEqual(a5._default_unit, 'sq_m')
a6 = a1 / 2
self.assertEqual(a6._default_unit, 'sq_m')
def testComparisons(self):
"Testing comparisons"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = A(sq_km=0)
self.assertGreater(a2, a1)
self.assertEqual(a1, a1)
self.assertLess(a1, a2)
self.assertFalse(a3)
def testUnitsStr(self):
"Testing conversion to strings"
a1 = A(sq_m=100)
a2 = A(sq_km=3.5)
self.assertEqual(str(a1), '100.0 sq_m')
self.assertEqual(str(a2), '3.5 sq_km')
self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DistanceTest))
s.addTest(unittest.makeSuite(AreaTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__ == "__main__":
run()
|
bsd-3-clause
|
TeutoNet-Netzdienste/ansible
|
v2/ansible/inventory/expand_hosts.py
|
148
|
4326
|
# (c) 2012, Zettar Inc.
# Written by Chin Fang <[email protected]>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
'''
This module is for enhancing ansible's inventory parsing capability such
that it can deal with hostnames specified using a simple pattern in the
form of [beg:end], example: [1:5], [a:c], [D:G]. If beg is not specified,
it defaults to 0.
If beg is given and is left-zero-padded, e.g. '001', it is taken as a
formatting hint when the range is expanded. e.g. [001:010] is to be
expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
import string
from ansible import errors
def detect_range(line = None):
'''
A helper function that checks a given host line to see if it contains
a range pattern described in the docstring above.
Returns True if the given line contains a pattern, else False.
'''
if 0 <= line.find("[") < line.find(":") < line.find("]"):
return True
else:
return False
def expand_hostname_range(line = None):
'''
A helper function that expands a given line that contains a pattern
specified in top docstring, and returns a list that consists of the
expanded version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: http://ansible.github.com/patterns.html#hosts-and-groups
'''
all_hosts = []
if line:
# A hostname such as db[1:6]-node is considered to consist of
# three parts:
# head: 'db'
# nrange: [1:6]; range() is a built-in. Can't use the name
# tail: '-node'
# Add support for multiple ranges in a host so:
# db[01:10:3]node-[01:10]
# - to do this we split off at the first [...] set, getting the list
# of hosts and then repeat until none left.
# - also add an optional third parameter which contains the step. (Default: 1)
# so range can be [01:10:2] -> 01 03 05 07 09
# FIXME: make this work for alphabetic sequences too.
(head, nrange, tail) = line.replace('[','|',1).replace(']','|',1).split('|')
bounds = nrange.split(":")
if len(bounds) != 2 and len(bounds) != 3:
raise errors.AnsibleError("host range incorrectly specified")
beg = bounds[0]
end = bounds[1]
if len(bounds) == 2:
step = 1
else:
step = bounds[2]
if not beg:
beg = "0"
if not end:
raise errors.AnsibleError("host range end value missing")
if beg[0] == '0' and len(beg) > 1:
rlen = len(beg) # range length formatting hint
if rlen != len(end):
raise errors.AnsibleError("host range format incorrectly specified!")
fill = lambda _: str(_).zfill(rlen) # range sequence
else:
fill = str
try:
i_beg = string.ascii_letters.index(beg)
i_end = string.ascii_letters.index(end)
if i_beg > i_end:
raise errors.AnsibleError("host range format incorrectly specified!")
seq = string.ascii_letters[i_beg:i_end+1]
except ValueError: # not an alpha range
seq = range(int(beg), int(end)+1, int(step))
for rseq in seq:
hname = ''.join((head, fill(rseq), tail))
if detect_range(hname):
all_hosts.extend( expand_hostname_range( hname ) )
else:
all_hosts.append(hname)
return all_hosts
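# --- Usage sketch (illustrative only; requires the ansible package on the path;
# the hostnames are made up) ---
# Expands the [beg:end] patterns described in the module docstring, including
# zero-padded numeric ranges and multiple ranges in one hostname.
if __name__ == '__main__':
    print(detect_range("db[1:3]-node"))                    # True
    print(expand_hostname_range("db[1:3]-node"))           # ['db1-node', 'db2-node', 'db3-node']
    print(expand_hostname_range("web[01:03].example.com"))
    # ['web01.example.com', 'web02.example.com', 'web03.example.com']
    print(expand_hostname_range("rack[a:c]-host[1:2]"))
    # ['racka-host1', 'racka-host2', 'rackb-host1', 'rackb-host2', 'rackc-host1', 'rackc-host2']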
|
gpl-3.0
|
mhnatiuk/phd_sociology_of_religion
|
scrapper/build/Twisted/twisted/scripts/htmlizer.py
|
53
|
1787
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""HTML pretty-printing for Python source code."""
__version__ = '$Revision: 1.8 $'[11:-2]
from twisted.python import htmlizer, usage
from twisted import copyright
import os, sys
header = '''<html><head>
<title>%(title)s</title>
<meta name=\"Generator\" content="%(generator)s" />
%(alternate)s
%(stylesheet)s
</head>
<body>
'''
footer = """</body>"""
styleLink = '<link rel="stylesheet" href="%s" type="text/css" />'
alternateLink = '<link rel="alternate" href="%(source)s" type="text/x-python" />'
class Options(usage.Options):
synopsis = """%s [options] source.py
""" % (
os.path.basename(sys.argv[0]),)
optParameters = [
('stylesheet', 's', None, "URL of stylesheet to link to."),
]
compData = usage.Completions(
extraActions=[usage.CompleteFiles('*.py', descr='source python file')]
)
def parseArgs(self, filename):
self['filename'] = filename
def run():
options = Options()
try:
options.parseOptions()
except usage.UsageError, e:
print str(e)
sys.exit(1)
filename = options['filename']
if options.get('stylesheet') is not None:
stylesheet = styleLink % (options['stylesheet'],)
else:
stylesheet = ''
output = open(filename + '.html', 'w')
try:
output.write(header % {
'title': filename,
'generator': 'htmlizer/%s' % (copyright.longversion,),
'alternate': alternateLink % {'source': filename},
'stylesheet': stylesheet
})
htmlizer.filter(open(filename), output,
htmlizer.SmallerHTMLWriter)
output.write(footer)
finally:
output.close()
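# --- Usage sketch (illustrative only; the original script is normally invoked
# through Twisted's command-line wrappers) ---
# Running the module directly renders `source.py` to `source.py.html`, e.g.:
#   python htmlizer.py --stylesheet=style.css source.py
if __name__ == '__main__':
    run()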
|
gpl-2.0
|
petrvanblokland/Xierpa3
|
xierpa3/components/logo.py
|
1
|
3750
|
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# logo.py
#
from xierpa3.components.component import Component
from xierpa3.attributes import Em, Perc, Color
#from xierpa3.descriptors.media import Media
from xierpa3.descriptors.blueprint import BluePrint
class Logo(Component):
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = Component.C
BLUEPRINT = BluePrint(
# Layout stuff
colWidth=4, doc_colWidth=u'Default amount of columns for this component.',
maxWidth=Perc(100), doc_maxWidth=u'Maximal width of the component',
minWidth=Perc(100), doc_minWidth=u'Minimal width of the component',
# Logo data can be set through the text/style attribute or through the adapter.
# The optional url creates a link from the logo, e.g. to the home page.
text=None, doc_text=u'Optional text of the logo. Otherwise query the adapter.',
src=None, doc_src=u'Optional src for logo image. Otherwise query the adapter.',
url=None, doc_url=u'Optional url for logo link. Otherwise query the adapter.',
# Logo stuff
width=280, doc_logoWidth=u'Logo width',
backgroundColor=None, doc_backgroundColor=u'Background color of the logo component.',
height=C.AUTO, doc_height=u'Logo height',
marginTop=0, doc_marginTop=u'Logo margin top',
marginLeft=0, doc_marginLeft=u'Logo margin left',
marginRight=Perc(1.8), doc_marginRight=u'Logo margin right',
marginBottom=0, doc_marginBottom=u'Logo margin bottom',
logoFloat=C.LEFT, doc_logoFloat=u'Logo div float',
# H2 heading, in case style.text is defined and not style.src (for an image)
fontFamily=C.LOGOFAMILY, doc_fontFamily=u'h2 font family',
fontSize=Em(2.9), doc_fontSize=u'h2 Logo font size',
lineHeight=Em(1.4), doc_lineHeight=u'h2 leading',
fontWeight=C.BOLD, doc_fontWeight=u'h2 font weight',
fontStyle=None, doc_fontStyle=u'h2 font style',
paddingTop=Em(0.2), doc_paddingTop=u'h2 padding top',
paddingBottom=Em(0.2), doc_paddingBottom=u'h2 padding bottom',
color=Color('#323A47'), doc_color=u'h2 color',
)
def buildBlock(self, b):
s = self.style
colClass = self.getColClass(s.colWidth)
b.div(class_=colClass, float=s.logoFloat, marginleft=s.marginLeft, backgroundcolor=s.backgroundColor,
marginright=s.marginRight, margintop=s.marginTop, marginbottom=s.marginBottom)
# @text: text to show instead of the logo image.
# @url: url of href link. If None no link is made
# @src: url of the image source.
data = self.adapter.getLogo()
if s.text:
data.text = s.text
data.src = None
if s.src:
data.src = s.src
data.text = None
if s.url:
data.url = s.url
if data.url:
b.a(href=data.url)
if data.src:
b.img(src=data.src, width=s.width, maxwidth=s.maxWidth, height=s.height)
else:
b.h2(fontsize=s.fontSize, lineheight=s.lineHeight, fontfamily=s.fontFamily,
fontWeight=s.h2Weight, fontStyle=s.h2Style, color=s.color,
paddingtop=s.paddingTop, paddingbottom=s.paddingBottom,
)
b.text(data.text)
b._h2()
if data.url:
b._a()
b._div(comment=colClass)
|
mit
|
ambikeshwar1991/sandhi-2
|
module/gr36/gr-filter/examples/interpolate.py
|
13
|
8584
|
#!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
self.signal = gr.add_cc()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = gr.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = gr.vector_sink_c()
self.snk2 = gr.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
adrn/TwoFace
|
twoface/data.py
|
1
|
1645
|
# Third-party
import astropy.units as u
from astropy.time import Time
import numpy as np
from thejoker.data import RVData
def star_to_apogeervdata(star, clean=False):
"""Return a `twoface.data.APOGEERVData` instance for this star.
Parameters
----------
"""
jd = []
rv = []
rv_rand_err = []
for v in star.visits:
rv.append(float(v.vhelio))
jd.append(float(v.jd))
rv_rand_err.append(float(v.vrelerr))
t = Time(jd, format='jd', scale='utc')
rv = rv * u.km/u.s
rv_err = rv_rand_err*u.km/u.s
data = APOGEERVData(t=t, rv=rv, stddev=rv_err)
if clean:
bad_mask = (np.isclose(np.abs(data.rv.value), 9999.) |
(data.stddev.to(u.km/u.s).value >= 100.))
data = data[~bad_mask]
return data
class APOGEERVData(RVData):
@classmethod
def from_visits(cls, visits):
"""
Parameters
----------
visits : list, structured array, `astropy.table.Table`
List of ``AllVisit`` database row instances, or rows sub-selected
from the allVisit FITS file.
"""
from .db import AllVisit
if isinstance(visits[0], AllVisit):
jd = [float(v.jd) for v in visits]
rv = [float(v.vhelio) for v in visits]
rv_rand_err = [float(v.vrelerr) for v in visits]
else:
jd = visits['JD']
rv = visits['VHELIO']
rv_rand_err = visits['VRELERR']
t = Time(jd, format='jd', scale='utc')
rv = rv * u.km/u.s
rv_err = rv_rand_err*u.km/u.s
return cls(t=t, rv=rv, stddev=rv_err)
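# --- Usage sketch (illustrative only; assumes twoface and its dependencies
# (astropy, thejoker) are importable; the column names follow the allVisit
# branch above and the values are made up) ---
if __name__ == "__main__":
    from astropy.table import Table
    visits = Table({'JD': [2457000.5, 2457010.5],
                    'VHELIO': [10.1, 10.4],
                    'VRELERR': [0.1, 0.2]})
    data = APOGEERVData.from_visits(visits)
    print(data.rv)
    print(data.stddev)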
|
mit
|
robdennis/sideboard
|
tests/plugins/manypackages/multi/env/lib/python2.7/site-packages/paver/svn.py
|
13
|
2061
|
"""Convenience functions for working with svn.
This module does not include any tasks, only functions.
At this point, these functions do not use any kind of library. They require
the svn binary on the path."""
from paver.easy import sh, Bunch, path
def _format_revision(revision):
if revision:
revision = "-r %s " % (revision)
return revision
def checkout(url, dest, revision=""):
"""Checks out the specified URL to the given destination."""
revision = _format_revision(revision)
sh("svn co %s%s %s" % (revision, url, dest))
def update(path="", revision=""):
"""Run an svn update on the given path."""
revision = _format_revision(revision)
command = "svn up %s" % revision
if path:
command += path
sh(command)
def checkup(url, dest, revision=""):
"""Does a checkout or update, depending on whether the destination
exists and is up to date (if a revision is passed in). Returns
True if a checkout or update was performed, False otherwise."""
dest = path(dest)
if not dest.exists():
checkout(url, dest, revision)
return True
else:
vinfo = info(dest)
if not vinfo or vinfo.revision != revision:
update(dest, revision)
return True
return False
def export(url, dest, revision=""):
"""Exports the specified URL to the given destination."""
revision = _format_revision(revision)
cmd = 'svn export %s%s %s' % (revision, url, dest)
sh(cmd)
def info(path=""):
"""Retrieves the svn info for the path and returns a dictionary of
the values. Names are normalized to lower case with spaces converted
to underscores."""
output = sh("svn info %s" % path, capture=True)
if not output:
return Bunch()
lines = output.splitlines()
data = Bunch()
for line in lines:
colon = line.find(":")
if colon == -1:
continue
key = line[:colon].lower().replace(" ", "_")
value = line[colon+2:]
data[key] = value
return data
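# --- Usage sketch (illustrative only; the repository URL is made up and the
# `svn` binary must be available on the PATH) ---
# checkup() decides between a fresh checkout and an update, as described above,
# and info() exposes the working-copy metadata as a Bunch.
if __name__ == '__main__':
    refreshed = checkup("https://svn.example.org/project/trunk", "build/trunk",
                        revision="42")
    print("working copy refreshed: %s" % refreshed)
    print(info("build/trunk").get("url"))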
|
bsd-3-clause
|
dharamgollapudi/jaikuengine
|
common/templatetags/test/format.py
|
30
|
3368
|
# -*- coding: utf-8 -*-
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import test
from common import exception
from common import models
from common import util
from common.templatetags import format
from common.test import base
from common.test import util as test_util
class FormatTest(test.TestCase):
def assertLinkTransformed(self, value):
expected = """<a href="%s" target="_new">%s</a>""" % (value, value)
self.assertEqual(expected, format.format_autolinks(value))
def assertLinkNotTransformed(self, value):
self.assertEqual(value, format.format_autolinks(value))
def test_truncate(self):
test_strings = [(u"Testing", 7, u"Testing"),
("Testing", 6, u"Testin\u2026"),
(u"åäöåäöåäöåäö", 10, u"åäöåäöåäöå…")]
for orig_str, max_len, trunc_str in test_strings:
a = format.truncate(orig_str, max_len)
self.assertEqual(a, trunc_str)
def test_format_transforms_valid_links(self):
# We should accept links that start with http
self.assertLinkTransformed('http://example.com')
self.assertLinkTransformed('http://example.com/')
self.assertLinkTransformed('http://www.example.com')
self.assertLinkTransformed('http://www.example.com/~someuser')
self.assertLinkTransformed('http://www.example.com/a/b/c/d/e/f/g')
self.assertLinkTransformed('https://example.com')
self.assertLinkTransformed('https://example.com/')
self.assertLinkTransformed('https://www.example.com')
self.assertLinkTransformed('https://www.example.com/~someuser')
self.assertLinkTransformed('https://www.example.com/a/b/c/d/e/f/g')
def test_format_ignores_invalid_links(self):
# Any link that doesn't start with http should be ignored
# otherwise we run the risk of security problems from data: and javascript: links
self.assertLinkNotTransformed("""javascript:document.location="http://localhost:8080/?evil=" """ )
self.assertLinkNotTransformed('''javascript:document.location="http://localhost:8080/?evil="''')
self.assertLinkNotTransformed('''data:text/html;base64,PHNjcmlwdD5hbGVydChkb2N1bWVudC5jb29raWUpPC9zY3JpcHQ+Cg==''')
class FormatFixtureTest(base.FixturesTestCase):
# TODO(jonasnockert): Improve test method... but how?
def test_linked_entry_truncated_title(self):
# Get all StreamEntries to make sure both posts and comments are
# tested.
entries = models.StreamEntry.all()
for e in entries:
# Truncate to one character to ensure truncation takes place and
# an ellipsis is added.
trunc_url = format.linked_entry_truncated_title(e, 1)
# Construct a link with made-up one character+ellipsis entry title.
trunc_ref_url = u"<a href=\"%s\">x\u2026</a>" % e.url()
self.assertEqual(len(trunc_url), len(trunc_ref_url))
|
apache-2.0
|
nkgilley/home-assistant
|
homeassistant/components/avri/sensor.py
|
2
|
2931
|
"""Support for Avri waste curbside collection pickup."""
import logging
from avri.api import Avri, AvriException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID, DEVICE_CLASS_TIMESTAMP
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN, ICON
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Avri Waste platform."""
client = hass.data[DOMAIN][entry.entry_id]
integration_id = entry.data[CONF_ID]
try:
each_upcoming = client.upcoming_of_each()
except AvriException as ex:
raise PlatformNotReady from ex
else:
entities = [
AvriWasteUpcoming(client, upcoming.name, integration_id)
for upcoming in each_upcoming
]
async_add_entities(entities, True)
class AvriWasteUpcoming(Entity):
"""Avri Waste Sensor."""
def __init__(self, client: Avri, waste_type: str, integration_id: str):
"""Initialize the sensor."""
self._waste_type = waste_type
self._name = f"{self._waste_type}".title()
self._state = None
self._client = client
self._state_available = False
self._integration_id = integration_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return (f"{self._integration_id}" f"-{self._waste_type}").replace(" ", "")
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if entity is available."""
return self._state_available
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
async def async_update(self):
"""Update the data."""
if not self.enabled:
return
try:
pickup_events = self._client.upcoming_of_each()
except AvriException as ex:
_LOGGER.error(
"There was an error retrieving upcoming garbage pickups: %s", ex
)
self._state_available = False
self._state = None
else:
self._state_available = True
matched_events = list(
filter(lambda event: event.name == self._waste_type, pickup_events)
)
if not matched_events:
self._state = None
else:
self._state = matched_events[0].day.date()
|
apache-2.0
|
PhloxAR/PhloxAR-dc1394
|
dc1394/frame.py
|
1
|
6790
|
# -----------------------------------------------------------------------------
#
# -*- coding: utf-8 -*-
#
# phlox-libdc1394/phloxar-dc1394/frame.py
#
# Copyright (C) 2016, by Matthias Yang Chen <[email protected]>
# All rights reserved.
#
# phlox-libdc1394 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# phlox-libdc1394 is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with phlox-libdc1394. If not,
# see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from ctypes import ARRAY, c_byte
from numpy import ndarray
from .core import *
__all__ = ['Frame']
class Frame(ndarray):
"""
A frame returned by the camera.
All metadata are retained as attributes of the resulting image.
"""
_cam = None
_frame = None
def __new__(cls, camera, frame):
"""
Convert a phloxar-dc1394 frame into a Frame instance.
:param camera:
:param frame:
:return:
"""
dtype = ARRAY(c_byte, frame.contents.image_bytes)
buf = dtype.from_address(frame.contents.image)
width, height = frame.contents.size
pixels = width * height
endian = frame.contents.little_endian and '<' or '>'
type_str = '%su%i' % (endian, frame.contents.image_bytes / pixels)
img = ndarray.__new__(cls, shape=(height, width), dtype=type_str, buffer=buf)
img.frame_id = frame.contents.id
img.frames_behind = frame.contents.frames_behind
img.position = frame.contents.position
img.packet_size = frame.contents.packet_size
img.packets_per_frame = frame.contents.packet_per_frame
img.timestamp = frame.contents.timestamp
img.video_mode = video_modes[frame.contents.video_mode]
img.data_depth = frame.contents.data_depth
img.color_coding = color_codings[frame.contents.color_coding]
img.color_filter = frame.contents.color_filter
img.yuv_byte_order = frame.contents.yuv_byte_order
img.stride = frame.contents.stride
# save camera and frame for enqueue()
img._frame = frame
img._cam = camera
return img
def __array_finalize__(self, img):
"""
Finalize the new Image class array.
If called with an image object, inherit the properties of that image.
"""
if img is None:
return
# do not inherit _frame and _cam since we also get called on copy()
# and should not hold references to the frame in this case
for key in ["position", "color_coding", "color_filter",
"yuv_byte_order", "stride", "packet_size",
"packets_per_frame", "timestamp", "frames_behind",
"frame_id", "data_depth", "video_mode"]:
setattr(self, key, getattr(img, key, None))
def enqueue(self):
"""
Returns a frame to the ring buffer once it has been used.
This method is also called implicitly on ``del``.
Only call this method on the original frame obtained from
Camera.dequeue` and not on its views, new-from-templates or
copies. Otherwise an AttributeError will be raised.
"""
if not hasattr(self, "_frame"): # or self.base is not None:
raise AttributeError("can only enqueue the original frame")
if self._frame is not None:
dll.dc1394_capture_enqueue(self._cam, self._frame)
self._frame = None
self._cam = None
# from contextlib import closing
# with closing(camera.dequeue()) as im:
# do stuff with im
close = enqueue
def __del__(self):
try:
self.enqueue()
except AttributeError:
pass
@property
def corrupt(self):
"""
Whether this frame is corrupt.
Returns ``True`` if the given frame has been detected to be
corrupt (missing data, corrupted data, overrun buffer, etc.) and
``False`` otherwise.
.. note::
Certain types of corruption may go undetected in which case
``False`` will be returned erroneously. The ability to
detect corruption also varies between platforms.
.. note::
Corrupt frames still need to be enqueued with `enqueue`
when no longer needed by the user.
"""
return bool(dll.dc1394_capture_is_frame_corrupt(self._cam, self._frame))
def to_rgb(self):
"""
Convert the image to an RGB image.
Array shape is: (image.shape[0], image.shape[1], 3)
Uses the dc1394_convert_to_RGB() function for the conversion.
"""
res = ndarray(3 * self.size, dtype='u1')
shape = self.shape
inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
dll.dc1394_convert_to_RGB8(inp, res, shape[1], shape[0],
self.yuv_byte_order, self.color_coding,
self.data_depth)
res.shape = shape[0], shape[1], 3
return res
def to_mono8(self):
"""
Convert the image to 8-bit gray scale.
Uses the dc1394_convert_to_MONO8() function
"""
res = ndarray(self.size, dtype='u1')
shape = self.shape
inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
dll.dc1394_convert_to_MONO8(inp, res, shape[1], shape[0],
self.yuv_byte_order, self.color_coding,
self.data_depth)
res.shape = shape
return res
def to_yuv422(self):
"""
Convert the image to YUV422 color format.
Uses the dc1394_convert_to_YUV422() function
"""
res = ndarray(self.size, dtype='u1')
shape = self.shape
inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
dll.dc1394_convert_to_YUV422(inp, res, shape[1], shape[0],
self.yuv_byte_order, self.color_coding,
self.data_depth)
return ndarray(shape=shape, buffer=res.data, dtype='u2')
|
gpl-3.0
|
sudheesh001/oh-mainline
|
vendor/packages/requests/requests/packages/urllib3/fields.py
|
1007
|
5833
|
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method sets the "Content-Disposition", "Content-Type" and
"Content-Location" headers on the request field.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
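# Rough usage sketch (not part of the vendored module; names are made up):
#   field = RequestField('file', b'...', filename='report.txt')
#   field.make_multipart(content_type='text/plain')
#   field.render_headers()
# renders, joined with CRLF:
#   Content-Disposition: form-data; name="file"; filename="report.txt"
#   Content-Type: text/plain
# followed by the blank line that separates the headers from the body part.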
|
agpl-3.0
|
vladmm/intellij-community
|
python/helpers/pycharm/lettuce_runner.py
|
43
|
8795
|
# coding=utf-8
"""
BDD lettuce framework runner
TODO: Support other params (like tags) as well.
Supports only 2 params for now: a folder (or file) to search "features" in, and "-s scenario_index"
"""
import inspect
import optparse
import os
import _bdd_utils
__author__ = 'Ilya.Kazakevich'
from lettuce.exceptions import ReasonToFail
import lettuce
from lettuce import core
class _LettuceRunner(_bdd_utils.BddRunner):
"""
Lettuce runner (BddRunner for lettuce)
"""
def __init__(self, base_dir, what_to_run, scenarios, options):
"""
:param scenarios: scenario numbers to run
:type scenarios: list
:param base_dir: base directory to run tests in
:type base_dir: str
:param what_to_run: folder or file to run
:type what_to_run: str
:param options: optparse options passed by user
:type options: optparse.Values
"""
super(_LettuceRunner, self).__init__(base_dir)
# TODO: Copy/Paste with lettuce.bin, need to reuse somehow
# Delete args that do not exist in constructor
args_to_pass = options.__dict__
runner_args = inspect.getargspec(lettuce.Runner.__init__)[0]
unknown_args = set(args_to_pass.keys()) - set(runner_args)
map(args_to_pass.__delitem__, unknown_args)
# Tags is special case and need to be preprocessed
self.__tags = None # Store tags in field
if 'tags' in args_to_pass.keys() and args_to_pass['tags']:
args_to_pass['tags'] = [tag.strip('@') for tag in args_to_pass['tags']]
self.__tags = set(args_to_pass['tags'])
# Special cases we pass directly
args_to_pass['base_path'] = what_to_run
args_to_pass['scenarios'] = ",".join(scenarios)
self.__runner = lettuce.Runner(**args_to_pass)
def _get_features_to_run(self):
super(_LettuceRunner, self)._get_features_to_run()
features = []
if self.__runner.single_feature: # We need to run one and only one feature
features = [core.Feature.from_file(self.__runner.single_feature)]
else:
# Find all features in dir
for feature_file in self.__runner.loader.find_feature_files():
feature = core.Feature.from_file(feature_file)
assert isinstance(feature, core.Feature), feature
# TODO: cut out due to https://github.com/gabrielfalcao/lettuce/issues/451 Fix when this issue fixed
feature.scenarios = filter(lambda s: not s.outlines, feature.scenarios)
if feature.scenarios:
features.append(feature)
# Choose only selected scenarios
if self.__runner.scenarios:
for feature in features:
filtered_feature_scenarios = []
for index in [i - 1 for i in self.__runner.scenarios]: # decrease index by 1
if index < len(feature.scenarios):
filtered_feature_scenarios.append(feature.scenarios[index])
feature.scenarios = filtered_feature_scenarios
# Filter out tags TODO: Share with behave_runner.py#__filter_scenarios_by_args
if self.__tags:
for feature in features:
feature.scenarios = filter(lambda s: set(s.tags) & self.__tags, feature.scenarios)
return features
def _run_tests(self):
super(_LettuceRunner, self)._run_tests()
self.__install_hooks()
self.__runner.run()
def __step(self, is_started, step):
"""
Reports step start / stop
:type step core.Step
:param step: step
"""
test_name = step.sentence
if is_started:
self._test_started(test_name, step.described_at)
elif step.passed:
self._test_passed(test_name)
elif step.failed:
reason = step.why
assert isinstance(reason, ReasonToFail), reason
self._test_failed(test_name, message=reason.exception.message, details=reason.traceback)
elif step.has_definition:
self._test_skipped(test_name, "In lettuce, we do not know the reason", step.described_at)
else:
self._test_undefined(test_name, step.described_at)
def __install_hooks(self):
"""
Installs required hooks
"""
# Install hooks
lettuce.before.each_feature(
lambda f: self._feature_or_scenario(True, f.name, f.described_at))
lettuce.after.each_feature(
lambda f: self._feature_or_scenario(False, f.name, f.described_at))
lettuce.before.each_scenario(
lambda s: self.__scenario(True, s))
lettuce.after.each_scenario(
lambda s: self.__scenario(False, s))
lettuce.before.each_background(
lambda b, *args: self._background(True, b.feature.described_at))
lettuce.after.each_background(
lambda b, *args: self._background(False, b.feature.described_at))
lettuce.before.each_step(lambda s: self.__step(True, s))
lettuce.after.each_step(lambda s: self.__step(False, s))
def __scenario(self, is_started, scenario):
"""
Reports scenario launched
:type scenario core.Scenario
:param scenario: scenario
"""
if scenario.outlines:
scenario.steps = [] # Clear to prevent running. TODO: Fix when this issue fixed
scenario.background = None # TODO: undocumented
return
self._feature_or_scenario(is_started, scenario.name, scenario.described_at)
def _get_args():
"""
Get options passed by user
:return: tuple (options, args), see optparse
"""
# TODO: Copy/Paste with lettuce.bin, need to reuse somehow
parser = optparse.OptionParser()
parser.add_option("-v", "--verbosity",
dest="verbosity",
default=0, # We do not need verbosity due to GUI we use (although user may override it)
help='The verbosity level')
parser.add_option("-s", "--scenarios",
dest="scenarios",
default=None,
help='Comma separated list of scenarios to run')
parser.add_option("-t", "--tag",
dest="tags",
default=None,
action='append',
help='Tells lettuce to run the specified tags only; '
'can be used multiple times to define more tags '
'(prefixing tags with "-" will exclude them and '
'prefixing with "~" will match approximate words)')
parser.add_option("-r", "--random",
dest="random",
action="store_true",
default=False,
help="Run scenarios in a more random order to avoid interference")
parser.add_option("--with-xunit",
dest="enable_xunit",
action="store_true",
default=False,
help='Output JUnit XML test results to a file')
parser.add_option("--xunit-file",
dest="xunit_file",
default=None,
type="string",
help='Write JUnit XML to this file. Defaults to '
'lettucetests.xml')
parser.add_option("--with-subunit",
dest="enable_subunit",
action="store_true",
default=False,
help='Output Subunit test results to a file')
parser.add_option("--subunit-file",
dest="subunit_filename",
default=None,
help='Write Subunit data to this file. Defaults to '
'subunit.bin')
parser.add_option("--failfast",
dest="failfast",
default=False,
action="store_true",
help='Stop running on the first failure')
parser.add_option("--pdb",
dest="auto_pdb",
default=False,
action="store_true",
help='Launches an interactive debugger upon error')
return parser.parse_args()
if __name__ == "__main__":
options, args = _get_args()
(base_dir, scenarios, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ)
if len(what_to_run) > 1:
raise Exception("Lettuce can't run more than one file now")
_bdd_utils.fix_win_drive(what_to_run[0])
_LettuceRunner(base_dir, what_to_run[0], scenarios, options).run()
|
apache-2.0
|
hectoruelo/scrapy
|
scrapy/commands/runspider.py
|
109
|
3523
|
import sys
import os
from importlib import import_module
from scrapy.utils.spider import iter_spider_classes
from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.conf import arglist_to_dict
def _import_file(filepath):
abspath = os.path.abspath(filepath)
dirname, file = os.path.split(abspath)
fname, fext = os.path.splitext(file)
if fext != '.py':
raise ValueError("Not a Python source file: %s" % abspath)
if dirname:
sys.path = [dirname] + sys.path
try:
module = import_module(fname)
finally:
if dirname:
sys.path.pop(0)
return module
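# Illustrative usage (not part of the Scrapy source): load a standalone
# spider module from an arbitrary path, e.g.
#   module = _import_file('/tmp/myspider.py')
# The parent directory is pushed onto sys.path only for the duration of the
# import and popped again in the finally block above.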
class Command(ScrapyCommand):
requires_project = False
def syntax(self):
return "[options] <spider_file>"
def short_desc(self):
return "Run a self-contained spider (without creating a project)"
def long_desc(self):
return "Run the spider defined in the given file"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
help="set spider argument (may be repeated)")
parser.add_option("-o", "--output", metavar="FILE",
help="dump scraped items into FILE (use - for stdout)")
parser.add_option("-t", "--output-format", metavar="FORMAT",
help="format to use for dumping items with -o")
def process_options(self, args, opts):
ScrapyCommand.process_options(self, args, opts)
try:
opts.spargs = arglist_to_dict(opts.spargs)
except ValueError:
raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
if opts.output:
if opts.output == '-':
self.settings.set('FEED_URI', 'stdout:', priority='cmdline')
else:
self.settings.set('FEED_URI', opts.output, priority='cmdline')
valid_output_formats = (
list(self.settings.getdict('FEED_EXPORTERS').keys()) +
list(self.settings.getdict('FEED_EXPORTERS_BASE').keys())
)
if not opts.output_format:
opts.output_format = os.path.splitext(opts.output)[1].replace(".", "")
if opts.output_format not in valid_output_formats:
raise UsageError("Unrecognized output format '%s', set one"
" using the '-t' switch or as a file extension"
" from the supported list %s" % (opts.output_format,
tuple(valid_output_formats)))
self.settings.set('FEED_FORMAT', opts.output_format, priority='cmdline')
def run(self, args, opts):
if len(args) != 1:
raise UsageError()
filename = args[0]
if not os.path.exists(filename):
raise UsageError("File not found: %s\n" % filename)
try:
module = _import_file(filename)
except (ImportError, ValueError) as e:
raise UsageError("Unable to load %r: %s\n" % (filename, e))
spclasses = list(iter_spider_classes(module))
if not spclasses:
raise UsageError("No spider found in file: %s\n" % filename)
spidercls = spclasses.pop()
self.crawler_process.crawl(spidercls, **opts.spargs)
self.crawler_process.start()
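# Illustrative note (not part of the Scrapy source): this command backs
# invocations such as
#   scrapy runspider myspider.py -a category=books -o items.json
# It imports myspider.py, takes the last spider class found in the module
# and runs it through a project-less CrawlerProcess.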
|
bsd-3-clause
|
lhotchkiss/mousetrap
|
src/mousetrap/ocvfw/_ocv.py
|
1
|
16960
|
#
# Copyright 2009 Flavio Percoco Premoli
#
# This file is part of Ocvfw.
#
# Ocvfw is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2 as published
# by the Free Software Foundation.
#
# Ocvfw is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ocvfw. If not, see <http://www.gnu.org/licenses/>.
"""Little Framework for OpenCV Library."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2008 Flavio Percoco Premoli"
__license__ = "GPLv2"
import time
import debug
import commons as co
import cv2 #remove
import cv2.cv as cv
import numpy
import array
class OcvfwBase:
def __init__( self ):
"""
Initialize the module and set its main variables.
"""
self.img = None
self.mhi = None
self.img_lkpoints = { "current" : [],
"last" : [],
"points" : [] }
self.__lk_swap = False
self.imageScale = 1.5
def set(self, key, value):
"""
"""
if hasattr(self, "%s" % key):
getattr(self, "%s" % key)(value)
debug.debug("_ocv - set", "Changed %s value to %s" % (key, value))
return True
debug.debug("_ocv - set", "%s not found" % (key))
return False
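# Illustrative note (not part of Ocvfw): set() dispatches to the attribute
# named by `key`, so for example
#   ocv.set("lk_swap", True)
# calls ocv.lk_swap(True) and returns True, while an unknown key only logs
# a debug message and returns False.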
def lk_swap(self, set=None):
"""
Enables/Disable the lk points swapping action.
Arguments:
- self: The main object pointer.
- set: The new value. If None returns the current state.
"""
if set is None:
return self.__lk_swap
self.__lk_swap = set
def new_image(self, size, num, ch):
"""
Creates a new image
"""
#if type(size) == "<type 'tuple'>":
#size = co.cv.cvSize( size[0], size[1])
return co.cv.CreateImage( (size[0], size[1]), num, ch)# was size'
def set_camera_idx(self, idx):
"""
Changes the camera device index.
Arguments:
- self: The main object pointer.
- idx: The camera index. For Example: 0 for /dev/video0
"""
self.idx = idx
def wait_key(self, num):
"""
Simple call to the co.cv.WaitKey function, which has to be called periodically.
Arguments:
- self: The main object pointer.
- num: An int value.
"""
return co.cv.WaitKey(num)
def start_camera(self, params = None):
"""
Starts the camera capture
Arguments:
- params: A list with the capture properties. NOTE: Not implemented yet.
"""
self.capture = cv.CaptureFromCAM(self.idx )
debug.debug( "ocvfw", "start_camera: Camera Started" )
def query_image(self, bgr=False, flip=False):
"""
Queries the new frame.
Arguments:
- self: The main object pointer.
- bgr: If True. The image will be converted from RGB to BGR.
Returns True; the queried frame is stored in self.img.
"""
frame = cv.QueryFrame( self.capture )
#Test to make sure camera starts properly
#cv.ShowImage("webcam", frame)
if not self.img:
self.storage = co.cv.CreateMemStorage(0)
self.imgSize = co.cv.GetSize (frame)
self.img = co.cv.CreateImage ( self.imgSize, 8, 3 )
#self.img.origin = frame.origin
self.grey = co.cv.CreateImage ( self.imgSize, 8, 1 )
self.yCrCb = co.cv.CreateImage ( self.imgSize, 8, 3 )
self.prevGrey = co.cv.CreateImage ( self.imgSize, 8, 1 )
self.pyramid = co.cv.CreateImage ( self.imgSize, 8, 1 )
self.prevPyramid = co.cv.CreateImage ( self.imgSize, 8, 1 )
#a = co.cv.Round(self.img.width/self.imageScale)
#b = co.cv.Round(self.img.height/self.imageScale)
#c = (a, b)
self.small_img = co.cv.CreateImage(
( co.cv.Round(self.img.width/self.imageScale),
co.cv.Round(self.img.height/self.imageScale) ),
8, 3 )
self.img = frame
self.wait_key(10)
return True
def set_lkpoint(self, point):
"""
Set a point to follow using the Lucas-Kanade (LK) method.
Arguments:
- self: The main object pointer.
- point: A co.cv.Point Point.
"""
#Point = co.cv.Point( point.x, point.y )
self.img_lkpoints["current"] = numpy.zeros((point.x, point.y), numpy.float32)
self.img_lkpoints["current"] = cv.fromarray(self.img_lkpoints["current"])
self.grey = numpy.asarray(self.grey[:,:]) #new
if numpy.all(self.img_lkpoints["current"]):
#co.cv.FindCornerSubPix(
cv2.cornerSubPix( # was cv.FindCornerSubPix
self.grey,
self.img_lkpoints["current"],
(20, 20), (0,0),
(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
point.set_opencv( point )
self.img_lkpoints["points"].append(point)
setattr(point.parent, point.label, point)
if len(self.img_lkpoints["last"]) > 0:
self.img_lkpoints["last"].append( self.img_lkpoints["current"][0] )
debug.debug( "ocvfw", "cmSetLKPoints: New LK Point Added" )
else:
self.img_lkpoints["current"] = []
def clean_lkpoints(self):
"""
Cleans all the registered points.
Arguments:
- self: The main object pointer
"""
self.img_lkpoints = { "current" : [],
"last" : [],
"points" : [] }
def show_lkpoints(self):
"""
Calculate the optical flow of the set points and draw them in the image.
Arguments:
- self: The main object pointer.
"""
self.prevGrey = numpy.asarray(self.prevGrey[:,:])
prevGrey = cv2.cvtColor(self.prevGrey, cv2.COLOR_BGR2GRAY)
self.grey = numpy.asarray(self.grey[:,:])
grey = cv2.cvtColor(self.grey, cv2.COLOR_BGR2GRAY)
# calculate the optical flow
nextPts, status, err = cv2.calcOpticalFlowPyrLK (
prevGrey, #prevImg
grey, #nextImg
self.prevPyramid, #prevPts
self.pyramid, #nextPts
None, #status
(20, 20), #winSize
2, #maxLevel
(cv2.TERM_CRITERIA_MAX_ITER|cv2.TERM_CRITERIA_EPS, 20, 0.03), #criteria
cv2.OPTFLOW_USE_INITIAL_FLOW #flags
)
cv.ShowImage("test",self.grey)
# cv2.calcOpticalFlowPyrLK already returned the new point positions and
# their status above, so use them directly instead of the old cv API result.
self.img_lkpoints["current"] = nextPts
# initializations
counter = 0
new_points = []
for point in self.img_lkpoints["current"]:
if not status[counter]:
continue
# this point is a correct point
current = self.img_lkpoints["points"][counter]
current.set_opencv(co.cv.Point(int(point.x), int(point.y)))
new_points.append( point )
setattr(current.parent, current.label, current)
# draw the current point
current.parent.draw_point(point.x, point.y)
# increment the counter
counter += 1
#debug.debug( "ocvfw", "cmShowLKPoints: Showing %d LK Points" % counter )
# set back the self.imgPoints we keep
self.img_lkpoints["current"] = new_points
def swap_lkpoints(self):
"""
Swap the LK method variables so the new points will be the last points.
This function has to be called after showing the new points.
Arguments:
- self: The main object pointer.
"""
# swapping
self.prevGrey, self.grey = self.grey, self.prevGrey
self.prevPyramid, self.pyramid = self.pyramid, self.prevPyramid
self.img_lkpoints["last"], self.img_lkpoints["current"] = \
self.img_lkpoints["current"], self.img_lkpoints["last"]
class OcvfwCtypes(OcvfwBase):
"""
This Class controlls the main camera functions.
It works as a little framework for Opencv.cv.
This Backend uses ctypes opencv python bindings.
"""
def __init__(self):
"""
Initialize the module and set its main variables.
"""
co.cv = __import__("pyopencv.cv",
globals(),
locals(),
[''])
co.hg = __import__("pyopencv.cv",
globals(),
locals(),
[''])#should be removed
OcvfwBase.__init__(self)
class OcvfwPython(OcvfwBase):
"""
This Class controlls the main camera functions.
It works as a little framework for Openco.cv.
This Backend uses normal opencv python bindings.
"""
co.cv = __import__("cv",
globals(),
locals(),
[''])
co.hg = __import__("cv",
globals(),
locals(),
['']) #should be removed
def __init__( self ):
"""
Initialize the module and set its main variables.
"""
OcvfwBase.__init__(self)
def add_message(self, message, font=co.cv.CV_FONT_HERSHEY_COMPLEX, poss=None):
"""
Write a message into the image.
Arguments:
- self: The main object pointer.
- message: A string with the message.
- font: An OpenCV font to use.
- poss: The position of the message in the image. NOTE: Not enabled yet.
"""
font = co.cv.InitFont ( font, 1, 1, 0.0, 1, co.cv.CV_AA)
textSize, ymin = co.cv.GetTextSize (message, font)
pt1 = (( self.img.width - textSize.width ) / 2 , 20 )
co.cv.PutText (self.img, message, pt1, font, co.cv.Scalar (255, 0, 0))
def get_haar_points(self, haarCascade, method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
"""
Search for points matching the haarcascade selected.
Arguments:
- self: The main object pointer.
- haarCascade: The selected cascade.
- method: The search method to use. DEFAULT: co.cv.CV_HAAR_DO_CANNY_PRUNING.
Returns a list with the matches.
"""
cascade = co.cv.Load( haarCascade) #, self.imgSize )
if not cascade:
debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
co.cv.Resize( self.img, self.small_img, co.cv.CV_INTER_LINEAR )
#co.cv.ClearMemStorage( self.storage )
points = co.cv.HaarDetectObjects( self.small_img, cascade, self.storage, 1.2, 2, method, (20, 20) )
if points:
matches = [ [ ( int(r[0][0]*self.imageScale), int(r[0][1]*self.imageScale)), \
( int((r[0][0]+r[0][3])*self.imageScale), int((r[0][0]+r[0][2])*self.imageScale) )] \
for r in points]
debug.debug( "ocvfw", "cmGetHaarPoints: detected some matches" )
return matches
def get_haar_roi_points(self, haarCascade, rect, origSize=(0, 0), method=co.cv.CV_HAAR_DO_CANNY_PRUNING):
"""
Search for points matching the haarcascade selected.
Arguments:
- self: The main object pointer.
- haarCascade: The selected cascade.
- method: The search method to use. DEFAULT: co.cv.CV_HAAR_DO_CANNY_PRUNING.
Returns a list with the matches.
"""
cascade = co.cv.Load( haarCascade ) #, self.imgSize )
if not cascade:
debug.exception( "ocvfw", "The Haar Classifier Cascade load failed" )
debug.debug( "ocvfw-get_haar_roi_points", self.img)
#FIXME: Work around to fix when the rect is too big
if (rect[0]+rect[2]) > self.img.width:
rect = (rect[0], rect[1], self.img.width-rect[0],self.img.height-rect[1])
if (rect[1]+rect[3]) > self.img.height:
rect = (rect[0], rect[1], self.img.width-rect[0],self.img.height-rect[1])
try:
imageROI = co.cv.GetSubRect(self.img, rect)
except cv2.error:
print "****** imageROI error _ocv", self.img, rect
if cascade:
points = co.cv.HaarDetectObjects( imageROI, cascade, self.storage,
1.2, 2, method, (20,20) )
else:
debug.exception( "ocvfw", "The Haar Classifier Cascade load Failed (ROI)" )
if points:
matches = [ [ ( int(r[0][0]*origSize[0]), int(r[0][1]*origSize[1])), \
( int((r[0][0]+r[0][3])+origSize[0]), int((r[0][1]+r[0][2])*origSize[1]) )] \
for r in points]
#matches = [ [ ( int(r[0][0]), int(r[0][1])), \
# ( int((r[0][0]+r[0][3])), int((r[0][1]+r[0][2])) )] \
# for r in points]
#FIXME: I don't think the matches are right
return matches
##########################################
# #
# THIS IS NOT USED YET #
# #
##########################################
def get_motion_points(self, imgRoi=None):
"""
Calculate the motion points in the image.
Arguments:
- self: The main object pointer.
- start: The start ROI point.
- end: The end ROI point.
- num: The number of points to return
Returns A list with the points found.
"""
mv = []
n_ = 4
timestamp = time.clock()/1.0
if imgRoi:
img = co.cv.GetSubRect( self.img, imgRoi )
imgSize = co.cv.cvSize( imgRoi.width, imgRoi.height )
self.imgRoi = img
else:
img = self.img
imgSize = self.imgSize
# Motion Related Variables
if not self.mhi or self.mhi.width != imgSize.width or self.mhi.height != imgSize.height:
self.buf = [ 0, 0, 0, 0 ]
self.lastFm = 0
self.mhiD = 1
self.maxTD = 0.5
self.minTD = 0.05
self.mask = co.cv.CreateImage( imgSize, 8, 1 )
self.mhi = co.cv.CreateImage( imgSize, 32, 1 )
self.orient = co.cv.CreateImage( imgSize, 32, 1 )
self.segmask = co.cv.CreateImage( imgSize, 32, 1 )
co.cv.SetZero( self.mhi )
for i in range( n_ ):
self.buf[i] = co.cv.CreateImage( imgSize, 8, 1 )
co.cv.cvZero( self.buf[i] )
idx1 = self.lastFm
# convert frame to grayscale
cv2.cvtColor( img, self.buf[self.lastFm], cv2.CV_BGR2GRAY )
# index of (self.lastFm - (n_-1))th frame
idx2 = ( self.lastFm + 1 ) % n_
self.lastFm = idx2
silh = self.buf[idx2]
# Get difference between frames
co.cv.cvAbsDiff( self.buf[idx1], self.buf[idx2], silh )
# Threshold it
co.cv.cvThreshold( silh, silh, 30, 1, co.cv.CV_THRESH_BINARY )
# Update MHI
co.cv.cvUpdateMotionHistory( silh, self.mhi, timestamp, self.mhiD )
co.cv.cvCvtScale( self.mhi, self.mask, 255./self.mhiD, (self.mhiD - timestamp)*255./self.mhiD )
co.cv.cvCalcMotionGradient( self.mhi, self.mask, self.orient, self.maxTD, self.minTD, 3 )
co.cv.cvClearMemStorage( self.storage )
seq = co.cv.cvSegmentMotion( self.mhi, self.segmask, self.storage, timestamp, self.maxTD )
for i in range(0, seq.total):
if i < 0: # case of the whole image
continue
else: # i-th motion component
# Movement Rectangle
mRect = seq[i].rect
# reject very small components
if( mRect.width + mRect.height < 30 ):
continue
center = co.cv.Point( (mRect.x + mRect.width/2), (mRect.y + mRect.height/2) )
silhRoi = co.cv.cvGetSubRect(silh, mRect)
count = co.cv.cvNorm( silhRoi, None, co.cv.CV_L1, None )
# calculate number of points within silhouette ROI
if( count < mRect.width * mRect.height * 0.05 ):
continue
mv.append(center)
return mv
|
gpl-2.0
|
justyns/emacs-for-python
|
python-libs/ropemacs/__init__.py
|
10
|
19234
|
"""ropemacs, an emacs mode for using rope refactoring library"""
import sys
import ropemode.decorators
import ropemode.environment
import ropemode.interface
from Pymacs import lisp
from rope.base import utils
class LispUtils(ropemode.environment.Environment):
def ask(self, prompt, default=None, starting=None):
if default is not None:
prompt = prompt + ('[%s] ' % default)
result = lisp.read_from_minibuffer(prompt, starting, None, None,
None, default, None)
if result == '' and default is not None:
return default
return result
def ask_values(self, prompt, values, default=None, starting=None, exact=True):
if self._emacs_version() < 22:
values = [[value, value] for value in values]
if exact and default is not None:
prompt = prompt + ('[%s] ' % default)
reader = lisp['ropemacs-completing-read-function'].value()
result = reader(prompt, values, None, exact, starting)
if result == '' and exact:
return default
return result
def ask_completion(self, prompt, values, starting=None):
return self.ask_values(prompt, values, starting=starting, exact=None)
def ask_directory(self, prompt, default=None, starting=None):
location = starting or default
if location is not None:
prompt = prompt + ('[%s] ' % location)
if lisp.fboundp(lisp['read-directory-name']):
# returns default when starting is entered
result = lisp.read_directory_name(prompt, location, location)
else:
result = lisp.read_file_name(prompt, location, location)
if result == '' and location is not None:
return location
return result
def message(self, msg):
message(msg)
def yes_or_no(self, prompt):
return lisp.yes_or_no_p(prompt)
def y_or_n(self, prompt):
return lisp.y_or_n_p(prompt)
def get(self, name, default=None):
lispname = 'ropemacs-' + name.replace('_', '-')
if lisp.boundp(lisp[lispname]):
return lisp[lispname].value()
return default
def get_offset(self):
return lisp.point() - 1
def get_text(self):
end = lisp.buffer_size() + 1
old_min = lisp.point_min()
old_max = lisp.point_max()
narrowed = (old_min != 1 or old_max != end)
if narrowed:
lisp.narrow_to_region(1, lisp.buffer_size() + 1)
try:
return lisp.buffer_string()
finally:
if narrowed:
lisp.narrow_to_region(old_min, old_max)
def get_region(self):
offset1 = self.get_offset()
lisp.exchange_point_and_mark()
offset2 = self.get_offset()
lisp.exchange_point_and_mark()
return min(offset1, offset2), max(offset1, offset2)
def filename(self):
return lisp.buffer_file_name()
def is_modified(self):
return lisp.buffer_modified_p()
def goto_line(self, lineno):
lisp.goto_line(lineno)
def insert_line(self, line, lineno):
current = lisp.point()
lisp.goto_line(lineno)
lisp.insert(line + '\n')
lisp.goto_char(current + len(line) + 1)
def insert(self, text):
lisp.insert(text)
def delete(self, start, end):
lisp.delete_region(start, end)
def filenames(self):
result = []
for buffer in lisp.buffer_list():
filename = lisp.buffer_file_name(buffer)
if filename:
result.append(filename)
return result
def save_files(self, filenames):
ask = self.get('confirm_saving')
initial = lisp.current_buffer()
for filename in filenames:
buffer = lisp.find_buffer_visiting(filename)
if buffer:
if lisp.buffer_modified_p(buffer):
if not ask or lisp.y_or_n_p('Save %s buffer?' % filename):
lisp.set_buffer(buffer)
lisp.save_buffer()
lisp.set_buffer(initial)
def reload_files(self, filenames, moves={}):
if self.filename() in moves:
initial = None
else:
initial = lisp.current_buffer()
for filename in filenames:
buffer = lisp.find_buffer_visiting(filename)
if buffer:
if filename in moves:
lisp.kill_buffer(buffer)
lisp.find_file(moves[filename])
else:
lisp.set_buffer(buffer)
lisp.revert_buffer(False, True)
if initial is not None:
lisp.set_buffer(initial)
def find_file(self, filename, readonly=False, other=False):
if other:
lisp.find_file_other_window(filename)
elif readonly:
lisp.find_file_read_only(filename)
else:
lisp.find_file(filename)
def _make_buffer(self, name, contents, empty_goto=True, switch=False,
window='other', modes=[], fit_lines=None):
"""Make an emacs buffer
`window` can be one of `None`, 'current' or 'other'.
"""
new_buffer = lisp.get_buffer_create(name)
lisp.set_buffer(new_buffer)
lisp.toggle_read_only(-1)
lisp.erase_buffer()
if contents or empty_goto:
lisp.insert(contents)
for mode in modes:
lisp[mode + '-mode']()
lisp.buffer_disable_undo(new_buffer)
lisp.toggle_read_only(1)
if switch:
if window == 'current':
lisp.switch_to_buffer(new_buffer)
else:
lisp.switch_to_buffer_other_window(new_buffer)
lisp.goto_char(lisp.point_min())
elif window == 'other':
new_window = lisp.display_buffer(new_buffer)
lisp.set_window_point(new_window, lisp.point_min())
if fit_lines and lisp.fboundp(lisp['fit-window-to-buffer']):
lisp.fit_window_to_buffer(new_window, fit_lines)
lisp.bury_buffer(new_buffer)
return new_buffer
def _hide_buffer(self, name, delete=True):
buffer = lisp.get_buffer(name)
if buffer is not None:
window = lisp.get_buffer_window(buffer)
if window is not None:
lisp.bury_buffer(buffer)
if delete:
lisp.delete_window(window)
else:
if lisp.buffer_name(lisp.current_buffer()) == name:
lisp.switch_to_buffer(None)
def _emacs_version(self):
return int(lisp['emacs-version'].value().split('.')[0])
def create_progress(self, name):
if lisp.fboundp(lisp['make-progress-reporter']):
progress = _LispProgress(name)
else:
progress = _OldProgress(name)
return progress
def current_word(self):
return lisp.current_word()
def push_mark(self):
lisp.push_mark()
def prefix_value(self, prefix):
return lisp.prefix_numeric_value(prefix)
def show_occurrences(self, locations):
text = ['List of occurrences:', '']
for location in locations:
line = '%s : %s %s %s' % (location.filename, location.lineno,
location.note, location.offset)
text.append(line)
text = '\n'.join(text) + '\n'
buffer = self._make_buffer('*rope-occurrences*', text, switch=False)
lisp.set_buffer(buffer)
lisp.toggle_read_only(1)
lisp.set(lisp["next-error-function"], lisp.rope_occurrences_next)
lisp.local_set_key('\r', lisp.rope_occurrences_goto)
lisp.local_set_key('q', lisp.delete_window)
def show_doc(self, docs, altview=False):
use_minibuffer = not altview
if self.get('separate_doc_buffer'):
use_minibuffer = not use_minibuffer
if not use_minibuffer:
fit_lines = self.get('max_doc_buffer_height')
buffer = self._make_buffer('*rope-pydoc*', docs,
empty_goto=False,
fit_lines=fit_lines)
lisp.local_set_key('q', lisp.bury_buffer)
elif docs:
docs = '\n'.join(docs.split('\n')[:7])
self.message(docs)
def preview_changes(self, diffs):
self._make_buffer('*rope-preview*', diffs, switch=True,
modes=['diff'], window='current')
try:
return self.yes_or_no('Do the changes? ')
finally:
self._hide_buffer('*rope-preview*', delete=False)
def local_command(self, name, callback, key=None, prefix=False):
globals()[name] = callback
self._set_interaction(callback, prefix)
if self.local_prefix and key:
key = self._key_sequence(self.local_prefix + ' ' + key)
self._bind_local(_lisp_name(name), key)
def _bind_local(self, name, key):
lisp('(define-key ropemacs-local-keymap "%s" \'%s)' %
(self._key_sequence(key), name))
def global_command(self, name, callback, key=None, prefix=False):
globals()[name] = callback
self._set_interaction(callback, prefix)
if self.global_prefix and key:
key = self._key_sequence(self.global_prefix + ' ' + key)
lisp.global_set_key(key, lisp[_lisp_name(name)])
def _key_sequence(self, sequence):
result = []
for key in sequence.split():
if key.startswith('C-'):
number = ord(key[-1].upper()) - ord('A') + 1
result.append(chr(number))
elif key.startswith('M-'):
number = ord(key[-1].upper()) + 0x80
result.append(chr(number))
else:
result.append(key)
return ''.join(result)
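# Illustrative note (not part of ropemacs): _key_sequence turns an
# Emacs-style key description into the raw characters Emacs expects, e.g.
#   self._key_sequence('C-c r')  ->  '\x03r'      (Control-C is chr(3))
#   self._key_sequence('M-/')    ->  chr(ord('/') + 0x80)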
def _set_interaction(self, callback, prefix):
if hasattr(callback, 'im_func'):
callback = callback.im_func
if prefix:
callback.interaction = 'P'
else:
callback.interaction = ''
def add_hook(self, name, callback, hook):
mapping = {'before_save': 'before-save-hook',
'after_save': 'after-save-hook',
'exit': 'kill-emacs-hook'}
globals()[name] = callback
lisp.add_hook(lisp[mapping[hook]], lisp[_lisp_name(name)])
def project_opened(self):
'''
This method is called when a new project is opened; it runs
the hooks associated with rope-open-project-hook.
'''
lisp.run_hooks(lisp["rope-open-project-hook"])
@property
@utils.saveit
def global_prefix(self):
return self.get('global_prefix')
@property
@utils.saveit
def local_prefix(self):
return self.get('local_prefix')
def _lisp_name(name):
return 'rope-' + name.replace('_', '-')
class _LispProgress(object):
def __init__(self, name):
self.progress = lisp.make_progress_reporter('%s ... ' % name, 0, 100)
def update(self, percent):
lisp.progress_reporter_update(self.progress, percent)
def done(self):
lisp.progress_reporter_done(self.progress)
class _OldProgress(object):
def __init__(self, name):
self.name = name
self.update(0)
def update(self, percent):
if percent != 0:
message('%s ... %s%%%%' % (self.name, percent))
else:
message('%s ... ' % self.name)
def done(self):
message('%s ... done' % self.name)
def message(message):
lisp.message(message.replace('%', '%%'))
def occurrences_goto():
if lisp.line_number_at_pos() < 3:
lisp.forward_line(3 - lisp.line_number_at_pos())
lisp.end_of_line()
end = lisp.point()
lisp.beginning_of_line()
line = lisp.buffer_substring_no_properties(lisp.point(), end)
tokens = line.split()
if tokens:
filename = tokens[0]
offset = int(tokens[-1])
resource = _interface._get_resource(filename)
LispUtils().find_file(resource.real_path, other=True)
lisp.goto_char(offset + 1)
occurrences_goto.interaction = ''
def occurrences_next(arg, reset):
lisp.switch_to_buffer_other_window('*rope-occurrences*', True)
if reset:
lisp.goto_char(lisp.point_min())
lisp.forward_line(arg)
if lisp.eobp():
lisp.message("Cycling rope occurences")
lisp.goto_char(lisp.point_min())
occurrences_goto()
occurrences_next.interaction = ''
DEFVARS = """\
(defgroup ropemacs nil
"ropemacs, an emacs plugin for rope."
:link '(url-link "http://rope.sourceforge.net/ropemacs.html")
:prefix "rope-")
(defcustom ropemacs-confirm-saving t
"Shows whether to confirm saving modified buffers before refactorings.
If non-nil, you have to confirm saving all modified
python files before refactorings; otherwise they are
saved automatically.")
(defcustom ropemacs-codeassist-maxfixes 1
"The number of errors to fix before code-assist.
How many errors to fix, at most, when proposing code completions.")
(defcustom ropemacs-separate-doc-buffer t
"Should `rope-show-doc' use a separate buffer or the minibuffer.")
(defcustom ropemacs-max-doc-buffer-height 22
"The maximum buffer height for `rope-show-doc'.")
(defcustom ropemacs-enable-autoimport 'nil
"Specifies whether autoimport should be enabled.")
(defcustom ropemacs-autoimport-modules nil
"The name of modules whose global names should be cached.
The `rope-generate-autoimport-cache' reads this list and fills its
cache.")
(defcustom ropemacs-autoimport-underlineds 'nil
"If set, autoimport will cache names starting with underlines, too.")
(defcustom ropemacs-completing-read-function (if (and (boundp 'ido-mode)
ido-mode)
'ido-completing-read
'completing-read)
"Function to call when prompting user to choose between a list of options.
This should take the same arguments as `completing-read'.
Possible values are `completing-read' and `ido-completing-read'.
Note that you must set `ido-mode' if using `ido-completing-read'."
:type 'function)
(make-obsolete-variable
'rope-confirm-saving 'ropemacs-confirm-saving)
(make-obsolete-variable
'rope-code-assist-max-fixes 'ropemacs-codeassist-maxfixes)
(defcustom ropemacs-local-prefix "C-c r"
"The prefix for ropemacs refactorings.
Use nil to prevent binding keys.")
(defcustom ropemacs-global-prefix "C-x p"
"The prefix for ropemacs project commands.
Use nil to prevent binding keys.")
(defcustom ropemacs-enable-shortcuts 't
"Shows whether to bind ropemacs shortcuts keys.
If non-nil it binds:
================ ============================
Key Command
================ ============================
M-/ rope-code-assist
C-c g rope-goto-definition
C-c d rope-show-doc
C-c f rope-find-occurrences
M-? rope-lucky-assist
================ ============================
")
(defvar ropemacs-local-keymap (make-sparse-keymap))
(easy-menu-define ropemacs-mode-menu ropemacs-local-keymap
"`ropemacs' menu"
'("Rope"
["Code assist" rope-code-assist t]
["Lucky assist" rope-lucky-assist t]
["Goto definition" rope-goto-definition t]
["Jump to global" rope-jump-to-global t]
["Show documentation" rope-show-doc t]
["Find Occurrences" rope-find-occurrences t]
["Analyze module" rope-analyze-module t]
("Refactor"
["Inline" rope-inline t]
["Extract Variable" rope-extract-variable t]
["Extract Method" rope-extract-method t]
["Organize Imports" rope-organize-imports t]
["Rename" rope-rename t]
["Move" rope-move t]
["Restructure" rope-restructure t]
["Use Function" rope-use-function t]
["Introduce Factory" rope-introduce-factory t]
("Generate"
["Class" rope-generate-class t]
["Function" rope-generate-function t]
["Module" rope-generate-module t]
["Package" rope-generate-package t]
["Variable" rope-generate-variable t]
)
("Module"
["Module to Package" rope-module-to-package t]
["Rename Module" rope-rename-current-module t]
["Move Module" rope-move-current-module t]
)
"--"
["Undo" rope-undo t]
["Redo" rope-redo t]
)
("Project"
["Open project" rope-open-project t]
["Close project" rope-close-project t]
["Find file" rope-find-file t]
["Open project config" rope-project-config t]
)
("Create"
["Module" rope-create-module t]
["Package" rope-create-package t]
["File" rope-create-file t]
["Directory" rope-create-directory t]
)
))
(defcustom ropemacs-guess-project 'nil
"Try to guess the project when needed.
If non-nil, ropemacs tries to guess and open the project that contains
a file on which the rope command is performed when no project is
already opened.")
(provide 'ropemacs)
"""
MINOR_MODE = """\
(define-minor-mode ropemacs-mode
"ropemacs, rope in emacs!" nil " Rope" ropemacs-local-keymap
:global nil)
)
"""
shortcuts = [('M-/', 'rope-code-assist'),
('M-?', 'rope-lucky-assist'),
('C-c g', 'rope-goto-definition'),
('C-c d', 'rope-show-doc'),
('C-c f', 'rope-find-occurrences')]
_interface = None
def _load_ropemacs():
global _interface
ropemode.decorators.logger.message = message
lisp(DEFVARS)
_interface = ropemode.interface.RopeMode(env=LispUtils())
_interface.init()
lisp(MINOR_MODE)
if LispUtils().get('enable_shortcuts'):
for key, command in shortcuts:
LispUtils()._bind_local(command, key)
lisp.add_hook(lisp['python-mode-hook'], lisp['ropemacs-mode'])
def _started_from_pymacs():
import inspect
frame = sys._getframe()
while frame:
# checking frame.f_code.co_name == 'pymacs_load_helper' might
# be very fragile.
if inspect.getfile(frame).rstrip('c').endswith('Pymacs.py'):
return True
frame = frame.f_back
if _started_from_pymacs():
_load_ropemacs()
|
gpl-3.0
|
jamiejackherer/pyFilm
|
pyfilm/GoogleScraper/caching.py
|
2
|
20407
|
# -*- coding: utf-8 -*-
import os
import time
import hashlib
import gzip
import bz2
import re
from sqlalchemy.orm.exc import NoResultFound
from GoogleScraper.database import SearchEngineResultsPage
from GoogleScraper.parsing import parse_serp
from GoogleScraper.output_converter import store_serp_result
import logging
"""
GoogleScraper is a complex application and thus searching is error prone. While developing,
you may need to repeat the same searches several times and you might end up being banned by
the search engine providers. This is why all searches are cached by default.
Every SERP page is cached in a separate file. In the future, it might be more straightforward to
cache scraping jobs in archives (zip files).
What determines the uniqueness of a SERP result?
- The complete url (because in URLs search queries and params are included)
- The scrape mode: Raw Http might request different resources than a browser.
- Optionally the http headers (because different User-Agents yield different results)
Using these three pieces of information would guarantee that we cache only unique requests,
but then we couldn't read back the information of the cache files, since these parameters
are only available at runtime of the scrapers. So we have to be satisfied with the
keyword, search_engine and scrapemode as identifying params.
How does caching work on a higher level?
Assume the user interrupted his scrape job at 1000/2000 keywords and there remain
quite some keywords to scrape for. Then the previously parsed 1000 results are already
stored in the database and shouldn't be added a second time.
"""
logger = logging.getLogger(__name__)
ALLOWED_COMPRESSION_ALGORITHMS = ('gz', 'bz2')
class InvalidConfigurationFileException(Exception):
"""
Used when the cache module cannot
determine the kind (compression for instance) of a
configuration file
"""
pass
class CompressedFile(object):
"""Read and write the data of a compressed file.
Used to cache files for GoogleScraper.
Supported algorithms: gz, bz2
>>> import os
>>> f = CompressedFile('/tmp/test.txt', algorithm='gz')
>>> f.write('hello world')
>>> assert os.path.exists('/tmp/test.txt.gz')
>>> f2 = CompressedFile('/tmp/test.txt.gz', algorithm='gz')
>>> assert f2.read() == 'hello world'
"""
def __init__(self, path, algorithm='gz'):
"""Create a new compressed file to read and write data to.
Args:
algorithm: Which algorithm to use.
path: A valid file path to the file to read/write. Depends
on the action called.
@todo: it would be a better approach to pass an Algorithm object instead of a string
"""
self.algorithm = algorithm
assert self.algorithm in ALLOWED_COMPRESSION_ALGORITHMS, \
'{algo} is not a supported compression algorithm'.format(algo=self.algorithm)
if path.endswith(self.algorithm):
self.path = path
else:
self.path = '{path}.{ext}'.format(path=path, ext=algorithm)
self.readers = {
'gz': self.read_gz,
'bz2': self.read_bz2
}
self.writers = {
'gz': self.write_gz,
'bz2': self.write_bz2
}
def read_gz(self):
with gzip.open(self.path, 'rb') as f:
return f.read().decode()
def read_bz2(self):
with bz2.open(self.path, 'rb') as f:
return f.read().decode()
def write_gz(self, data):
with gzip.open(self.path, 'wb') as f:
f.write(data)
def write_bz2(self, data):
with bz2.open(self.path, 'wb') as f:
f.write(data)
def read(self):
assert os.path.exists(self.path)
return self.readers[self.algorithm]()
def write(self, data):
if not isinstance(data, bytes):
data = data.encode()
return self.writers[self.algorithm](data)
class CacheManager():
"""
Manages caching for GoogleScraper.
"""
def __init__(self, config):
self.config = config
self.maybe_create_cache_dir()
def maybe_create_cache_dir(self):
if self.config.get('do_caching', True):
cd = self.config.get('cachedir', '.scrapecache')
if not os.path.exists(cd):
os.mkdir(cd)
def maybe_clean_cache(self):
"""
Clean the cache.
Clean all cached searches (the obtained html code) in the cache directory iff
the respective files are older than specified in the configuration. Defaults to 48 hours.
"""
cachedir = self.config.get('cachedir', '.scrapecache')
if os.path.exists(cachedir):
for fname in os.listdir(cachedir):
path = os.path.join(cachedir, fname)
if time.time() > os.path.getmtime(path) + (60 * 60 * int(self.config.get('clean_cache_after', 48))):
# Remove the whole directory if necessary
if os.path.isdir(path):
import shutil
shutil.rmtree(path)
else:
os.remove(os.path.join(cachedir, fname))
def cached_file_name(self, keyword, search_engine, scrape_mode, page_number):
"""Make a unique file name from the search engine search request.
Important! The order of the sequence is darn important! If search queries have the same
words but in a different order, they are unique searches.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: The number of the SERP page.
Returns:
A unique file name based on the parameters of the search request.
"""
assert isinstance(keyword, str), 'Keyword {} must be a string'.format(keyword)
assert isinstance(search_engine, str), 'Search engine {} must be a string'.format(search_engine)
assert isinstance(scrape_mode, str), 'Scrapemode {} needs to be a string'.format(scrape_mode)
assert isinstance(page_number, int), 'Page_number {} needs to be an int'.format(page_number)
unique = [keyword, search_engine, scrape_mode, page_number]
sha = hashlib.sha256()
sha.update(b''.join(str(s).encode() for s in unique))
return '{file_name}.{extension}'.format(file_name=sha.hexdigest(), extension='cache')
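# Illustrative sketch (not part of GoogleScraper; `manager` is made up):
#   manager.cached_file_name('foo bar', 'google', 'http', 1)
#   manager.cached_file_name('foo bar', 'google', 'http', 2)
# Each call concatenates str(keyword) + str(search_engine) + str(scrape_mode)
# + str(page_number), hashes the bytes with SHA-256 and appends '.cache', so
# the two calls above produce two distinct 64-character hex file names.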
def get_cached(self, keyword, search_engine, scrapemode, page_number):
"""Loads a cached SERP result.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
The contents of the HTML that was shipped while searching, or False if no
cached file could be found for the given params.
"""
if self.config.get('do_caching', False):
fname = self.cached_file_name(keyword, search_engine, scrapemode, page_number)
cdir = self.config.get('cachedir', '.scrapecache')
if fname in os.listdir(cdir):
# If the cached file is older than the configured 'clean_cache_after'
# interval (in hours), return False and thus make a fresh request.
try:
modtime = os.path.getmtime(os.path.join(cdir, fname))
except FileNotFoundError:
return False
if (time.time() - modtime) / 60 / 60 > int(self.config.get('clean_cache_after', 48)):
return False
path = os.path.join(cdir, fname)
return self.read_cached_file(path)
else:
return False
def read_cached_file(self, path):
"""Read a compressed or uncompressed file.
The compressing schema is determined by the file extension. For example
a file that ends with .gz needs to be gunzipped.
Supported algorithms:
gzip and bzip2
Args:
path: The path to the cached file.
Returns:
The data of the cached file as a string.
Raises:
InvalidConfigurationFileException: When the type of the cached file
cannot be determined.
"""
if self.config.get('do_caching', False):
ext = path.split('.')[-1]
# The path needs to have an extension in any case.
# When uncompressed, ext is 'cache', else it is the
# compressing scheme file ending like .gz or .bz2 ...
assert ext in ALLOWED_COMPRESSION_ALGORITHMS or ext == 'cache', 'Invalid extension: {}'.format(ext)
if ext == 'cache':
with open(path, 'r') as fd:
try:
data = fd.read()
return data
except UnicodeDecodeError as e:
logger.warning(str(e))
# If we get this error, the cache files are probably
# compressed but the 'compress_cached_files' flag was
# set to False. Try to decompress them, but this may
# lead to an infinite recursion. This isn't proper coding,
# but convenient for the end user.
self.config['compress_cached_files'] = True
elif ext in ALLOWED_COMPRESSION_ALGORITHMS:
f = CompressedFile(path)
return f.read()
else:
raise InvalidConfigurationFileException('"{}" is an invalid configuration file.'.format(path))
def cache_results(self, parser, query, search_engine, scrape_mode, page_number, db_lock=None):
"""Stores the html of an parser in a file.
The file name is determined by the parameters query, search_engine, scrape_mode and page_number.
See cached_file_name() for more information.
This will always write(overwrite) the cached file. If compress_cached_files is
True, the page is written in bytes (obviously).
Args:
parser: A parser with the data to cache.
query: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrape_mode: The scrapemode that was used.
page_number: The number of the SERP page.
db_lock: If an db_lock is given, all action are wrapped in this lock.
"""
if self.config.get('do_caching', False):
if db_lock:
db_lock.acquire()
if self.config.get('minimize_caching_files', True):
html = parser.cleaned_html
else:
html = parser.html
fname = self.cached_file_name(query, search_engine, scrape_mode, page_number)
cachedir = self.config.get('cachedir', '.scrapecache')
path = os.path.join(cachedir, fname)
if self.config.get('compress_cached_files'):
algorithm = self.config.get('compressing_algorithm', 'gz')
f = CompressedFile(path, algorithm=algorithm)
f.write(html)
else:
with open(path, 'w') as fd:
if isinstance(html, bytes):
fd.write(html.decode())
else:
fd.write(html)
if db_lock:
db_lock.release()
def _get_all_cache_files(self):
"""Return all files found in the cachedir.
Returns:
All files that have the string "cache" in it within the cache directory.
Files are either uncompressed "filename.cache" or compressed with a
supported algorithm, e.g. "filename.cache.gz"
"""
files = set()
for dirpath, dirname, filenames in os.walk(self.config.get('cachedir', '.scrapecache')):
for name in filenames:
if 'cache' in name:
files.add(os.path.join(dirpath, name))
return files
def _caching_is_one_to_one(self, keywords, search_engine, scrapemode, page_number):
"""Check whether all keywords map to a unique file name.
Args:
keywords: All keywords for which to check the uniqueness of the hash
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
True if all keywords map to a unique hash and False if not.
"""
mappings = {}
for kw in keywords:
file_hash = self.cached_file_name(kw, search_engine, scrapemode, page_number)
if file_hash not in mappings:
mappings.update({file_hash: [kw, ]})
else:
mappings[file_hash].append(kw)
duplicates = [v for k, v in mappings.items() if len(v) > 1]
if duplicates:
logger.info('Not one-to-one. {}'.format(duplicates))
return False
else:
logger.info('one-to-one')
return True
def parse_all_cached_files(self, scrape_jobs, session, scraper_search):
"""Walk recursively through the cachedir (as given by the Config) and parse all cached files.
Args:
session: An sql alchemy session to add the entities
scraper_search: Abstract object representing the current search.
Returns:
The scrape jobs that couldn't be parsed from the cache directory.
"""
files = self._get_all_cache_files()
num_cached = num_total = 0
mapping = {}
for job in scrape_jobs:
cache_name = self.cached_file_name(
job['query'],
job['search_engine'],
job['scrape_method'],
job['page_number']
)
mapping[cache_name] = job
num_total += 1
for path in files:
# strip off the extension of the path if it has any
fname = os.path.split(path)[1]
clean_filename = fname
for ext in ALLOWED_COMPRESSION_ALGORITHMS:
if fname.endswith(ext):
clean_filename = fname.rstrip('.' + ext)
job = mapping.get(clean_filename, None)
if job:
# We found a file that contains the keyword, search engine name and
# search mode that fits our description. Let's see if there is already
# a record in the database and link it to our new ScraperSearch object.
serp = self.get_serp_from_database(session, job['query'], job['search_engine'], job['scrape_method'],
job['page_number'])
if not serp:
serp = self.parse_again(fname, job['search_engine'], job['scrape_method'], job['query'])
serp.scraper_searches.append(scraper_search)
session.add(serp)
if num_cached % 200 == 0:
session.commit()
store_serp_result(serp, self.config)
num_cached += 1
scrape_jobs.remove(job)
logger.info('{} cache files found in {}'.format(len(files), self.config.get('cachedir')))
logger.info('{}/{} objects have been read from the cache. {} remain to get scraped.'.format(
num_cached, num_total, num_total - num_cached))
session.add(scraper_search)
session.commit()
return scrape_jobs
def parse_again(self, fname, search_engine, scrape_method, query):
"""
@todo: `scrape_method` is not used here -> check if scrape_method is passed to this function and remove it
"""
path = os.path.join(self.config.get('cachedir', '.scrapecache'), fname)
html = self.read_cached_file(path)
return parse_serp(
self.config,
html=html,
search_engine=search_engine,
query=query
)
def get_serp_from_database(self, session, query, search_engine, scrape_method, page_number):
try:
serp = session.query(SearchEngineResultsPage).filter(
SearchEngineResultsPage.query == query,
SearchEngineResultsPage.search_engine_name == search_engine,
SearchEngineResultsPage.scrape_method == scrape_method,
SearchEngineResultsPage.page_number == page_number).first()
return serp
except NoResultFound:
# that shouldn't happen
# we have a cache file that matches the above identifying information
# but it was never stored to the database.
return False
def clean_cachefiles(self):
"""Clean silly html from all cachefiles in the cachdir"""
if input(
'Do you really want to strip all cache files from bloating tags such as <script> and <style>? ').startswith(
'y'):
import lxml.html
from lxml.html.clean import Cleaner
cleaner = Cleaner()
cleaner.style = True
cleaner.scripts = True
cleaner.javascript = True
for file in self._get_all_cache_files():
cfile = CompressedFile(file)
data = cfile.read()
cleaned = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(data)))
cfile.write(cleaned)
logger.info('Cleaned {}. Size before: {}, after {}'.format(file, len(data), len(cleaned)))
def fix_broken_cache_names(self, url, search_engine, scrapemode, page_number):
"""Fix broken cache names.
Args:
url: A list of strings to add to each cached_file_name() call.
@todo: `url` is not used here -> check whether `url` is actually passed to this function and remove it
"""
files = self._get_all_cache_files()
logger.debug('{} cache files found in {}'.format(len(files), self.config.get('cachedir', '.scrapecache')))
r = re.compile(r'<title>(?P<kw>.*?) - Google Search</title>')
i = 0
for path in files:
fname = os.path.split(path)[1].strip()
data = self.read_cached_file(path)
infilekws = r.search(data).group('kw')
realname = self.cached_file_name(infilekws, search_engine, scrapemode, page_number)
if fname != realname:
logger.debug('The search query in the title element in file {} differs from the hash of its name. Fixing...'.format(path))
src = os.path.abspath(path)
dst = os.path.abspath(os.path.join(os.path.split(path)[0], realname))
logger.debug('Renamed from {} => {}'.format(src, dst))
os.rename(src, dst)
i += 1
logger.debug('Renamed {} files.'.format(i))
def cached(self, f, attr_to_cache=None):
"""Decorator that makes return value of functions cachable.
Any function that returns a value and that is decorated with
cached will be supplied with the previously calculated result of
an earlier call. The parameter name with the cached value may
be set with attr_to_cache.
Args:
attr_to_cache: The name of attribute whose data
is cachable.
Returns: The modified and wrapped function.
@todo: `attr_to_cache` is not used here -> check whether `attr_to_cache` is actually passed to this function and remove it
"""
def wraps(*args, **kwargs):
cached_value = self.get_cached(*args, params=kwargs)
if cached_value:
f(*args, attr_to_cache=cached_value, **kwargs)
else:
# Nothing was cached for this attribute
value = f(*args, attr_to_cache=None, **kwargs)
self.cache_results(value, *args, params=kwargs)
return wraps
if __name__ == '__main__':
import doctest
doctest.testmod()
|
mit
|
xzturn/tensorflow
|
tensorflow/python/keras/applications/mobilenet.py
|
2
|
20221
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
Reference paper:
- [MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications](https://arxiv.org/abs/1704.04861)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet/')
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the MobileNet architecture.
Reference paper:
- [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications](https://arxiv.org/abs/1704.04861)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in the `tf.keras.backend.image_data_format()`.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.mobilenet.preprocess_input` for an example.
Arguments:
input_shape: Optional shape tuple, only to be specified if `include_top`
is False (otherwise the input shape has to be `(224, 224, 3)` (with
`channels_last` data format) or (3, 224, 224) (with `channels_first`
      data format)). It should have exactly 3 input channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would be one
valid value. Default to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally
decreases the number of filters in each layer. - If `alpha` > 1.0,
proportionally increases the number of filters in each layer. - If
`alpha` = 1, default number of filters from the paper are used at each
layer. Default to 1.0.
depth_multiplier: Depth multiplier for depthwise convolution. This is
called the resolution multiplier in the MobileNet paper. Default to 1.0.
dropout: Dropout rate. Default to 0.001.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Default to `True`.
weights: One of `None` (random initialization), 'imagenet' (pre-training
on ImageNet), or the path to the weights file to be loaded. Default to
`imagenet`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to
use as image input for the model. `input_tensor` is useful for sharing
inputs between multiple different networks. Default to None.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified. Defaults to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if 'layers' in kwargs:
global layers
layers = kwargs.pop('layers')
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not in [128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if backend.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = layers.GlobalAveragePooling2D()(x)
x = layers.Reshape(shape, name='reshape_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = layers.Reshape((classes,), name='reshape_2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
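# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of what the docstring above describes: build a reduced-width
# MobileNet with pretrained ImageNet weights and classify one image. The helper
# name `_example_mobilenet_inference` and the file name 'example.jpg' are
# hypothetical; `preprocess_input` and `decode_predictions` are the helpers
# defined further below in this module.
def _example_mobilenet_inference(image_path='example.jpg'):
  """Return the top-3 ImageNet predictions for a single image."""
  import numpy as np
  from tensorflow.python.keras.preprocessing import image as image_utils
  model = MobileNet(alpha=0.75, weights='imagenet')  # downloads pretrained weights
  img = image_utils.load_img(image_path, target_size=(224, 224))
  x = image_utils.img_to_array(img)[np.newaxis, ...]  # shape (1, 224, 224, 3)
  x = preprocess_input(x)  # scales pixel values to [-1, 1] ('tf' mode)
  preds = model.predict(x)
  return decode_predictions(preds, top=3)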
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Arguments:
inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
data format) or (3, rows, cols) (with `channels_first` data format).
      It should have exactly 3 input channels, and width and height should
be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the width and
height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1. # Input shape
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)), name='conv1_pad')(inputs)
x = layers.Conv2D(
filters,
kernel,
padding='valid',
use_bias=False,
strides=strides,
name='conv1')(
x)
x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return layers.ReLU(6., name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Arguments:
inputs: Input tensor of shape `(rows, cols, channels)` (with
`channels_last` data format) or (channels, rows, cols) (with
`channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape: `(batch, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(
inputs)
x = layers.DepthwiseConv2D((3, 3),
padding='same' if strides == (1, 1) else 'valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_dw_%d_bn' % block_id)(
x)
x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
x = layers.Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_pw_%d_bn' % block_id)(
x)
return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
@keras_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x, data_format=None):
"""Preprocesses a numpy array encoding a batch of images.
Arguments
x: A 4D numpy array consists of RGB values within [0, 255].
Returns
Preprocessed array.
Raises
ValueError: In case of unknown `data_format` argument.
"""
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.mobilenet.decode_predictions')
def decode_predictions(preds, top=5):
"""Decodes the prediction result from the model.
Arguments
preds: Numpy tensor encoding a batch of predictions.
top: Integer, how many top-guesses to return.
Returns
A list of lists of top class prediction tuples
`(class_name, class_description, score)`.
One list of tuples per sample in batch input.
Raises
ValueError: In case of invalid shape of the `preds` array (must be 2D).
"""
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
|
apache-2.0
|
lakewik/storj-gui-client
|
UI/__init__.py
|
1
|
2608
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import os
import click
import logging
import logging.config as config
from logging import handlers
APP_NAME = 'storj-gui'
"""(str): the application name."""
def setup_logging():
"""Reads the Storj GUI logging configuration from logging.conf.
If the file does not exist it will load a default configuration.
Mac OS X (POSIX):
~/.storj-gui
Unix (POSIX):
~/.storj-gui
Win XP (not roaming):
``C:\Documents and Settings\<user>\Application Data\storj-gui``
Win 7 (not roaming):
``C:\\Users\<user>\AppData\Local\storj-gui``
"""
logging_conf = os.path.join(
click.get_app_dir(APP_NAME, force_posix=True),
'logging.conf')
if not os.path.exists(logging_conf) or not os.path.isfile(logging_conf):
load_default_logging()
logging.getLogger(__name__).warning('%s logging configuration file does not exist', logging_conf)
return
try:
config.fileConfig(logging_conf, disable_existing_loggers=False)
logging.getLogger(__name__).info('%s configuration file was loaded.', logging_conf)
except RuntimeError:
load_default_logging()
logging.getLogger(__name__).warning('failed to load configuration from %s', logging_conf)
return
logging.getLogger(__name__).info('using logging configuration from %s', logging_conf)
def load_default_logging():
"""Load default logging configuration:
- >=INFO messages will be written to storj-gui.log
- >=DEBUG messages will be written to stdout
    - >=ERROR messages will be written to stderr
"""
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# file
# maximum of 5 log files of 3MB
handler_file = handlers.RotatingFileHandler(
os.path.join(os.getcwd(), '%s.log' % APP_NAME),
maxBytes=(1048576 * 3), backupCount=5)
handler_file.setFormatter(formatter)
handler_file.setLevel(logging.INFO)
# stdout
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setFormatter(formatter)
# stdout should only get WARNING, INFO and DEBUG
handler_stdout.setLevel(logging.DEBUG)
# stderr
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setFormatter(formatter)
handler_stderr.setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler_file)
    logger.addHandler(handler_stdout)
    logger.addHandler(handler_stderr)
logging.getLogger(__name__).info('using default logging configuration')
setup_logging()
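# --- Illustrative sketch (not part of the original module) ---
# Once setup_logging() has run (as above), any module in the package can obtain
# a logger in the usual way; records are then routed according to either
# logging.conf or the default configuration described in load_default_logging().
# The function below is hypothetical and only demonstrates the call pattern.
def _logging_usage_example():
    log = logging.getLogger(APP_NAME)
    log.info('example record written via the configured handlers')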
|
mit
|
FinalAngel/django-cms
|
cms/tests/test_signals.py
|
4
|
4334
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.utils import override_settings
from cms.api import create_page
from cms.models import UrlconfRevision
from cms.signals import urls_need_reloading
from cms.test_utils.project.sampleapp.cms_apps import SampleApp
from cms.test_utils.util.context_managers import apphooks, signal_tester
from cms.test_utils.testcases import CMSTestCase
class SignalTests(TestCase):
def test_urls_need_reloading_signal_create(self):
with apphooks(SampleApp):
with signal_tester(urls_need_reloading) as env:
self.client.get('/')
self.assertEqual(env.call_count, 0)
create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
self.client.get('/')
self.assertEqual(env.call_count, 1)
def test_urls_need_reloading_signal_delete(self):
with apphooks(SampleApp):
with signal_tester(urls_need_reloading) as env:
self.client.get('/')
self.assertEqual(env.call_count, 0)
page = create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
page.delete()
self.client.get('/')
self.assertEqual(env.call_count, 1)
def test_urls_need_reloading_signal_change_slug(self):
with apphooks(SampleApp):
with signal_tester(urls_need_reloading) as env:
self.assertEqual(env.call_count, 0)
page = create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
self.client.get('/')
self.assertEqual(env.call_count, 1)
title = page.title_set.get(language="en")
title.slug += 'test'
title.save()
page.publish('en')
self.client.get('/')
self.assertEqual(env.call_count, 2)
overrides = dict()
overrides['MIDDLEWARE' if getattr(settings, 'MIDDLEWARE', None) else 'MIDDLEWARE_CLASSES'] = [
'cms.middleware.utils.ApphookReloadMiddleware'
] + getattr(settings, 'MIDDLEWARE', getattr(settings, 'MIDDLEWARE_CLASSES', None))
@override_settings(**overrides)
class ApphooksReloadTests(CMSTestCase):
def test_urls_reloaded(self):
"""
Tests that URLs are automatically reloaded when the ApphookReload
middleware is installed.
"""
#
# Sets up an apphook'ed page, but does not yet publish it.
#
superuser = get_user_model().objects.create_superuser(
'admin', '[email protected]', 'admin')
page = create_page("home", "nav_playground.html", "en",
created_by=superuser)
page.publish('en')
app_page = create_page("app_page", "nav_playground.html", "en",
created_by=superuser, parent=page,
published=False, apphook="SampleApp")
self.client.get('/') # Required to invoke the middleware
#
# Gets the current urls revision for testing against later.
#
current_revision, _ = UrlconfRevision.get_or_create_revision()
#
# Publishes the apphook. This is one of many ways to trigger the
# firing of the signal. The tests above test some of the other ways
# already.
#
app_page.publish('en')
self.client.get('/') # Required to invoke the middleware
# And, this should result in a the updating of the UrlconfRevision
new_revision, _ = UrlconfRevision.get_or_create_revision()
self.assertNotEquals(current_revision, new_revision)
|
bsd-3-clause
|
sensepost/Snoopy
|
snoopy/server/transforms/fetchAllDomains.py
|
4
|
2606
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# [email protected]
# Snoopy // 2012
# By using this code you agree to abide by the supplied LICENSE.txt
import sys
import os
from Maltego import *
import stawk_db
import logging
import datetime
from common import *
logging.basicConfig(level=logging.DEBUG,filename='/tmp/maltego_logs.txt',format='%(asctime)s %(levelname)s: %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
sys.stderr = sys.stdout
def main():
print "Content-type: xml\n\n";
MaltegoXML_in = sys.stdin.read()
if MaltegoXML_in <> '':
#logging.debug(MaltegoXML_in)
m = MaltegoMsg(MaltegoXML_in)
cursor=stawk_db.dbconnect()
TRX = MaltegoTransform()
drone='%'
now=datetime.datetime.now()
if 'start_time' in m.AdditionalFields and 'end_time' in m.AdditionalFields :
start_time=m.AdditionalFields['start_time']
end_time=m.AdditionalFields['end_time']
else:
start_time=now+datetime.timedelta(seconds=-lookback)
end_time=now+datetime.timedelta(seconds=lookback)
# Maltego requires format e.g 2012-10-23 22:37:12.0
now=now.strftime("%Y-%m-%d %H:%M:%S.0")
start_time=start_time.strftime("%Y-%m-%d %H:%M:%S.0")
end_time=end_time.strftime("%Y-%m-%d %H:%M:%S.0")
if 'location' in m.AdditionalFields:
location=m.AdditionalFields['location']
else:
location="%"
if 'properties.drone' in m.AdditionalFields:
drone=m.AdditionalFields['properties.drone']
cursor.execute("SELECT domain, COUNT(*) FROM (SELECT domain, client_ip FROM squid_logs GROUP BY domain, client_ip) AS x GROUP BY domain")
results=cursor.fetchall()
for row in results:
num=-1
domain="fuck unicode"
try:
domain=row[0].encode('utf8','xmlcharrefreplace')
num=row[1]
except Exception,e:
logging.debug(e)
NewEnt=TRX.addEntity("Domain", domain);
NewEnt.addAdditionalFields("num","Number","strict",num)
NewEnt.addAdditionalFields("domain","domain","strict",domain)
NewEnt.setWeight(num)
#NewEnt.addAdditionalFields("drone","drone","strict",drone)
#NewEnt.addAdditionalFields("start_time", "start_time", "nostrict",start)
#NewEnt.addAdditionalFields("end_time","end_time", "nostrict",end)
#NewEnt.addAdditionalFields("location","location","strict",location)
#NewEnt.addAdditionalFields("run_id","run_id","strict",run_id)
TRX.returnOutput()
try:
main()
except Exception, e:
logging.debug(e)
|
mit
|
dntt1/youtube-dl
|
youtube_dl/extractor/lynda.py
|
2
|
9214
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata,
)
class LyndaBaseIE(InfoExtractor):
_SIGNIN_URL = 'https://www.lynda.com/signin'
_PASSWORD_URL = 'https://www.lynda.com/signin/password'
_USER_URL = 'https://www.lynda.com/signin/user'
_ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
_NETRC_MACHINE = 'lynda'
def _real_initialize(self):
self._login()
@staticmethod
def _check_error(json_string, key_or_keys):
keys = [key_or_keys] if isinstance(key_or_keys, compat_str) else key_or_keys
for key in keys:
error = json_string.get(key)
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
def _login_step(self, form_html, fallback_action_url, extra_form_data, note, referrer_url):
action_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_html,
'post url', default=fallback_action_url, group='url')
if not action_url.startswith('http'):
action_url = compat_urlparse.urljoin(self._SIGNIN_URL, action_url)
form_data = self._hidden_inputs(form_html)
form_data.update(extra_form_data)
try:
response = self._download_json(
action_url, None, note,
data=urlencode_postdata(form_data),
headers={
'Referer': referrer_url,
'X-Requested-With': 'XMLHttpRequest',
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 500:
response = self._parse_json(e.cause.read().decode('utf-8'), None)
self._check_error(response, ('email', 'password'))
raise
self._check_error(response, 'ErrorMessage')
return response, action_url
def _login(self):
username, password = self._get_login_info()
if username is None:
return
# Step 1: download signin page
signin_page = self._download_webpage(
self._SIGNIN_URL, None, 'Downloading signin page')
# Already logged in
if any(re.search(p, signin_page) for p in (
                r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
return
# Step 2: submit email
signin_form = self._search_regex(
r'(?s)(<form[^>]+data-form-name=["\']signin["\'][^>]*>.+?</form>)',
signin_page, 'signin form')
signin_page, signin_url = self._login_step(
signin_form, self._PASSWORD_URL, {'email': username},
'Submitting email', self._SIGNIN_URL)
# Step 3: submit password
password_form = signin_page['body']
self._login_step(
password_form, self._USER_URL, {'email': username, 'password': password},
'Submitting password', signin_url)
class LyndaIE(LyndaBaseIE):
IE_NAME = 'lynda'
IE_DESC = 'lynda.com videos'
_VALID_URL = r'https?://www\.lynda\.com/(?:[^/]+/[^/]+/\d+|player/embed)/(?P<id>\d+)'
_TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
_TESTS = [{
'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
'md5': 'ecfc6862da89489161fb9cd5f5a6fac1',
'info_dict': {
'id': '114408',
'ext': 'mp4',
'title': 'Using the exercise files',
'duration': 68
}
}, {
'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
video_id, 'Downloading video JSON')
if 'Status' in video:
raise ExtractorError(
'lynda returned error: %s' % video['Message'], expected=True)
if video.get('HasAccess') is False:
self.raise_login_required('Video %s is only available for members' % video_id)
video_id = compat_str(video.get('ID') or video_id)
duration = int_or_none(video.get('DurationInSeconds'))
title = video['Title']
formats = []
fmts = video.get('Formats')
if fmts:
formats.extend([{
'url': f['Url'],
'ext': f.get('Extension'),
'width': int_or_none(f.get('Width')),
'height': int_or_none(f.get('Height')),
'filesize': int_or_none(f.get('FileSize')),
'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None,
} for f in fmts if f.get('Url')])
prioritized_streams = video.get('PrioritizedStreams')
if prioritized_streams:
for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
formats.extend([{
'url': video_url,
'width': int_or_none(format_id),
'format_id': '%s-%s' % (prioritized_stream_id, format_id),
} for format_id, video_url in prioritized_stream.items()])
self._check_formats(formats, video_id)
self._sort_formats(formats)
subtitles = self.extract_subtitles(video_id)
return {
'id': video_id,
'title': title,
'duration': duration,
'subtitles': subtitles,
'formats': formats
}
def _fix_subtitles(self, subs):
srt = ''
seq_counter = 0
for pos in range(0, len(subs) - 1):
seq_current = subs[pos]
m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
if m_current is None:
continue
seq_next = subs[pos + 1]
m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
if m_next is None:
continue
appear_time = m_current.group('timecode')
disappear_time = m_next.group('timecode')
text = seq_current['Caption'].strip()
if text:
seq_counter += 1
srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (seq_counter, appear_time, disappear_time, text)
if srt:
return srt
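    # Illustrative note (not part of the original extractor): _fix_subtitles
    # expects transcript entries shaped like
    #   {'Timecode': '[00:00:01.67]', 'Caption': 'Welcome to the course.'}
    # (matching _TIMECODE_REGEX above) and turns consecutive entries into one
    # SRT cue, e.g.
    #   1
    #   00:00:01.67 --> 00:00:05.24
    #   Welcome to the course.
    # The sample values shown here are hypothetical.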
def _get_subtitles(self, video_id):
url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
subs = self._download_json(url, None, False)
if subs:
return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
else:
return {}
class LyndaCourseIE(LyndaBaseIE):
IE_NAME = 'lynda:course'
IE_DESC = 'lynda.com online courses'
    # A course link is identical to the welcome/introduction video link of the
    # same course, so we recognize such URLs as course links.
_VALID_URL = r'https?://(?:www|m)\.lynda\.com/(?P<coursepath>[^/]+/[^/]+/(?P<courseid>\d+))-\d\.html'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
course_path = mobj.group('coursepath')
course_id = mobj.group('courseid')
course = self._download_json(
'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
course_id, 'Downloading course JSON')
if course.get('Status') == 'NotFound':
raise ExtractorError(
'Course %s does not exist' % course_id, expected=True)
unaccessible_videos = 0
entries = []
# Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
# by single video API anymore
for chapter in course['Chapters']:
for video in chapter.get('Videos', []):
if video.get('HasAccess') is False:
unaccessible_videos += 1
continue
video_id = video.get('ID')
if video_id:
entries.append({
'_type': 'url_transparent',
'url': 'http://www.lynda.com/%s/%s-4.html' % (course_path, video_id),
'ie_key': LyndaIE.ie_key(),
'chapter': chapter.get('Title'),
'chapter_number': int_or_none(chapter.get('ChapterIndex')),
'chapter_id': compat_str(chapter.get('ID')),
})
if unaccessible_videos > 0:
self._downloader.report_warning(
'%s videos are only available for members (or paid members) and will not be downloaded. '
% unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT)
course_title = course.get('Title')
course_description = course.get('Description')
return self.playlist_result(entries, course_id, course_title, course_description)
|
unlicense
|
evansd/django
|
tests/template_tests/syntax_tests/test_invalid_string.py
|
440
|
2310
|
from django.test import SimpleTestCase
from ..utils import setup
class InvalidStringTests(SimpleTestCase):
libraries = {'i18n': 'django.templatetags.i18n'}
@setup({'invalidstr01': '{{ var|default:"Foo" }}'})
def test_invalidstr01(self):
output = self.engine.render_to_string('invalidstr01')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, 'Foo')
@setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'})
def test_invalidstr02(self):
output = self.engine.render_to_string('invalidstr02')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'})
def test_invalidstr03(self):
output = self.engine.render_to_string('invalidstr03')
self.assertEqual(output, '')
@setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'})
def test_invalidstr04(self):
output = self.engine.render_to_string('invalidstr04')
self.assertEqual(output, 'No')
@setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'})
def test_invalidstr04_2(self):
output = self.engine.render_to_string('invalidstr04_2')
self.assertEqual(output, 'Yes')
@setup({'invalidstr05': '{{ var }}'})
def test_invalidstr05(self):
output = self.engine.render_to_string('invalidstr05')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr06': '{{ var.prop }}'})
def test_invalidstr06(self):
output = self.engine.render_to_string('invalidstr06')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
@setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'})
def test_invalidstr07(self):
output = self.engine.render_to_string('invalidstr07')
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID')
else:
self.assertEqual(output, '')
|
bsd-3-clause
|
SerialShadow/SickRage
|
tests/tv_tests.py
|
7
|
3699
|
# coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import sys, os.path
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
import test_lib as test
import sickbeard
from sickbeard.tv import TVEpisode, TVShow
class TVShowTests(test.SickbeardTestDBCase):
def setUp(self):
super(TVShowTests, self).setUp()
sickbeard.showList = []
def test_init_indexerid(self):
show = TVShow(1, 0001, "en")
self.assertEqual(show.indexerid, 0001)
def test_change_indexerid(self):
show = TVShow(1, 0001, "en")
show.name = "show name"
show.network = "cbs"
show.genre = "crime"
show.runtime = 40
show.status = "Ended"
show.default_ep_status = "5"
show.airs = "monday"
show.startyear = 1987
show.saveToDB()
show.loadFromDB(skipNFO=True)
show.indexerid = 0002
show.saveToDB()
show.loadFromDB(skipNFO=True)
self.assertEqual(show.indexerid, 0002)
def test_set_name(self):
show = TVShow(1, 0001, "en")
show.name = "newName"
show.saveToDB()
show.loadFromDB(skipNFO=True)
self.assertEqual(show.name, "newName")
class TVEpisodeTests(test.SickbeardTestDBCase):
def setUp(self):
super(TVEpisodeTests, self).setUp()
sickbeard.showList = []
def test_init_empty_db(self):
show = TVShow(1, 0001, "en")
ep = TVEpisode(show, 1, 1)
ep.name = "asdasdasdajkaj"
ep.saveToDB()
ep.loadFromDB(1, 1)
self.assertEqual(ep.name, "asdasdasdajkaj")
class TVTests(test.SickbeardTestDBCase):
def setUp(self):
super(TVTests, self).setUp()
sickbeard.showList = []
def test_getEpisode(self):
show = TVShow(1, 0001, "en")
show.name = "show name"
show.network = "cbs"
show.genre = "crime"
show.runtime = 40
show.status = "Ended"
show.default_ep_status = "5"
show.airs = "monday"
show.startyear = 1987
show.saveToDB()
sickbeard.showList = [show]
#TODO: implement
if __name__ == '__main__':
print "=================="
print "STARTING - TV TESTS"
print "=================="
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(TVShowTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(TVEpisodeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(TVTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
gpl-3.0
|
WangYueFt/jieba
|
test/parallel/test2.py
|
65
|
4527
|
#encoding=utf-8
from __future__ import print_function
import sys
sys.path.append("../../")
import jieba
jieba.enable_parallel(4)
def cuttest(test_sent):
result = jieba.cut(test_sent,cut_all=True)
for word in result:
print(word, "/", end=' ')
print("")
if __name__ == "__main__":
cuttest("这是一个伸手不见五指的黑夜。我叫孙悟空,我爱北京,我爱Python和C++。")
cuttest("我不喜欢日本和服。")
cuttest("雷猴回归人间。")
cuttest("工信处女干事每月经过下属科室都要亲口交代24口交换机等技术性器件的安装工作")
cuttest("我需要廉租房")
cuttest("永和服装饰品有限公司")
cuttest("我爱北京天安门")
cuttest("abc")
cuttest("隐马尔可夫")
cuttest("雷猴是个好网站")
cuttest("“Microsoft”一词由“MICROcomputer(微型计算机)”和“SOFTware(软件)”两部分组成")
cuttest("草泥马和欺实马是今年的流行词汇")
cuttest("伊藤洋华堂总府店")
cuttest("中国科学院计算技术研究所")
cuttest("罗密欧与朱丽叶")
cuttest("我购买了道具和服装")
cuttest("PS: 我觉得开源有一个好处,就是能够敦促自己不断改进,避免敞帚自珍")
cuttest("湖北省石首市")
cuttest("湖北省十堰市")
cuttest("总经理完成了这件事情")
cuttest("电脑修好了")
cuttest("做好了这件事情就一了百了了")
cuttest("人们审美的观点是不同的")
cuttest("我们买了一个美的空调")
cuttest("线程初始化时我们要注意")
cuttest("一个分子是由好多原子组织成的")
cuttest("祝你马到功成")
cuttest("他掉进了无底洞里")
cuttest("中国的首都是北京")
cuttest("孙君意")
cuttest("外交部发言人马朝旭")
cuttest("领导人会议和第四届东亚峰会")
cuttest("在过去的这五年")
cuttest("还需要很长的路要走")
cuttest("60周年首都阅兵")
cuttest("你好人们审美的观点是不同的")
cuttest("买水果然后来世博园")
cuttest("买水果然后去世博园")
cuttest("但是后来我才知道你是对的")
cuttest("存在即合理")
cuttest("的的的的的在的的的的就以和和和")
cuttest("I love你,不以为耻,反以为rong")
cuttest("因")
cuttest("")
cuttest("hello你好人们审美的观点是不同的")
cuttest("很好但主要是基于网页形式")
cuttest("hello你好人们审美的观点是不同的")
cuttest("为什么我不能拥有想要的生活")
cuttest("后来我才")
cuttest("此次来中国是为了")
cuttest("使用了它就可以解决一些问题")
cuttest(",使用了它就可以解决一些问题")
cuttest("其实使用了它就可以解决一些问题")
cuttest("好人使用了它就可以解决一些问题")
cuttest("是因为和国家")
cuttest("老年搜索还支持")
cuttest("干脆就把那部蒙人的闲法给废了拉倒!RT @laoshipukong : 27日,全国人大常委会第三次审议侵权责任法草案,删除了有关医疗损害责任“举证倒置”的规定。在医患纠纷中本已处于弱势地位的消费者由此将陷入万劫不复的境地。 ")
cuttest("大")
cuttest("")
cuttest("他说的确实在理")
cuttest("长春市长春节讲话")
cuttest("结婚的和尚未结婚的")
cuttest("结合成分子时")
cuttest("旅游和服务是最好的")
cuttest("这件事情的确是我的错")
cuttest("供大家参考指正")
cuttest("哈尔滨政府公布塌桥原因")
cuttest("我在机场入口处")
cuttest("邢永臣摄影报道")
cuttest("BP神经网络如何训练才能在分类时增加区分度?")
cuttest("南京市长江大桥")
cuttest("应一些使用者的建议,也为了便于利用NiuTrans用于SMT研究")
cuttest('长春市长春药店')
cuttest('邓颖超生前最喜欢的衣服')
cuttest('胡锦涛是热爱世界和平的政治局常委')
cuttest('程序员祝海林和朱会震是在孙健的左面和右面, 范凯在最右面.再往左是李松洪')
cuttest('一次性交多少钱')
cuttest('两块五一套,三块八一斤,四块七一本,五块六一条')
cuttest('小和尚留了一个像大和尚一样的和尚头')
cuttest('我是中华人民共和国公民;我爸爸是共和党党员; 地铁和平门站')
|
mit
|
thiagopnts/servo
|
components/script/dom/bindings/codegen/ply/ply/lex.py
|
344
|
40739
|
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
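# Illustrative sketch (not part of ply): a typical driver loop over the runtime
# interface listed above, assuming `lexer` was produced by ply's lex() builder
# (not shown in this excerpt):
#
#     lexer.input(source_text)
#     for tok in lexer:                  # iteration calls token() until it returns None
#         process(tok.type, tok.value)   # tok also carries lineno and lexpos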
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # Every function must return a token; if it returns nothing, we just move to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
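# Illustrative sketch (hypothetical rule names, not part of this module): a
# minimal lexer specification built with lex() usually looks like this:
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     lexer = lex()
#     lexer.input("1 + 2")
#     while True:
#         tok = lexer.token()
#         if not tok: break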
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regular expression on a token
# function when its docstring needs to be supplied in some other way (for
# example, when the pattern is built from other strings).
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
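# Illustrative sketch (hypothetical names): @TOKEN is useful when the pattern is
# built from other strings and therefore cannot be written as a docstring literal:
#
#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t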
|
mpl-2.0
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/django/conf/locale/pl/formats.py
|
115
|
1147
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j E Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
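# Illustrative sketch (not part of the original locale file): with this locale
# active, Django's localization helpers pick these formats up automatically,
# for example:
#
#     from django.utils import formats, translation
#     import datetime
#
#     with translation.override('pl'):
#         formats.date_format(datetime.date(2006, 10, 25), 'SHORT_DATE_FORMAT')
#         # expected to render roughly as '25-10-2006'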
|
bsd-3-clause
|
hybrideagle/django
|
django/db/backends/base/operations.py
|
192
|
22514
|
import datetime
import decimal
import warnings
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes a SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide a sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
        if the value is invalid, otherwise it returns the validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_datefield_value(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def adapt_decimalfield_value(self, value, max_digits, decimal_places):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
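    # Illustrative example (values are what the default implementation would
    # return): year_lookup_bounds_for_date_field(2006) yields
    # ['2006-01-01', '2006-12-31'].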
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Get a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
        format; this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection, context):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_aggregate_support(self, aggregate_func):
warnings.warn(
"check_aggregate_support has been deprecated. Use "
"check_expression_support instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.check_expression_support(aggregate_func)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
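# Illustrative sketch (hypothetical backend, not part of Django): a concrete
# backend subclass overrides the NotImplementedError methods above, e.g.:
#
#     class MyDatabaseOperations(BaseDatabaseOperations):
#         def quote_name(self, name):
#             if name.startswith('"') and name.endswith('"'):
#                 return name  # already quoted
#             return '"%s"' % name
#
#         def no_limit_value(self):
#             return None  # the LIMIT clause can simply be omitted
#
#         def date_extract_sql(self, lookup_type, field_name):
#             return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)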
|
bsd-3-clause
|
niranjan94/open-event-orga-server
|
app/helpers/request_context_task.py
|
17
|
3427
|
"""
Celery task wrapper to set request context vars and global
vars when a task is executed
Based on http://xion.io/post/code/celery-include-flask-request-context.html
"""
from celery import Task
from flask import has_request_context, request, g
from app import current_app as app
__all__ = ['RequestContextTask']
class RequestContextTask(Task):
"""Base class for tasks that originate from Flask request handlers
and carry over most of the request context data.
    This has the advantage of being able to access all the usual information
    that the HTTP request carries and use it within the task. Potential
use cases include e.g. formatting URLs for external use in emails sent
by tasks.
"""
abstract = True
#: Name of the additional parameter passed to tasks
#: that contains information about the original Flask request context.
CONTEXT_ARG_NAME = '_flask_request_context'
GLOBALS_ARG_NAME = '_flask_global_proxy'
GLOBAL_KEYS = ['user']
def __call__(self, *args, **kwargs):
"""Execute task code with given arguments."""
call = lambda: super(RequestContextTask, self).__call__(*args, **kwargs)
# set context
context = kwargs.pop(self.CONTEXT_ARG_NAME, None)
gl = kwargs.pop(self.GLOBALS_ARG_NAME, {})
if context is None or has_request_context():
return call()
with app.test_request_context(**context):
# set globals
for i in gl:
setattr(g, i, gl[i])
# call
result = call()
# process a fake "Response" so that
# ``@after_request`` hooks are executed
# app.process_response(make_response(result or ''))
return result
def apply_async(self, args=None, kwargs=None, **rest):
# if rest.pop('with_request_context', True):
self._include_request_context(kwargs)
self._include_global(kwargs)
return super(RequestContextTask, self).apply_async(args, kwargs, **rest)
def apply(self, args=None, kwargs=None, **rest):
# if rest.pop('with_request_context', True):
self._include_request_context(kwargs)
self._include_global(kwargs)
return super(RequestContextTask, self).apply(args, kwargs, **rest)
def retry(self, args=None, kwargs=None, **rest):
# if rest.pop('with_request_context', True):
self._include_request_context(kwargs)
self._include_global(kwargs)
return super(RequestContextTask, self).retry(args, kwargs, **rest)
def _include_request_context(self, kwargs):
"""Includes all the information about current Flask request context
as an additional argument to the task.
"""
if not has_request_context():
return
# keys correspond to arguments of :meth:`Flask.test_request_context`
context = {
'path': request.path,
'base_url': request.url_root,
'method': request.method,
'headers': dict(request.headers),
}
if '?' in request.url:
context['query_string'] = request.url[(request.url.find('?') + 1):]
kwargs[self.CONTEXT_ARG_NAME] = context
def _include_global(self, kwargs):
d = {}
for z in self.GLOBAL_KEYS:
if hasattr(g, z):
d[z] = getattr(g, z)
kwargs[self.GLOBALS_ARG_NAME] = d
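# Illustrative sketch (the `celery` app instance and the task body are assumptions,
# not part of this file): a task opts in by using this class as its base, after
# which the request context and the selected globals travel with the job:
#
#     @celery.task(base=RequestContextTask)
#     def send_invite_email(event_id):
#         # request.url_root and g.user are available here just as they were in
#         # the originating Flask request handler.
#         ...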
|
gpl-3.0
|
chengdh/openerp-ktv
|
openerp/addons/auction/report/catalog2.py
|
9
|
6984
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from report.interface import report_rml
import pooler
from xml.dom import minidom
import re
import base64
import photo_shadow
import addons
def _to_unicode(s):
try:
return s.decode('utf-8')
except UnicodeError:
try:
return s.decode('latin')
except UnicodeError:
try:
return s.encode('ascii')
except UnicodeError:
return s
def _to_decode(s):
try:
return s.encode('utf-8')
except UnicodeError:
try:
return s.encode('latin')
except UnicodeError:
try:
return s.decode('ascii')
except UnicodeError:
return s
class auction_catalog(report_rml):
def create_xml(self, cr, uid, ids, data, context):
xml = self.catalog_xml(cr, uid, ids, data, context)
temp = self.post_process_xml_data(cr, uid, xml, context)
return temp
def catalog_xml(self, cr, uid, ids, data, context, cwid="0"):
impl = minidom.getDOMImplementation()
doc = impl.createDocument(None, "report", None)
catalog=doc.createElement('catalog')
doc.documentElement.appendChild(catalog)
auction_lot_pool = pooler.get_pool(cr.dbname).get('auction.lots')
auction_dates_pool = pooler.get_pool(cr.dbname).get('auction.dates')
for auction in auction_dates_pool.browse(cr, uid, ids, context=context):
auction_lot_ids = auction_lot_pool.search(cr, uid, [('auction_id', '=', auction.id)])
key = 'name'
categ = doc.createElement(key)
categ.appendChild(doc.createTextNode(_to_decode(auction.name)))
catalog.appendChild(categ)
            # Auction Date 1 element
categ = doc.createElement("AuctionDate1")
categ.appendChild(doc.createTextNode(_to_decode(auction.auction1)))
catalog.appendChild(categ)
            # Auction Date 2 element
categ = doc.createElement("AuctionDate2")
categ.appendChild(doc.createTextNode(_to_decode(auction.auction2)))
catalog.appendChild(categ)
# promotion element
promo = doc.createElement('promotion1')
fp = file(addons.get_module_resource('auction','report', 'images', 'flagey_logo.jpg'),'r')
file_data = fp.read()
promo.appendChild(doc.createTextNode(base64.encodestring(file_data)))
catalog.appendChild(promo)
promo = doc.createElement('promotion2')
fp = file(addons.get_module_resource('auction','report', 'images', 'flagey_logo.jpg'),'r')
file_data = fp.read()
promo.appendChild(doc.createTextNode(base64.encodestring(file_data)))
catalog.appendChild(promo)
#product element
products = doc.createElement('products')
catalog.appendChild(products)
side = 0
length = 0
for cat in auction_lot_pool.browse(cr, uid, auction_lot_ids, context=context):
product = doc.createElement('product')
products.appendChild(product)
if cat.obj_desc:
infos = doc.createElement('infos')
lines = re.split('<br/>|\n', _to_unicode(cat.obj_desc))
for line in lines:
xline = doc.createElement('info')
xline.appendChild(doc.createTextNode(_to_decode(line)))
infos.appendChild(xline)
product.appendChild(infos)
if cat.lot_num:
lnum = doc.createElement('lot_num')
lnum.appendChild(doc.createTextNode(_to_decode(str(cat.lot_num))))
infos.appendChild(lnum)
if cat.image:
import tempfile
limg = doc.createElement('photo_small')
file_name = tempfile.mktemp(prefix='openerp_auction_', suffix='.jpg')
fp = file(file_name, 'w')
content = base64.decodestring(cat.image)
fp.write(content)
fp.close()
fp = file(file_name,'r')
test_file_name = tempfile.mktemp(prefix='openerp_auction_test_', suffix='.jpg')
size = photo_shadow.convert_catalog(fp, test_file_name,110)
fp = file(test_file_name)
file_data = fp.read()
test_data = base64.encodestring(file_data)
fp.close()
limg.appendChild(doc.createTextNode(test_data))
infos.appendChild(limg)
if cat.lot_est1:
ref2 = doc.createElement(key)
ref2.appendChild(doc.createTextNode( _to_decode(str(cat.lot_est1 or 0.0))))
product.appendChild(ref2)
if cat.lot_est2:
ref2 = doc.createElement(key)
ref2.appendChild(doc.createTextNode( _to_decode(str(cat.lot_est2 or 0.0))))
product.appendChild(ref2)
oldlength = length
length += 2.0
if length>23.7:
side += 1
length = length - oldlength
ref3 = doc.createElement('newpage')
ref3.appendChild(doc.createTextNode( "1" ))
product.appendChild(ref3)
if side%2:
ref4 = doc.createElement('side')
ref4.appendChild(doc.createTextNode( "1" ))
product.appendChild(ref4)
xml1 = doc.toxml()
return xml1
auction_catalog('report.auction.cat_flagy', 'auction.dates','','addons/auction/report/catalog2.xsl')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
bhairavmehta95/flashcard-helper-alexa-skill
|
pymysql/constants/CR.py
|
27
|
2228
|
# flake8: noqa
# errmsg.h
CR_ERROR_FIRST = 2000
CR_UNKNOWN_ERROR = 2000
CR_SOCKET_CREATE_ERROR = 2001
CR_CONNECTION_ERROR = 2002
CR_CONN_HOST_ERROR = 2003
CR_IPSOCK_ERROR = 2004
CR_UNKNOWN_HOST = 2005
CR_SERVER_GONE_ERROR = 2006
CR_VERSION_ERROR = 2007
CR_OUT_OF_MEMORY = 2008
CR_WRONG_HOST_INFO = 2009
CR_LOCALHOST_CONNECTION = 2010
CR_TCP_CONNECTION = 2011
CR_SERVER_HANDSHAKE_ERR = 2012
CR_SERVER_LOST = 2013
CR_COMMANDS_OUT_OF_SYNC = 2014
CR_NAMEDPIPE_CONNECTION = 2015
CR_NAMEDPIPEWAIT_ERROR = 2016
CR_NAMEDPIPEOPEN_ERROR = 2017
CR_NAMEDPIPESETSTATE_ERROR = 2018
CR_CANT_READ_CHARSET = 2019
CR_NET_PACKET_TOO_LARGE = 2020
CR_EMBEDDED_CONNECTION = 2021
CR_PROBE_SLAVE_STATUS = 2022
CR_PROBE_SLAVE_HOSTS = 2023
CR_PROBE_SLAVE_CONNECT = 2024
CR_PROBE_MASTER_CONNECT = 2025
CR_SSL_CONNECTION_ERROR = 2026
CR_MALFORMED_PACKET = 2027
CR_WRONG_LICENSE = 2028
CR_NULL_POINTER = 2029
CR_NO_PREPARE_STMT = 2030
CR_PARAMS_NOT_BOUND = 2031
CR_DATA_TRUNCATED = 2032
CR_NO_PARAMETERS_EXISTS = 2033
CR_INVALID_PARAMETER_NO = 2034
CR_INVALID_BUFFER_USE = 2035
CR_UNSUPPORTED_PARAM_TYPE = 2036
CR_SHARED_MEMORY_CONNECTION = 2037
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042
CR_SHARED_MEMORY_MAP_ERROR = 2043
CR_SHARED_MEMORY_EVENT_ERROR = 2044
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046
CR_CONN_UNKNOW_PROTOCOL = 2047
CR_INVALID_CONN_HANDLE = 2048
CR_SECURE_AUTH = 2049
CR_FETCH_CANCELED = 2050
CR_NO_DATA = 2051
CR_NO_STMT_METADATA = 2052
CR_NO_RESULT_SET = 2053
CR_NOT_IMPLEMENTED = 2054
CR_SERVER_LOST_EXTENDED = 2055
CR_STMT_CLOSED = 2056
CR_NEW_STMT_METADATA = 2057
CR_ALREADY_CONNECTED = 2058
CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
CR_DUPLICATE_CONNECTION_ATTR = 2060
CR_AUTH_PLUGIN_ERR = 2061
CR_ERROR_LAST = 2061
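# Illustrative sketch (the connection/cursor objects and sql string are assumptions,
# not part of this constants module): these codes appear as the first element of
# pymysql's OperationalError args, so a caller can recover from a dropped
# connection roughly like this:
#
#     import pymysql
#     from pymysql.constants import CR
#
#     try:
#         cursor.execute(sql)
#     except pymysql.err.OperationalError as exc:
#         if exc.args[0] in (CR.CR_SERVER_GONE_ERROR, CR.CR_SERVER_LOST):
#             connection.ping(reconnect=True)
#         else:
#             raise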
|
mit
|
morristech/POSTMan-Chrome-Extension
|
tests/selenium/pmtests/postman_tests_history.py
|
104
|
3459
|
from optparse import OptionParser
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
import inspect
import time
from postman_tests import PostmanTests
class PostmanTestsHistory(PostmanTests):
def test_1_save_request_to_history(self):
self.set_url_field(self.browser, "http://localhost:5000/get?val=1")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("GET")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
code_data_value = self.get_codemirror_value(self.browser)
if code_data_value.find("get") > 0:
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
value = self.browser.execute_script("return arguments[0].innerHTML", first_history_item)
if value.find("http://localhost:5000/get?val=1") > 0:
return True
else:
return False
else:
return False
def test_2_load_request_from_history(self):
self.set_url_field(self.browser, "")
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
first_history_item.click()
try:
w = WebDriverWait(self.browser, 10)
w.until(lambda browser: self.browser.find_element_by_id("url").get_attribute("value") == "http://localhost:5000/get?val=1")
return True
except:
return False
def test_3_delete_request_from_history(self):
first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request-actions .request-actions-delete")
first_history_item.click()
history_items = self.browser.find_elements_by_css_selector("#history-items li")
if len(history_items) == 0:
return True
else:
return False
def test_4_clear_history(self):
self.set_url_field(self.browser, "http://localhost:5000/html?val=1")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("GET")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
# Waits for the response
self.get_codemirror_value(self.browser)
self.set_url_field(self.browser, "http://localhost:5000/html?val=2")
method_select = self.browser.find_element_by_id("request-method-selector")
Select(method_select).select_by_value("GET")
send_button = self.browser.find_element_by_id("submit-request")
send_button.click()
# Waits for the response
self.get_codemirror_value(self.browser)
clear_all_button = self.browser.find_element_by_css_selector("#history-options .history-actions-delete")
clear_all_button.click()
history_items = self.browser.find_elements_by_css_selector("#history-items li")
if len(history_items) == 0:
return True
else:
return False
PostmanTestsHistory().run()
|
apache-2.0
|
junzis/py-adsb-decoder
|
setup.py
|
1
|
4319
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
Steps for deploying a new version:
1. Increase the version number
2. remove the old deployment under [dist] folder
3. run: python setup.py sdist
run: python setup.py bdist_wheel --universal
4. twine upload dist/*
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyModeS',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='2.0',
description='Python ADS-B/Mode-S Decoder',
long_description=long_description,
# The project's main homepage.
url='https://github.com/junzis/pyModes',
# Author details
author='Junzi Sun',
author_email='[email protected]',
# Choose your license
license='GNU GPL v3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='Mode-S ADS-B EHS decoding',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'argparse'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
scripts=['pyModeS/streamer/modeslive'],
)
|
mit
|
sdague/home-assistant
|
homeassistant/components/blueprint/importer.py
|
2
|
4760
|
"""Import logic for blueprint."""
from dataclasses import dataclass
import re
from typing import Optional
import voluptuous as vol
import yarl
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.util import yaml
from .models import Blueprint
from .schemas import is_blueprint_config
COMMUNITY_TOPIC_PATTERN = re.compile(
r"^https://community.home-assistant.io/t/[a-z0-9-]+/(?P<topic>\d+)(?:/(?P<post>\d+)|)$"
)
COMMUNITY_CODE_BLOCK = re.compile(
r'<code class="lang-(?P<syntax>[a-z]+)">(?P<content>(?:.|\n)*)</code>', re.MULTILINE
)
GITHUB_FILE_PATTERN = re.compile(
r"^https://github.com/(?P<repository>.+)/blob/(?P<path>.+)$"
)
GITHUB_RAW_FILE_PATTERN = re.compile(r"^https://raw.githubusercontent.com/")
COMMUNITY_TOPIC_SCHEMA = vol.Schema(
{
"slug": str,
"title": str,
"post_stream": {"posts": [{"updated_at": cv.datetime, "cooked": str}]},
},
extra=vol.ALLOW_EXTRA,
)
@dataclass(frozen=True)
class ImportedBlueprint:
"""Imported blueprint."""
url: str
suggested_filename: str
raw_data: str
blueprint: Blueprint
def _get_github_import_url(url: str) -> str:
"""Convert a GitHub url to the raw content.
Async friendly.
"""
match = GITHUB_RAW_FILE_PATTERN.match(url)
if match is not None:
return url
match = GITHUB_FILE_PATTERN.match(url)
if match is None:
raise ValueError("Not a GitHub file url")
repo, path = match.groups()
return f"https://raw.githubusercontent.com/{repo}/{path}"
def _get_community_post_import_url(url: str) -> str:
"""Convert a forum post url to an import url.
Async friendly.
"""
match = COMMUNITY_TOPIC_PATTERN.match(url)
if match is None:
raise ValueError("Not a topic url")
_topic, post = match.groups()
json_url = url
if post is not None:
        # Chop off the post part, i.e. "/2"
json_url = json_url[: -len(post) - 1]
json_url += ".json"
return json_url
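# Illustrative example (hypothetical topic URL): a post URL such as
# "https://community.home-assistant.io/t/my-blueprint/123456/2" is converted to
# "https://community.home-assistant.io/t/my-blueprint/123456.json".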
def _extract_blueprint_from_community_topic(
url: str,
topic: dict,
) -> Optional[ImportedBlueprint]:
"""Extract a blueprint from a community post JSON.
Async friendly.
"""
block_content = None
blueprint = None
post = topic["post_stream"]["posts"][0]
for match in COMMUNITY_CODE_BLOCK.finditer(post["cooked"]):
block_syntax, block_content = match.groups()
if block_syntax not in ("auto", "yaml"):
continue
block_content = block_content.strip()
try:
data = yaml.parse_yaml(block_content)
except HomeAssistantError:
if block_syntax == "yaml":
raise
continue
if not is_blueprint_config(data):
continue
blueprint = Blueprint(data)
break
if blueprint is None:
return None
return ImportedBlueprint(url, topic["slug"], block_content, blueprint)
async def fetch_blueprint_from_community_post(
hass: HomeAssistant, url: str
) -> Optional[ImportedBlueprint]:
"""Get blueprints from a community post url.
Method can raise aiohttp client exceptions, vol.Invalid.
Caller needs to implement own timeout.
"""
import_url = _get_community_post_import_url(url)
session = aiohttp_client.async_get_clientsession(hass)
resp = await session.get(import_url, raise_for_status=True)
json_resp = await resp.json()
json_resp = COMMUNITY_TOPIC_SCHEMA(json_resp)
return _extract_blueprint_from_community_topic(url, json_resp)
async def fetch_blueprint_from_github_url(
hass: HomeAssistant, url: str
) -> ImportedBlueprint:
"""Get a blueprint from a github url."""
import_url = _get_github_import_url(url)
session = aiohttp_client.async_get_clientsession(hass)
resp = await session.get(import_url, raise_for_status=True)
raw_yaml = await resp.text()
data = yaml.parse_yaml(raw_yaml)
blueprint = Blueprint(data)
parsed_import_url = yarl.URL(import_url)
suggested_filename = f"{parsed_import_url.parts[1]}-{parsed_import_url.parts[-1]}"
if suggested_filename.endswith(".yaml"):
suggested_filename = suggested_filename[:-5]
return ImportedBlueprint(url, suggested_filename, raw_yaml, blueprint)
async def fetch_blueprint_from_url(hass: HomeAssistant, url: str) -> ImportedBlueprint:
"""Get a blueprint from a url."""
for func in (fetch_blueprint_from_community_post, fetch_blueprint_from_github_url):
try:
return await func(hass, url)
except ValueError:
pass
raise HomeAssistantError("Unsupported url")
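# Illustrative sketch (the coroutine and URL below are assumptions): callers invoke
# the importer from async code and handle unsupported URLs themselves:
#
#     async def _import_example(hass):
#         url = "https://github.com/example/repo/blob/main/blueprint.yaml"
#         try:
#             imported = await fetch_blueprint_from_url(hass, url)
#         except HomeAssistantError:
#             return None
#         return imported.blueprint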
|
apache-2.0
|
arameshkumar/base-nuxeo-drive
|
nuxeo-drive-client/nxdrive/tests/test_conflicts.py
|
2
|
12533
|
import time
import shutil
from nxdrive.tests.common import OS_STAT_MTIME_RESOLUTION
from nxdrive.tests.common_unit_test import UnitTestCase
from nxdrive.osi import AbstractOSIntegration
from nose.plugins.skip import SkipTest
class TestConflicts(UnitTestCase):
def setUp(self):
super(TestConflicts, self).setUp()
self.workspace_id = ('defaultSyncRootFolderItemFactory#default#' + self.workspace)
self.file_id = self.remote_file_system_client_1.make_file(self.workspace_id, 'test.txt', 'Some content').uid
self.engine_1.start()
self.wait_sync(wait_for_async=True)
self.assertTrue(self.local_client_1.exists('/test.txt'))
def test_self_conflict(self):
remote = self.remote_file_system_client_1
local = self.local_client_1
# Update content on both sides by the same user, remote last
remote.update_content(self.file_id, 'Remote update')
local.update_content('/test.txt', 'Local update')
self.wait_sync(wait_for_async=True)
self.assertEqual(len(local.get_children_info('/')), 1)
self.assertTrue(local.exists('/test.txt'))
self.assertEqual(local.get_content('/test.txt'), 'Local update')
remote_children = remote.get_children_info(self.workspace_id)
self.assertEqual(len(remote_children), 1)
self.assertEqual(remote_children[0].uid, self.file_id)
self.assertEqual(remote_children[0].name, 'test.txt')
self.assertEqual(remote.get_content(remote_children[0].uid), 'Remote update')
self.assertEqual(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
# Update content on both sides by the same user, local last
remote.update_content(self.file_id, 'Remote update 2')
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/test.txt', 'Local update 2')
self.wait_sync(wait_for_async=True)
self.assertEqual(len(local.get_children_info('/')), 1)
self.assertTrue(local.exists('/test.txt'))
self.assertEqual(local.get_content('/test.txt'), 'Local update 2')
remote_children = remote.get_children_info(self.workspace_id)
self.assertEqual(len(remote_children), 1)
self.assertEqual(remote_children[0].uid, self.file_id)
self.assertEqual(remote_children[0].name, 'test.txt')
self.assertEqual(remote.get_content(remote_children[0].uid), 'Remote update 2')
self.assertEqual(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
def test_real_conflict(self):
local = self.local_client_1
remote = self.remote_file_system_client_2
# Update content on both sides by different users, remote last
time.sleep(OS_STAT_MTIME_RESOLUTION)
# Race condition is still possible
remote.update_content(self.file_id, 'Remote update')
local.update_content('/test.txt', 'Local update')
self.wait_sync(wait_for_async=True)
self.assertEqual(remote.get_content(self.file_id), 'Remote update')
self.assertEqual(local.get_content('/test.txt'), 'Local update')
self.assertEqual(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
# Update content on both sides by different users, local last
remote.update_content(self.file_id, 'Remote update 2')
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/test.txt', 'Local update 2')
self.wait_sync(wait_for_async=True)
self.assertEqual(remote.get_content(self.file_id), 'Remote update 2')
self.assertEqual(local.get_content('/test.txt'), 'Local update 2')
self.assertEqual(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
def test_resolve_local(self):
self.test_real_conflict()
# Resolve to local file
pair = self.engine_1.get_dao().get_normal_state_from_remote(self.file_id)
self.assertIsNotNone(pair)
self.engine_1.resolve_with_local(pair.id)
self.wait_sync(wait_for_async=True)
self.assertEqual(self.remote_file_system_client_2.get_content(self.file_id), 'Local update 2')
def test_resolve_remote(self):
self.test_real_conflict()
        # Resolve to remote file
pair = self.engine_1.get_dao().get_normal_state_from_remote(self.file_id)
self.assertIsNotNone(pair)
self.engine_1.resolve_with_remote(pair.id)
self.wait_sync(wait_for_async=True)
self.assertEqual(self.local_client_1.get_content('/test.txt'), 'Remote update 2')
def test_resolve_duplicate(self):
self.test_real_conflict()
        # Resolve by duplicating the file
pair = self.engine_1.get_dao().get_normal_state_from_remote(self.file_id)
self.assertIsNotNone(pair)
self.engine_1.resolve_with_duplicate(pair.id)
self.wait_sync(wait_for_async=True)
self.assertEqual(self.local_client_1.get_content('/test.txt'), 'Remote update 2')
self.assertEqual(self.local_client_1.get_content('/test__1.txt'), 'Local update 2')
def test_conflict_on_lock(self):
doc_uid = self.file_id.split("#")[-1]
local = self.local_client_1
remote = self.remote_file_system_client_2
self.remote_document_client_2.lock(doc_uid)
local.update_content('/test.txt', 'Local update')
self.wait_sync(wait_for_async=True)
self.assertEqual(local.get_content('/test.txt'), 'Local update')
self.assertEqual(remote.get_content(self.file_id), 'Some content')
remote.update_content(self.file_id, 'Remote update')
self.wait_sync(wait_for_async=True)
self.assertEqual(local.get_content('/test.txt'), 'Local update')
self.assertEqual(remote.get_content(self.file_id), 'Remote update')
self.assertEqual(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
self.remote_document_client_2.unlock(doc_uid)
self.wait_sync(wait_for_async=True)
self.assertEqual(local.get_content('/test.txt'), 'Local update')
self.assertEqual(remote.get_content(self.file_id), 'Remote update')
self.assertEqual(self.engine_1.get_dao().get_normal_state_from_remote(self.file_id).pair_state, "conflicted")
def test_XLS_conflict_on_locked_document(self):
if not AbstractOSIntegration.is_windows():
raise SkipTest("Windows Office only test")
self._XLS_local_update_on_locked_document(locked_from_start=False)
def test_XLS_conflict_on_locked_document_from_start(self):
if not AbstractOSIntegration.is_windows():
raise SkipTest("Windows Office only test")
self._XLS_local_update_on_locked_document()
def _XLS_local_update_on_locked_document(self, locked_from_start=True):
remote = self.remote_file_system_client_2
local = self.local_client_1
# user2: create remote XLS file
fs_item_id = remote.make_file(self.workspace_id, 'Excel 97 file.xls',
b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00').uid
doc_uid = fs_item_id.split("#")[-1]
self.wait_sync(wait_for_async=True)
self.assertTrue(local.exists('/Excel 97 file.xls'))
if locked_from_start:
# user2: lock document before user1 opening it
self.remote_document_client_2.lock(doc_uid)
self.wait_sync(wait_for_async=True)
local.unset_readonly('/Excel 97 file.xls')
# user1: simulate opening XLS file with MS Office ~= update its content
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01')
self.wait_sync(wait_for_async=locked_from_start)
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
if locked_from_start:
            # remote content hasn't changed, pair state is unsynchronized and remote_can_update flag is False
self.assertEqual(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
self.assertEqual(pair_state.pair_state, 'unsynchronized')
self.assertFalse(pair_state.remote_can_update)
else:
# remote content has changed, pair state is synchronized and remote_can_update flag is True
self.assertEqual(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01')
self.assertEqual(pair_state.pair_state, 'synchronized')
self.assertTrue(pair_state.remote_can_update)
if not locked_from_start:
# user2: lock document after user1 opening it
self.remote_document_client_2.lock(doc_uid)
self.wait_sync(wait_for_async=True)
# user1: simulate updating XLS file with MS Office
# 1. Create empty file 787D3000
# 2. Update 787D3000
# 3. Update Excel 97 file.xls
# 4. Update 787D3000
# 5. Move Excel 97 file.xls to 1743B25F.tmp
# 6. Move 787D3000 to Excel 97 file.xls
# 7. Update Excel 97 file.xls
# 8. Update 1743B25F.tmp
# 9. Update Excel 97 file.xls
# 10. Delete 1743B25F.tmp
local.make_file('/', '787D3000')
local.update_content('/787D3000', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
local.unset_readonly('/Excel 97 file.xls')
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02')
local.update_content('/787D3000', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
shutil.move(local._abspath('/Excel 97 file.xls'), local._abspath('/1743B25F.tmp'))
shutil.move(local._abspath('/787D3000'), local._abspath('/Excel 97 file.xls'))
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03\x04')
local.update_content('/1743B25F.tmp', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
local.update_content('/Excel 97 file.xls', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
local.delete_final('/1743B25F.tmp')
self.wait_sync(wait_for_async=not locked_from_start)
self.assertEqual(len(local.get_children_info('/')), 2)
self.assertEqual(local.get_content('/Excel 97 file.xls'), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
        # remote content hasn't changed, pair state is unsynchronized and remote_can_update flag is False
if locked_from_start:
self.assertEqual(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00')
else:
self.assertEqual(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01')
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
self.assertEqual(pair_state.pair_state, 'unsynchronized')
self.assertFalse(pair_state.remote_can_update)
# user2: remote update, conflict is detected once again and remote_can_update flag is still False
remote.update_content(fs_item_id, b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02', 'New Excel 97 file.xls')
self.wait_sync(wait_for_async=True)
self.assertEqual(len(local.get_children_info('/')), 2)
self.assertTrue(local.exists('/Excel 97 file.xls'))
self.assertEqual(local.get_content('/Excel 97 file.xls'), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03')
self.assertEqual(len(remote.get_children_info(self.workspace_id)), 2)
self.assertEqual(remote.get_info(fs_item_id).name, 'New Excel 97 file.xls')
self.assertEqual(remote.get_content(fs_item_id), b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02')
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
self.assertEqual(pair_state.pair_state, 'conflicted')
self.assertFalse(pair_state.remote_can_update)
# user2: unlock document, conflict is detected once again and remote_can_update flag is now True
self.remote_document_client_2.unlock(doc_uid)
self.wait_sync(wait_for_async=True)
pair_state = self.engine_1.get_dao().get_normal_state_from_remote(fs_item_id)
self.assertIsNotNone(pair_state)
self.assertEqual(pair_state.pair_state, 'conflicted')
self.assertTrue(pair_state.remote_can_update)
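# Condensed sketch of the resolution API exercised by the tests above (illustrative
# only): it assumes an engine and DAO wired up the way UnitTestCase does it, and an
# already conflicted pair for `file_id`.
def _resolve_conflict_example(engine, file_id, keep='local'):
    pair = engine.get_dao().get_normal_state_from_remote(file_id)
    if pair is None or pair.pair_state != 'conflicted':
        return
    if keep == 'local':
        engine.resolve_with_local(pair.id)      # push the local version to the server
    elif keep == 'remote':
        engine.resolve_with_remote(pair.id)     # replace the local file with the remote one
    else:
        engine.resolve_with_duplicate(pair.id)  # keep both; the local copy is renamed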
|
lgpl-2.1
|
tdmsinc/three.js
|
utils/exporters/blender/addons/io_three/exporter/geometry.py
|
11
|
12378
|
import os
from .. import constants, logger
from . import base_classes, io, api
FORMAT_VERSION = 3
class Geometry(base_classes.BaseNode):
def __init__(self, node, parent=None):
logger.debug('Geometry().__init__(%s)', node)
#@TODO: maybe better to have `three` constants for
# strings that are specific to `three` properties
geo_type = constants.GEOMETRY.title()
if parent.options.get(constants.GEOMETRY_TYPE):
opt_type = parent.options[constants.GEOMETRY_TYPE]
if opt_type == constants.BUFFER_GEOMETRY:
geo_type = constants.BUFFER_GEOMETRY
elif opt_type != constants.GEOMETRY:
logger.error('Unknown geometry type %s', opt_type)
logger.info('Setting %s to "%s"', node, geo_type)
self._defaults[constants.TYPE] = geo_type
base_classes.BaseNode.__init__(self, node,
parent=parent,
type=geo_type)
@property
def animation_filename(self):
compression = self.options.get(constants.COMPRESSION)
if compression in (None, constants.NONE):
ext = constants.JSON
elif compression == constants.MSGPACK:
ext = constants.PACK
for key in (constants.MORPH_TARGETS, constants.ANIMATION):
try:
self[key]
break
except KeyError:
pass
else:
logger.info('%s has no animation data', self.node)
return
return '%s.%s.%s' % (self.node, key, ext)
@property
def face_count(self):
try:
faces = self[constants.FACES]
except KeyError:
logger.debug('No parsed faces found')
return 0
length = len(faces)
offset = 0
bitset = lambda x,y: x & ( 1 << y )
face_count = 0
masks = (constants.MASK[constants.UVS],
constants.MASK[constants.NORMALS],
constants.MASK[constants.COLORS])
while offset < length:
bit = faces[offset]
offset += 1
face_count += 1
is_quad = bitset(bit, constants.MASK[constants.QUAD])
vector = 4 if is_quad else 3
offset += vector
if bitset(bit, constants.MASK[constants.MATERIALS]):
offset += 1
for mask in masks:
if bitset(bit, mask):
offset += vector
return face_count
@property
def metadata(self):
metadata = {
constants.GENERATOR: constants.THREE,
constants.VERSION: FORMAT_VERSION
}
if self[constants.TYPE] == constants.GEOMETRY.title():
self.__geometry_metadata(metadata)
else:
self.__buffer_geometry_metadata(metadata)
return metadata
def copy(self, scene=True):
logger.debug('Geometry().copy(scene=%s)', scene)
dispatch = {
True: self._scene_format,
False: self._geometry_format
}
data = dispatch[scene]()
try:
data[constants.MATERIALS] = self[constants.MATERIALS].copy()
except KeyError:
logger.debug('No materials to copy')
return data
def copy_textures(self):
logger.debug('Geometry().copy_textures()')
if self.options.get(constants.COPY_TEXTURES):
texture_registration = self.register_textures()
if texture_registration:
logger.info('%s has registered textures', self.node)
io.copy_registered_textures(
os.path.dirname(self.scene.filepath),
texture_registration)
def parse(self):
logger.debug('Geometry().parse()')
if self[constants.TYPE] == constants.GEOMETRY.title():
logger.info('Parsing Geometry format')
self.__parse_geometry()
else:
logger.info('Parsing BufferGeometry format')
self.__parse_buffer_geometry()
def register_textures(self):
logger.debug('Geometry().register_textures()')
return api.mesh.texture_registration(self.node)
def write(self, filepath=None):
logger.debug('Geometry().write(filepath=%s)', filepath)
filepath = filepath or self.scene.filepath
io.dump(filepath, self.copy(scene=False),
options=self.scene.options)
if self.options.get(constants.MAPS):
logger.info('Copying textures for %s', self.node)
self.copy_textures()
def write_animation(self, filepath):
logger.debug('Geometry().write_animation(%s)', filepath)
for key in (constants.MORPH_TARGETS, constants.ANIMATION):
try:
data = self[key]
break
except KeyError:
pass
else:
logger.info('%s has no animation data', self.node)
return
filepath = os.path.join(filepath, self.animation_filename)
if filepath:
logger.info('Dumping animation data to %s', filepath)
io.dump(filepath, data, options=self.scene.options)
return filepath
else:
logger.warning('Could not determine a filepath for '\
'animation data. Nothing written to disk.')
def _component_data(self):
logger.debug('Geometry()._component_data()')
if self[constants.TYPE] != constants.GEOMETRY.title():
return self[constants.ATTRIBUTES]
components = [constants.VERTICES, constants.FACES,
constants.UVS, constants.COLORS, constants.NORMALS,
constants.BONES, constants.SKIN_WEIGHTS,
constants.SKIN_INDICES, constants.NAME,
constants.INFLUENCES_PER_VERTEX]
data = {}
anim_components = [constants.MORPH_TARGETS, constants.ANIMATION]
if self.options.get(constants.EMBED_ANIMATION):
components.extend(anim_components)
else:
for component in anim_components:
try:
self[component]
except KeyError:
pass
else:
data[component] = os.path.basename(
self.animation_filename)
else:
logger.info('No animation data found for %s', self.node)
for component in components:
try:
data[component] = self[component]
except KeyError:
logger.debug('Component %s not found', component)
pass
return data
def _geometry_format(self):
data = self._component_data()
if self[constants.TYPE] != constants.GEOMETRY.title():
data = {constants.ATTRIBUTES: data}
data[constants.METADATA] = {
constants.TYPE: self[constants.TYPE]
}
data[constants.METADATA].update(self.metadata)
return data
def __buffer_geometry_metadata(self, metadata):
for key, value in self[constants.ATTRIBUTES].items():
size = value[constants.ITEM_SIZE]
array = value[constants.ARRAY]
metadata[key] = len(array)/size
def __geometry_metadata(self, metadata):
skip = (constants.TYPE, constants.FACES, constants.UUID,
constants.ANIMATION, constants.SKIN_INDICES,
constants.SKIN_WEIGHTS, constants.NAME,
constants.INFLUENCES_PER_VERTEX)
vectors = (constants.VERTICES, constants.NORMALS)
for key in self.keys():
if key in vectors:
try:
metadata[key] = int(len(self[key])/3)
except KeyError:
pass
continue
if key in skip: continue
metadata[key] = len(self[key])
faces = self.face_count
if faces > 0:
metadata[constants.FACES] = faces
def _scene_format(self):
data = {
constants.UUID: self[constants.UUID],
constants.TYPE: self[constants.TYPE]
}
component_data = self._component_data()
if self[constants.TYPE] == constants.GEOMETRY.title():
data[constants.DATA] = component_data
data[constants.DATA].update({
constants.METADATA: self.metadata
})
else:
if self.options[constants.EMBED_GEOMETRY]:
data[constants.DATA] = {
constants.ATTRIBUTES: component_data
}
else:
data[constants.ATTRIBUTES] = component_data
data[constants.METADATA] = self.metadata
data[constants.NAME] = self[constants.NAME]
return data
def __parse_buffer_geometry(self):
self[constants.ATTRIBUTES] = {}
options_vertices = self.options.get(constants.VERTICES)
option_normals = self.options.get(constants.NORMALS)
option_uvs = self.options.get(constants.UVS)
dispatch = (
(constants.POSITION, options_vertices,
api.mesh.buffer_position, 3),
(constants.UV, option_uvs, api.mesh.buffer_uv, 2),
(constants.NORMAL, option_normals,
api.mesh.buffer_normal, 3)
)
for key, option, func, size in dispatch:
if not option:
continue
array = func(self.node, self.options)
if not array:
logger.warning('No array could be made for %s', key)
continue
self[constants.ATTRIBUTES][key] = {
constants.ITEM_SIZE: size,
constants.TYPE: constants.FLOAT_32,
constants.ARRAY: array
}
def __parse_geometry(self):
if self.options.get(constants.VERTICES):
logger.info('Parsing %s', constants.VERTICES)
self[constants.VERTICES] = api.mesh.vertices(
self.node, self.options)
if self.options.get(constants.FACES):
logger.info('Parsing %s', constants.FACES)
self[constants.FACES] = api.mesh.faces(
self.node, self.options)
if self.options.get(constants.NORMALS):
logger.info('Parsing %s', constants.NORMALS)
self[constants.NORMALS] = api.mesh.normals(
self.node, self.options)
if self.options.get(constants.COLORS):
logger.info('Parsing %s', constants.COLORS)
self[constants.COLORS] = api.mesh.vertex_colors(
self.node)
if self.options.get(constants.FACE_MATERIALS):
logger.info('Parsing %s', constants.FACE_MATERIALS)
self[constants.MATERIALS] = api.mesh.materials(
self.node, self.options)
if self.options.get(constants.UVS):
logger.info('Parsing %s', constants.UVS)
self[constants.UVS] = api.mesh.uvs(
self.node, self.options)
if self.options.get(constants.ANIMATION):
logger.info('Parsing %s', constants.ANIMATION)
self[constants.ANIMATION] = api.mesh.animation(
self.node, self.options)
#@TODO: considering making bones data implied when
# querying skinning data
bone_map = {}
if self.options.get(constants.BONES):
logger.info('Parsing %s', constants.BONES)
bones, bone_map = api.mesh.bones(self.node)
self[constants.BONES] = bones
if self.options.get(constants.SKINNING):
logger.info('Parsing %s', constants.SKINNING)
influences = self.options.get(
constants.INFLUENCES_PER_VERTEX, 2)
self[constants.INFLUENCES_PER_VERTEX] = influences
self[constants.SKIN_INDICES] = api.mesh.skin_indices(
self.node, bone_map, influences)
self[constants.SKIN_WEIGHTS] = api.mesh.skin_weights(
self.node, bone_map, influences)
if self.options.get(constants.MORPH_TARGETS):
logger.info('Parsing %s', constants.MORPH_TARGETS)
self[constants.MORPH_TARGETS] = api.mesh.morph_targets(
self.node, self.options)
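# Illustrative driver for the Geometry node above (a sketch, not part of the
# exporter): `mesh_node` and `parent_scene` are assumed to be a Blender mesh object
# and a parent node exposing the .options/.filepath attributes that Geometry uses.
def _export_geometry_example(mesh_node, parent_scene):
    geo = Geometry(mesh_node, parent=parent_scene)
    geo.parse()    # fills vertices/faces/uvs/... according to the parent options
    geo.write()    # dumps the geometry data and copies any registered textures
    if geo.animation_filename:
        geo.write_animation(os.path.dirname(parent_scene.filepath))
    return geo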
|
mit
|
LuxoftAKutsan/sdl_core
|
src/3rd_party-static/jsoncpp/amalgamate.py
|
20
|
6606
|
"""Amalgate json-cpp library sources into a single source and header file.
Requires Python 2.6
Example of invocation (must be invoked from json-cpp top directory):
python amalgamate.py
"""
import os
import os.path
import sys
class AmalgamationFile:
def __init__( self, top_dir ):
self.top_dir = top_dir
self.blocks = []
def add_text( self, text ):
if not text.endswith( '\n' ):
text += '\n'
self.blocks.append( text )
def add_file( self, relative_input_path, wrap_in_comment=False ):
def add_marker( prefix ):
self.add_text( '' )
self.add_text( '// ' + '/'*70 )
self.add_text( '// %s of content of file: %s' % (prefix, relative_input_path.replace('\\','/')) )
self.add_text( '// ' + '/'*70 )
self.add_text( '' )
add_marker( 'Beginning' )
f = open( os.path.join( self.top_dir, relative_input_path ), 'rt' )
content = f.read()
if wrap_in_comment:
content = '/*\n' + content + '\n*/'
self.add_text( content )
f.close()
add_marker( 'End' )
self.add_text( '\n\n\n\n' )
def get_value( self ):
return ''.join( self.blocks ).replace('\r\n','\n')
def write_to( self, output_path ):
output_dir = os.path.dirname( output_path )
if output_dir and not os.path.isdir( output_dir ):
os.makedirs( output_dir )
f = open( output_path, 'wb' )
f.write( self.get_value() )
f.close()
def amalgamate_source( source_top_dir=None,
target_source_path=None,
header_include_path=None ):
"""Produces amalgated source.
Parameters:
source_top_dir: top-directory
target_source_path: output .cpp path
header_include_path: generated header path relative to target_source_path.
"""
    print 'Amalgamating header...'
header = AmalgamationFile( source_top_dir )
    header.add_text( '/// Json-cpp amalgamated header (http://jsoncpp.sourceforge.net/).' )
    header.add_text( '/// It is intended to be used with #include <%s>' % header_include_path )
header.add_file( 'LICENSE', wrap_in_comment=True )
header.add_text( '#ifndef JSON_AMALGATED_H_INCLUDED' )
header.add_text( '# define JSON_AMALGATED_H_INCLUDED' )
header.add_text( '/// If defined, indicates that the source file is amalgated' )
header.add_text( '/// to prevent private header inclusion.' )
header.add_text( '#define JSON_IS_AMALGATED' )
header.add_file( 'include/json/config.h' )
header.add_file( 'include/json/forwards.h' )
header.add_file( 'include/json/features.h' )
header.add_file( 'include/json/value.h' )
header.add_file( 'include/json/reader.h' )
header.add_file( 'include/json/writer.h' )
header.add_text( '#endif //ifndef JSON_AMALGATED_H_INCLUDED' )
target_header_path = os.path.join( os.path.dirname(target_source_path), header_include_path )
    print 'Writing amalgamated header to %r' % target_header_path
header.write_to( target_header_path )
base, ext = os.path.splitext( header_include_path )
forward_header_include_path = base + '-forwards' + ext
    print 'Amalgamating forward header...'
header = AmalgamationFile( source_top_dir )
    header.add_text( '/// Json-cpp amalgamated forward header (http://jsoncpp.sourceforge.net/).' )
    header.add_text( '/// It is intended to be used with #include <%s>' % forward_header_include_path )
header.add_text( '/// This header provides forward declaration for all JsonCpp types.' )
header.add_file( 'LICENSE', wrap_in_comment=True )
header.add_text( '#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED' )
header.add_text( '# define JSON_FORWARD_AMALGATED_H_INCLUDED' )
header.add_text( '/// If defined, indicates that the source file is amalgated' )
header.add_text( '/// to prevent private header inclusion.' )
header.add_text( '#define JSON_IS_AMALGATED' )
header.add_file( 'include/json/config.h' )
header.add_file( 'include/json/forwards.h' )
header.add_text( '#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED' )
target_forward_header_path = os.path.join( os.path.dirname(target_source_path),
forward_header_include_path )
    print 'Writing amalgamated forward header to %r' % target_forward_header_path
header.write_to( target_forward_header_path )
    print 'Amalgamating source...'
source = AmalgamationFile( source_top_dir )
    source.add_text( '/// Json-cpp amalgamated source (http://jsoncpp.sourceforge.net/).' )
    source.add_text( '/// It is intended to be used with #include <%s>' % header_include_path )
source.add_file( 'LICENSE', wrap_in_comment=True )
source.add_text( '' )
source.add_text( '#include <%s>' % header_include_path )
source.add_text( '' )
    source.add_file( 'src/lib_json/json_tool.h' )
    source.add_file( 'src/lib_json/json_reader.cpp' )
    source.add_file( 'src/lib_json/json_batchallocator.h' )
    source.add_file( 'src/lib_json/json_valueiterator.inl' )
    source.add_file( 'src/lib_json/json_value.cpp' )
    source.add_file( 'src/lib_json/json_writer.cpp' )
    print 'Writing amalgamated source to %r' % target_source_path
source.write_to( target_source_path )
def main():
usage = """%prog [options]
Generate a single amalgamated source and header file from the sources.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-s', '--source', dest="target_source_path", action='store', default='dist/jsoncpp.cpp',
help="""Output .cpp source path. [Default: %default]""")
parser.add_option('-i', '--include', dest="header_include_path", action='store', default='json/json.h',
help="""Header include path. Used to include the header from the amalgated source file. [Default: %default]""")
parser.add_option('-t', '--top-dir', dest="top_dir", action='store', default=os.getcwd(),
help="""Source top-directory. [Default: %default]""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
msg = amalgamate_source( source_top_dir=options.top_dir,
target_source_path=options.target_source_path,
header_include_path=options.header_include_path )
if msg:
sys.stderr.write( msg + '\n' )
sys.exit( 1 )
else:
        print 'Source successfully amalgamated'
if __name__ == '__main__':
main()
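# Illustrative programmatic use of amalgamate_source() (mirrors the optparse
# defaults used by main(); run from the json-cpp top directory):
#
#     msg = amalgamate_source(source_top_dir=os.getcwd(),
#                             target_source_path='dist/jsoncpp.cpp',
#                             header_include_path='json/json.h')
#     if msg:
#         sys.stderr.write(msg + '\n')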
|
bsd-3-clause
|
simonwydooghe/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_target_http_proxy_info.py
|
13
|
7153
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_http_proxy_info
description:
- Gather info for GCP TargetHttpProxy
short_description: Gather info for GCP TargetHttpProxy
version_added: '2.7'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
type: list
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- For authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a target HTTP proxy
gcp_compute_target_http_proxy_info:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
urlMap:
description:
- A reference to the UrlMap resource that defines the mapping from URL to the
BackendService.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/targetHttpProxies".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
return auth.list(link, return_if_object, array_name='items', params={'filter': query})
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
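# Quick illustration of how query_options() composes the `filters` list before it is
# passed as the `filter` query parameter (values below are hypothetical):
#
#   query_options(['name = test_object'])
#       -> 'name = test_object'
#   query_options(['name = test_object', 'description = demo'])
#       -> '(name = test_object) (description = demo)'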
|
gpl-3.0
|
benjystanton/F2D-Prototype
|
node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/generator/ninja.py
|
372
|
89149
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
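# A concrete (hypothetical) illustration of the two conversions, assuming the build
# directory is out/Debug, the .gyp file lives in foo/ (so build_to_base is
# '../../foo'), and the target is named 'targ':
#
#   GypPathToNinja('bar/baz.cc')                      -> '../../foo/bar/baz.cc'
#   GypPathToUniqueOutput('bar/baz.o')                -> 'obj/foo/bar/targ.baz.o'
#   GypPathToUniqueOutput('bar/baz.o', qualified=False) -> 'obj/foo/bar/baz.o'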
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines', defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=config_name)
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix so that a .cc file and an .asm file with the same
# name don't produce colliding outputs (msvs handles such pairs natively).
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
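# Illustrative example (hypothetical paths): a source 'foo/bar.cc' compiled for
# a target named 'base' typically becomes an edge along the lines of
#   build obj/<gyp-dir>/foo/base.bar.o: cxx ../../foo/bar.cc
# with any precompiled-header object added as an implicit dependency.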
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
cmd_map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = cmd_map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
self.ninja.build(output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
gyp.common.uniquer(map(self.ExpandSpecial, ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
return linked_binary
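# Note on the TOC mechanism used above: when a dependency is a shared library
# built with a .TOC file, the library itself is passed via $solibs while the
# link edge depends on lib<name>.so.TOC, so this binary only relinks when the
# library's public interface changes (names here are illustrative).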
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be nonzero if any postbuild fails. Run all the postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
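# Illustrative example: ComputeExportEnvString([('FOO', 'foo'), ('BAR', 'a b')])
# returns roughly 'export FOO=foo; export BAR="a b";' -- values are shell-quoted
# and ninja-escaped before being embedded in the action command.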
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
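# Illustrative example: on Linux a 'shared_library' target named 'foo' picks up
# prefix 'lib' and suffix '.so', yielding 'libfoo.so'; the same target with
# type 'none' would yield 'foo.stamp'.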
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
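# Illustrative example (hypothetical target/rule names): on non-Windows this
# emits a rule statement along the lines of
#   rule mytarget_target_do_thing
#     command = cd ../../some/dir; export FOO=foo; python do_thing.py ${source}
#     description = DO_THING ${source}
#     restat = 1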
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
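# Illustrative example: with no generator_output set and default generator
# flags this returns 'out', so a Debug configuration ends up building in
# 'out/Debug' relative to the source root.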
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
mem_limit = max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32)))
# return min(mem_limit, hard_cap)
# TODO(scottmg): Temporary speculative fix for OOM on builders
# See http://crbug.com/333000.
return 2
elif sys.platform.startswith('linux'):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8 GB per link on Linux because Gold is quite memory hungry.
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
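# Illustrative example: a Linux machine with 16 GB of RAM reports a MemTotal of
# roughly 16 * 2**20 kB, so the 8 GB-per-link heuristic above sizes the
# link_pool at 2 concurrent link jobs.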
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are:
# - Priority, from lowest to highest: the gcc/g++ defaults, then
#   'make_global_settings' in gyp, then the environment variables.
# - If there is no 'make_global_settings' for CC.host/CXX.host and no
#   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host fall back
#   to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja, so it needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; transform those here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s
# which is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
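# After substitution, the 'solink' command above links the library, generates a
# candidate TOC (SONAME plus exported dynamic symbols via readelf/nm), and only
# replaces ${lib}.TOC when that interface actually changed (once it exists),
# letting restat=1 prune relinks of dependents.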
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
pool='link_pool')
solink_module_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_module_suffix,
'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_module_suffix, 'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $keys')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case and the gyp tests, which expect to be able to
# run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
mit
|
pjbriggs/tools-iuc
|
tools/ncbi_entrez_eutils/ecitmatch.py
|
10
|
2254
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import eutils
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ECitMatch', epilog='')
parser.add_argument('--file', type=argparse.FileType('r'), help='Tabular file containing citations to search')
parser.add_argument('--key', nargs='*', help='Citation Key')
parser.add_argument('--journal_title', nargs='*', help='Journal Title')
parser.add_argument('--year', nargs='*', help='Year')
parser.add_argument('--volume', nargs='*', help='Volume')
parser.add_argument('--first_page', nargs='*', help='First Page')
parser.add_argument('--author_name', nargs='*', help='Author name')
# Emails
parser.add_argument('--user_email', help="User email")
parser.add_argument('--admin_email', help="Admin email")
args = parser.parse_args()
c = eutils.Client(user_email=args.user_email, admin_email=args.admin_email)
citations = []
if args.file is None:
for key, journal, year, volume, first_page, author_name in \
zip(args.key, args.journal_title, args.year, args.volume, args.first_page, args.author_name):
citations.append({
'key': key,
'journal': journal,
'year': year,
'volume': volume,
'first_page': first_page,
'author_name': author_name,
})
else:
for line in args.file:
line = line.strip()
if not line.startswith('#'):
tmp = line.split('\t')
try:
citations.append({
'journal': tmp[0],
'year': tmp[1],
'volume': tmp[2],
'first_page': tmp[3],
'author_name': tmp[4],
'key': tmp[5],
})
except IndexError:
print("Could not parse line: %s" % line)
payload = {
'db': 'pubmed',
'bdata': citations
}
results = c.citmatch(**payload)
# We get data back as pipe separated, so just replace those with tabs
print(results.replace('|', '\t'))
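# Example tabular input for --file (illustrative; columns in the order parsed
# above: journal, year, volume, first page, author name, key), e.g.:
#   proc natl acad sci u s a<TAB>1991<TAB>88<TAB>3248<TAB>mann bj<TAB>citation_1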
|
mit
|
lgarren/spack
|
var/spack/repos/builtin/packages/py-iminuit/package.py
|
3
|
1800
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyIminuit(PythonPackage):
"""Interactive IPython-Friendly Minimizer based on SEAL Minuit2."""
homepage = "https://pypi.python.org/pypi/iminuit"
url = "https://pypi.io/packages/source/i/iminuit/iminuit-1.2.tar.gz"
version('1.2', '4701ec472cae42015e26251703e6e984')
# Required dependencies
depends_on('py-setuptools', type='build')
# Optional dependencies
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-cython', type='build')
|
lgpl-2.1
|
wangzw/tushare
|
test/ref_test.py
|
36
|
1507
|
# -*- coding:utf-8 -*-
'''
Created on 2015/3/14
@author: Jimmy Liu
'''
import unittest
from tushare.stock import reference as fd
class Test(unittest.TestCase):
def set_data(self):
self.code = '600848'
self.start = '2015-01-03'
self.end = '2015-04-07'
self.year = 2014
self.quarter = 4
self.top = 60
self.show_content = True
def test_profit_data(self):
self.set_data()
print(fd.profit_data(top=self.top))
def test_forecast_data(self):
self.set_data()
print(fd.forecast_data(self.year, self.quarter))
def test_xsg_data(self):
print(fd.xsg_data())
def test_fund_holdings(self):
self.set_data()
print(fd.fund_holdings(self.year, self.quarter))
def test_new_stocks(self):
print(fd.new_stocks())
def test_sh_margin_details(self):
self.set_data()
print(fd.sh_margin_details(self.start, self.end, self.code))
def test_sh_margins(self):
self.set_data()
print(fd.sh_margins(self.start, self.end))
def test_sz_margins(self):
self.set_data()
print(fd.sz_margins(self.start, self.end))
def test_sz_margin_details(self):
self.set_data()
print(fd.sz_margin_details(self.end))
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
enkiv2/popcorn_maker
|
vendor-local/lib/python/easy_thumbnails/tests/files.py
|
4
|
5945
|
from os import path
from easy_thumbnails import files, utils, signals
from easy_thumbnails.tests import utils as test_utils
from easy_thumbnails.conf import settings
try:
from PIL import Image
except ImportError:
import Image
class FilesTest(test_utils.BaseTest):
def setUp(self):
super(FilesTest, self).setUp()
self.storage = test_utils.TemporaryStorage()
self.remote_storage = test_utils.FakeRemoteStorage()
# Save a test image in both storages.
filename = self.create_image(self.storage, 'test.jpg')
self.thumbnailer = files.get_thumbnailer(self.storage, filename)
self.thumbnailer.thumbnail_storage = self.storage
filename = self.create_image(self.remote_storage, 'test.jpg')
self.remote_thumbnailer = files.get_thumbnailer(self.remote_storage,
filename)
self.remote_thumbnailer.thumbnail_storage = self.remote_storage
# Create another thumbnailer for extension test.
self.ext_thumbnailer = files.get_thumbnailer(self.storage, filename)
self.ext_thumbnailer.thumbnail_storage = self.storage
# Generate test transparent images.
filename = self.create_image(self.storage, 'transparent.png',
image_mode='RGBA', image_format='PNG')
self.transparent_thumbnailer = files.get_thumbnailer(self.storage,
filename)
self.transparent_thumbnailer.thumbnail_storage = self.storage
filename = self.create_image(self.storage, 'transparent-greyscale.png',
image_mode='LA', image_format='PNG')
self.transparent_greyscale_thumbnailer = files.get_thumbnailer(
self.storage, filename)
self.transparent_greyscale_thumbnailer.thumbnail_storage = self.storage
def tearDown(self):
self.storage.delete_temporary_storage()
self.remote_storage.delete_temporary_storage()
super(FilesTest, self).tearDown()
def test_tag(self):
local = self.thumbnailer.get_thumbnail({'size': (100, 100)})
remote = self.remote_thumbnailer.get_thumbnail({'size': (100, 100)})
self.assertEqual(local.tag(), '<img alt="" height="75" '
'src="%s" width="100" />' % local.url)
self.assertEqual(local.tag(alt='A & B'), '<img alt="A &amp; B" '
'height="75" src="%s" width="100" />' % local.url)
# Can turn off dimensions.
self.assertEqual(remote.tag(use_size=False), '<img alt="" '
'src="%s" />' % remote.url)
# Thumbnails on remote storage don't get dimensions...
self.assertEqual(remote.tag(), '<img alt="" '
'src="%s" />' % remote.url)
# ...unless explicitly requested.
self.assertEqual(remote.tag(use_size=True), '<img alt="" height="75" '
'src="%s" width="100" />' % remote.url)
# All other arguments are passed through as attributes.
self.assertEqual(local.tag(**{'rel': 'A&B', 'class': 'fish'}),
'<img alt="" class="fish" height="75" rel="A&amp;B" '
'src="%s" width="100" />' % local.url)
def test_transparent_thumbnailing(self):
thumb_file = self.thumbnailer.get_thumbnail(
{'size': (100, 100)})
thumb_file.seek(0)
thumb = Image.open(thumb_file)
self.assertFalse(utils.is_transparent(thumb),
"%s shouldn't be transparent." % thumb_file.name)
thumb_file = self.transparent_thumbnailer.get_thumbnail(
{'size': (100, 100)})
thumb_file.seek(0)
thumb = Image.open(thumb_file)
self.assertTrue(utils.is_transparent(thumb),
"%s should be transparent." % thumb_file.name)
thumb_file = self.transparent_greyscale_thumbnailer.get_thumbnail(
{'size': (100, 100)})
thumb_file.seek(0)
thumb = Image.open(thumb_file)
self.assertTrue(utils.is_transparent(thumb),
"%s should be transparent." % thumb_file.name)
def test_extensions(self):
self.ext_thumbnailer.thumbnail_extension = 'png'
thumb = self.ext_thumbnailer.get_thumbnail({'size': (100, 100)})
self.assertEqual(path.splitext(thumb.name)[1], '.png')
self.ext_thumbnailer.thumbnail_preserve_extensions = ('foo',)
thumb = self.ext_thumbnailer.get_thumbnail({'size': (100, 100)})
self.assertEqual(path.splitext(thumb.name)[1], '.png')
self.ext_thumbnailer.thumbnail_preserve_extensions = True
thumb = self.ext_thumbnailer.get_thumbnail({'size': (100, 100)})
self.assertEqual(path.splitext(thumb.name)[1], '.jpg')
self.ext_thumbnailer.thumbnail_preserve_extensions = ('foo', 'jpg')
thumb = self.ext_thumbnailer.get_thumbnail({'size': (100, 100)})
self.assertEqual(path.splitext(thumb.name)[1], '.jpg')
def test_USE_TZ(self):
settings.USE_TZ = True
self.thumbnailer.get_thumbnail({'size': (10, 20)})
settings.USE_TZ = False
self.thumbnailer.get_thumbnail({'size': (20, 40)})
def test_thumbnailfile_options(self):
opts = {'size': (50, 50), 'crop': True, 'upscale': True}
thumb = self.thumbnailer.get_thumbnail(opts)
self.assertEqual(thumb.thumbnail_options, opts)
def test_default_options_setting(self):
settings.THUMBNAIL_DEFAULT_OPTIONS = {'crop': True}
opts = {'size': (50, 50)}
thumb = self.thumbnailer.get_thumbnail(opts)
self.assertEqual((thumb.width, thumb.height), (50, 50))
def test_thumbnail_created_signal(self):
def signal_handler(sender, *args, **kwargs):
sender.signal_received = True
signals.thumbnail_created.connect(signal_handler)
try:
thumb = self.thumbnailer.get_thumbnail({'size': (10, 20)})
self.assertTrue(hasattr(thumb, 'signal_received'))
finally:
signals.thumbnail_created.disconnect(signal_handler)
|
bsd-3-clause
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/roles/lib_openshift_3.2/library/oc_version.py
|
2
|
33688
|
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the named resource from a file '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create an object from content via a temporary file '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''create an object from a file '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
'''delete the named resource '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
'''process a template; optionally create the resulting objects '''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm manage-node --list-pods '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oadm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
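# Illustrative note (comment only, not part of the original module): on success
# with output=True and output_type='json', openshift_cmd returns a dict roughly
# of the form
#   {'returncode': 0, 'results': <parsed JSON>, 'cmd': '/usr/bin/oc get pods -o json'}
# while a non-zero return code adds 'stderr'/'stdout' and resets 'results' to {}.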
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
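# Illustrative comparison (comment only, not part of the original module);
# 'metadata' and 'status' are always skipped:
#   user = {'spec': {'replicas': 1}}
#   result = {'metadata': {'name': 'x'}, 'status': {}, 'spec': {'replicas': 1}}
#   Utils.check_def_equal(user, result)                      # -> True
#   Utils.check_def_equal({'spec': {'replicas': 2}}, result) # -> False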
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
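# Illustrative usage (comment only, not part of the original module), with a
# hypothetical option set:
#   options = {'image_tag': {'value': 'v1.2', 'include': True},
#              'dry_run':   {'value': None,  'include': True}}
#   OpenShiftCLIConfig('name', 'default', '/etc/origin/master/admin.kubeconfig',
#                      options).stringify()   # -> ['--image-tag=v1.2']
# Falsy non-int values and entries with include=False are skipped; underscores
# in the key become dashes in the flag name.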
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, value):
''' setter method for separator '''
self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
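# Illustrative behaviour (comment only, not part of the original module),
# assuming the default separator '.':
#   Yedit.parse_key('spec.containers[0].image')
#   # -> [('', 'spec'), ('', 'containers'), ('0', ''), ('', 'image')]
#   # each tuple is (array_index, dict_key); exactly one element is non-empty
#   Yedit.valid_key('spec.replicas')   # -> True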
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
item = 'd' results in d = {'a': {'b': 'd'}}
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
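# Illustrative behaviour (comment only, not part of the original module):
#   d = {'a': {'b': {'c': 'old'}}}
#   Yedit.add_entry(d, 'a.b.c', 'new')   # d becomes {'a': {'b': {'c': 'new'}}}
#   Yedit.add_entry(d, 'a.b.d[0]', 'x')  # raises YeditException: the target list does not exist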
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
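# Illustrative behaviour (comment only, not part of the original module):
#   Yedit.get_entry({'a': {'b': 'c'}}, 'a.b')        # -> 'c'
#   Yedit.get_entry({'a': [10, 20]}, 'a[1]')         # -> 20
#   Yedit.get_entry({'a': {'b': 'c'}}, 'a.missing')  # -> None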
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
else:
return False
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
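# Illustrative behaviour (comment only, not part of the original module):
#   OCVersion.add_custom_versions({'oc': 'v3.3.0.33', 'kubernetes': 'v1.3.0+52492b4'})
#   # -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3',
#   #     'kubernetes_numeric': '1.3.0', 'kubernetes_short': '1.3'}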
def get(self):
'''get and return version information '''
results = {}
results["installed"] = OCVersion.openshift_installed()
if not results["installed"]:
return results
version_results = self.openshift_cmd(['version'], output=True, output_type='raw')
if version_results['returncode'] == 0:
filtered_vers = OCVersion.filter_versions(version_results['results'])
custom_vers = OCVersion.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
# pylint: disable=too-many-branches
def main():
'''
ansible oc module for version
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
version=dict(default=True, type='bool'),
state=dict(default='list', type='str',
choices=['list']),
debug=dict(default=False, type='bool'),
),
)
oc_version = OCVersion(module.params['kubeconfig'],
module.params['debug'])
state = module.params['state']
if state == 'list':
#pylint: disable=protected-access
results = oc_version.get()
module.exit_json(changed=False, results=results)
if __name__ == '__main__':
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
apache-2.0
|