repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
grantsewell/nzbToMedia | libs/unidecode/x082.py | 252 | 4649 | data = (
'Yao ', # 0x00
'Yu ', # 0x01
'Chong ', # 0x02
'Xi ', # 0x03
'Xi ', # 0x04
'Jiu ', # 0x05
'Yu ', # 0x06
'Yu ', # 0x07
'Xing ', # 0x08
'Ju ', # 0x09
'Jiu ', # 0x0a
'Xin ', # 0x0b
'She ', # 0x0c
'She ', # 0x0d
'Yadoru ', # 0x0e
'Jiu ', # 0x0f
'Shi ', # 0x10
'Tan ', # 0x11
'Shu ', # 0x12
'Shi ', # 0x13
'Tian ', # 0x14
'Dan ', # 0x15
'Pu ', # 0x16
'Pu ', # 0x17
'Guan ', # 0x18
'Hua ', # 0x19
'Tan ', # 0x1a
'Chuan ', # 0x1b
'Shun ', # 0x1c
'Xia ', # 0x1d
'Wu ', # 0x1e
'Zhou ', # 0x1f
'Dao ', # 0x20
'Gang ', # 0x21
'Shan ', # 0x22
'Yi ', # 0x23
'[?] ', # 0x24
'Pa ', # 0x25
'Tai ', # 0x26
'Fan ', # 0x27
'Ban ', # 0x28
'Chuan ', # 0x29
'Hang ', # 0x2a
'Fang ', # 0x2b
'Ban ', # 0x2c
'Que ', # 0x2d
'Hesaki ', # 0x2e
'Zhong ', # 0x2f
'Jian ', # 0x30
'Cang ', # 0x31
'Ling ', # 0x32
'Zhu ', # 0x33
'Ze ', # 0x34
'Duo ', # 0x35
'Bo ', # 0x36
'Xian ', # 0x37
'Ge ', # 0x38
'Chuan ', # 0x39
'Jia ', # 0x3a
'Lu ', # 0x3b
'Hong ', # 0x3c
'Pang ', # 0x3d
'Xi ', # 0x3e
'[?] ', # 0x3f
'Fu ', # 0x40
'Zao ', # 0x41
'Feng ', # 0x42
'Li ', # 0x43
'Shao ', # 0x44
'Yu ', # 0x45
'Lang ', # 0x46
'Ting ', # 0x47
'[?] ', # 0x48
'Wei ', # 0x49
'Bo ', # 0x4a
'Meng ', # 0x4b
'Nian ', # 0x4c
'Ju ', # 0x4d
'Huang ', # 0x4e
'Shou ', # 0x4f
'Zong ', # 0x50
'Bian ', # 0x51
'Mao ', # 0x52
'Die ', # 0x53
'[?] ', # 0x54
'Bang ', # 0x55
'Cha ', # 0x56
'Yi ', # 0x57
'Sao ', # 0x58
'Cang ', # 0x59
'Cao ', # 0x5a
'Lou ', # 0x5b
'Dai ', # 0x5c
'Sori ', # 0x5d
'Yao ', # 0x5e
'Tong ', # 0x5f
'Yofune ', # 0x60
'Dang ', # 0x61
'Tan ', # 0x62
'Lu ', # 0x63
'Yi ', # 0x64
'Jie ', # 0x65
'Jian ', # 0x66
'Huo ', # 0x67
'Meng ', # 0x68
'Qi ', # 0x69
'Lu ', # 0x6a
'Lu ', # 0x6b
'Chan ', # 0x6c
'Shuang ', # 0x6d
'Gen ', # 0x6e
'Liang ', # 0x6f
'Jian ', # 0x70
'Jian ', # 0x71
'Se ', # 0x72
'Yan ', # 0x73
'Fu ', # 0x74
'Ping ', # 0x75
'Yan ', # 0x76
'Yan ', # 0x77
'Cao ', # 0x78
'Cao ', # 0x79
'Yi ', # 0x7a
'Le ', # 0x7b
'Ting ', # 0x7c
'Qiu ', # 0x7d
'Ai ', # 0x7e
'Nai ', # 0x7f
'Tiao ', # 0x80
'Jiao ', # 0x81
'Jie ', # 0x82
'Peng ', # 0x83
'Wan ', # 0x84
'Yi ', # 0x85
'Chai ', # 0x86
'Mian ', # 0x87
'Mie ', # 0x88
'Gan ', # 0x89
'Qian ', # 0x8a
'Yu ', # 0x8b
'Yu ', # 0x8c
'Shuo ', # 0x8d
'Qiong ', # 0x8e
'Tu ', # 0x8f
'Xia ', # 0x90
'Qi ', # 0x91
'Mang ', # 0x92
'Zi ', # 0x93
'Hui ', # 0x94
'Sui ', # 0x95
'Zhi ', # 0x96
'Xiang ', # 0x97
'Bi ', # 0x98
'Fu ', # 0x99
'Tun ', # 0x9a
'Wei ', # 0x9b
'Wu ', # 0x9c
'Zhi ', # 0x9d
'Qi ', # 0x9e
'Shan ', # 0x9f
'Wen ', # 0xa0
'Qian ', # 0xa1
'Ren ', # 0xa2
'Fou ', # 0xa3
'Kou ', # 0xa4
'Jie ', # 0xa5
'Lu ', # 0xa6
'Xu ', # 0xa7
'Ji ', # 0xa8
'Qin ', # 0xa9
'Qi ', # 0xaa
'Yuan ', # 0xab
'Fen ', # 0xac
'Ba ', # 0xad
'Rui ', # 0xae
'Xin ', # 0xaf
'Ji ', # 0xb0
'Hua ', # 0xb1
'Hua ', # 0xb2
'Fang ', # 0xb3
'Wu ', # 0xb4
'Jue ', # 0xb5
'Gou ', # 0xb6
'Zhi ', # 0xb7
'Yun ', # 0xb8
'Qin ', # 0xb9
'Ao ', # 0xba
'Chu ', # 0xbb
'Mao ', # 0xbc
'Ya ', # 0xbd
'Fei ', # 0xbe
'Reng ', # 0xbf
'Hang ', # 0xc0
'Cong ', # 0xc1
'Yin ', # 0xc2
'You ', # 0xc3
'Bian ', # 0xc4
'Yi ', # 0xc5
'Susa ', # 0xc6
'Wei ', # 0xc7
'Li ', # 0xc8
'Pi ', # 0xc9
'E ', # 0xca
'Xian ', # 0xcb
'Chang ', # 0xcc
'Cang ', # 0xcd
'Meng ', # 0xce
'Su ', # 0xcf
'Yi ', # 0xd0
'Yuan ', # 0xd1
'Ran ', # 0xd2
'Ling ', # 0xd3
'Tai ', # 0xd4
'Tiao ', # 0xd5
'Di ', # 0xd6
'Miao ', # 0xd7
'Qiong ', # 0xd8
'Li ', # 0xd9
'Yong ', # 0xda
'Ke ', # 0xdb
'Mu ', # 0xdc
'Pei ', # 0xdd
'Bao ', # 0xde
'Gou ', # 0xdf
'Min ', # 0xe0
'Yi ', # 0xe1
'Yi ', # 0xe2
'Ju ', # 0xe3
'Pi ', # 0xe4
'Ruo ', # 0xe5
'Ku ', # 0xe6
'Zhu ', # 0xe7
'Ni ', # 0xe8
'Bo ', # 0xe9
'Bing ', # 0xea
'Shan ', # 0xeb
'Qiu ', # 0xec
'Yao ', # 0xed
'Xian ', # 0xee
'Ben ', # 0xef
'Hong ', # 0xf0
'Ying ', # 0xf1
'Zha ', # 0xf2
'Dong ', # 0xf3
'Ju ', # 0xf4
'Die ', # 0xf5
'Nie ', # 0xf6
'Gan ', # 0xf7
'Hu ', # 0xf8
'Ping ', # 0xf9
'Mei ', # 0xfa
'Fu ', # 0xfb
'Sheng ', # 0xfc
'Gu ', # 0xfd
'Bi ', # 0xfe
'Wei ', # 0xff
)
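# Illustrative lookup (comment added for clarity, not part of the generated table):
# each entry's index is the low byte of a code point in the U+82xx block, so
#     data[0x822E & 0xFF]   # -> 'Hesaki '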
| gpl-3.0 | -1,827,297,418,165,924,900 | 17.01938 | 20 | 0.390191 | false |
s0enke/boto | boto/exception.py | 117 | 17106 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
super(BotoClientError, self).__init__(reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
super(BotoServerError, self).__init__(status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self._error_message = None
self.message = ''
self.box_usage = None
if isinstance(self.body, bytes):
try:
self.body = self.body.decode('utf-8')
except UnicodeDecodeError:
boto.log.debug('Unable to decode body from bytes!')
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
# Check if it looks like a ``dict``.
if hasattr(self.body, 'items'):
# It's not a string, so trying to parse it will fail.
# But since it's data, we can work with that.
self.request_id = self.body.get('RequestId', None)
if 'Error' in self.body:
# XML-style
error = self.body.get('Error', {})
self.error_code = error.get('Code', None)
self.message = error.get('Message', None)
else:
# JSON-style.
self.message = self.body.get('message', None)
else:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException):
# What if it's JSON? Let's try that.
try:
parsed = json.loads(self.body)
if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']
except (TypeError, ValueError):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
def __getattr__(self, name):
if name == 'error_message':
return self.message
if name == 'code':
return self.error_code
raise AttributeError
def __setattr__(self, name, value):
if name == 'error_message':
self.message = value
else:
super(BotoServerError, self).__setattr__(name, value)
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.message = None
self.box_usage = None
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
super(StorageCreateError, self).__init__(status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return super(StorageCreateError, self).endElement(name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
super(SQSError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(SQSError, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return super(SQSError, self).endElement(name, value, connection)
def _cleanupParsedProperties(self):
super(SQSError, self)._cleanupParsedProperties()
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
super(SQSDecodeError, self).__init__(reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
super(StorageResponseError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(StorageResponseError, self).startElement(
name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return super(StorageResponseError, self).endElement(
name, value, connection)
def _cleanupParsedProperties(self):
super(StorageResponseError, self)._cleanupParsedProperties()
for p in ('resource',):
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
super(EC2ResponseError, self).__init__(status, reason, body)
self.errors = [
(e.error_code, e.error_message) for e in self._errorResultSet]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
super(EC2ResponseError, self)._cleanupParsedProperties()
self._errorResultSet = []
for p in ('errors',):
setattr(self, p, None)
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
body to be passed as the body parameter.
:ivar status: The HTTP status code.
:ivar reason: The HTTP reason message.
:ivar body: The Python dict that represents the decoded JSON
response body.
:ivar error_message: The full description of the AWS error encountered.
:ivar error_code: A short string that identifies the AWS error
(e.g. ConditionalCheckFailedException)
"""
def __init__(self, status, reason, body=None, *args):
self.status = status
self.reason = reason
self.body = body
if self.body:
self.error_message = self.body.get('message', None)
self.error_code = self.body.get('__type', None)
if self.error_code:
self.error_code = self.error_code.split('#')[-1]
class DynamoDBResponseError(JSONResponseError):
pass
class SWFResponseError(JSONResponseError):
pass
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error(object):
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
super(InvalidUriError, self).__init__(message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
super(InvalidAclError, self).__init__(message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
super(InvalidCorsError, self).__init__(message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
class InvalidLifecycleConfigError(Exception):
"""Exception raised when GCS lifecycle configuration XML is invalid."""
def __init__(self, message):
super(InvalidLifecycleConfigError, self).__init__(message)
self.message = message
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableUploadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableDownloadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
class TooManyRecordsException(Exception):
"""
Exception raised when a search of Route53 records returns more
records than requested.
"""
def __init__(self, message):
super(TooManyRecordsException, self).__init__(message)
self.message = message
class PleaseRetryException(Exception):
"""
Indicates a request should be retried.
"""
def __init__(self, message, response=None):
self.message = message
self.response = response
def __repr__(self):
return 'PleaseRetryException("%s", %s)' % (
self.message,
self.response
)
| mit | -6,589,614,208,402,237,000 | 28.853403 | 87 | 0.614404 | false |
j4s0nh4ck/you-get | src/you_get/extractors/ehow.py | 20 | 1099 | #!/usr/bin/env python
__all__ = ['ehow_download']
from ..common import *
def ehow_download(url, output_dir = '.', merge = True, info_only = False):
assert re.search(r'http://www.ehow.com/video_', url), "URL you entered is not supported"
html = get_html(url)
contentid = r1(r'<meta name="contentid" scheme="DMINSTR2" content="([^"]+)" />', html)
vid = r1(r'"demand_ehow_videoid":"([^"]+)"', html)
assert vid
xml = get_html('http://www.ehow.com/services/video/series.xml?demand_ehow_videoid=%s' % vid)
from xml.dom.minidom import parseString
doc = parseString(xml)
tab = doc.getElementsByTagName('related')[0].firstChild
for video in tab.childNodes:
if re.search(contentid, video.attributes['link'].value):
url = video.attributes['flv'].value
break
title = video.attributes['title'].value
assert title
type, ext, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge)
site_info = "ehow.com"
download = ehow_download
download_playlist = playlist_not_supported('ehow') | mit | -7,793,249,044,918,545,000 | 27.947368 | 93 | 0.680619 | false |
Yelp/paasta | setup.py | 1 | 5208 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
from pkg_resources import yield_lines
from setuptools import find_packages
from setuptools import setup
from paasta_tools import __version__
def get_install_requires():
with open("requirements-minimal.txt", "r") as f:
minimal_reqs = list(yield_lines(f.read()))
return minimal_reqs
setup(
name="paasta-tools",
version=__version__,
provides=["paasta_tools"],
author="Compute Infrastructure @ Yelp",
author_email="[email protected]",
description="Tools for Yelps SOA infrastructure",
packages=find_packages(exclude=("tests*", "scripts*")),
include_package_data=True,
install_requires=get_install_requires(),
scripts=[
"paasta_tools/am_i_mesos_leader.py",
"paasta_tools/apply_external_resources.py",
"paasta_tools/autoscale_all_services.py",
"paasta_tools/check_flink_services_health.py",
"paasta_tools/check_cassandracluster_services_replication.py",
"paasta_tools/check_marathon_services_replication.py",
"paasta_tools/check_kubernetes_api.py",
"paasta_tools/check_kubernetes_services_replication.py",
"paasta_tools/check_oom_events.py",
"paasta_tools/check_spark_jobs.py",
"paasta_tools/cleanup_marathon_jobs.py",
"paasta_tools/cleanup_kubernetes_cr.py",
"paasta_tools/cleanup_kubernetes_crd.py",
"paasta_tools/cleanup_kubernetes_jobs.py",
"paasta_tools/delete_kubernetes_deployments.py",
"paasta_tools/deploy_marathon_services",
"paasta_tools/paasta_deploy_tron_jobs",
"paasta_tools/generate_all_deployments",
"paasta_tools/generate_deployments_for_service.py",
"paasta_tools/generate_services_file.py",
"paasta_tools/generate_services_yaml.py",
"paasta_tools/get_mesos_leader.py",
"paasta_tools/kubernetes/bin/paasta_secrets_sync.py",
"paasta_tools/kubernetes/bin/paasta_cleanup_stale_nodes.py",
"paasta_tools/kubernetes/bin/kubernetes_remove_evicted_pods.py",
"paasta_tools/list_marathon_service_instances.py",
"paasta_tools/log_task_lifecycle_events.py",
"paasta_tools/marathon_dashboard.py",
"paasta_tools/monitoring/check_capacity.py",
"paasta_tools/monitoring/check_marathon_has_apps.py",
"paasta_tools/monitoring/check_mesos_active_frameworks.py",
"paasta_tools/monitoring/check_mesos_duplicate_frameworks.py",
"paasta_tools/monitoring/check_mesos_quorum.py",
"paasta_tools/monitoring/check_mesos_outdated_tasks.py",
"paasta_tools/monitoring/kill_orphaned_docker_containers.py",
"paasta_tools/cli/paasta_tabcomplete.sh",
"paasta_tools/paasta_cluster_boost.py",
"paasta_tools/paasta_execute_docker_command.py",
"paasta_tools/paasta_maintenance.py",
"paasta_tools/paasta_metastatus.py",
"paasta_tools/paasta_remote_run.py",
"paasta_tools/setup_kubernetes_job.py",
"paasta_tools/setup_kubernetes_crd.py",
"paasta_tools/setup_kubernetes_cr.py",
"paasta_tools/setup_marathon_job.py",
"paasta_tools/setup_prometheus_adapter_config.py",
"paasta_tools/synapse_srv_namespaces_fact.py",
]
+ glob.glob("paasta_tools/contrib/*.sh")
+ glob.glob("paasta_tools/contrib/[!_]*.py"),
entry_points={
"console_scripts": [
"paasta=paasta_tools.cli.cli:main",
"paasta-api=paasta_tools.api.api:main",
"paasta-deployd=paasta_tools.deployd.master:main",
"paasta-fsm=paasta_tools.cli.fsm_cmd:main",
"paasta_cleanup_tron_namespaces=paasta_tools.cleanup_tron_namespaces:main",
"paasta_list_kubernetes_service_instances=paasta_tools.list_kubernetes_service_instances:main",
"paasta_list_tron_namespaces=paasta_tools.list_tron_namespaces:main",
"paasta_setup_tron_namespace=paasta_tools.setup_tron_namespace:main",
"paasta_cleanup_maintenance=paasta_tools.cleanup_maintenance:main",
"paasta_docker_wrapper=paasta_tools.docker_wrapper:main",
"paasta_firewall_update=paasta_tools.firewall_update:main",
"paasta_firewall_logging=paasta_tools.firewall_logging:main",
"paasta_oom_logger=paasta_tools.oom_logger:main",
"paasta_broadcast_log=paasta_tools.broadcast_log_to_services:main",
"paasta_dump_locally_running_services=paasta_tools.dump_locally_running_services:main",
],
"paste.app_factory": ["paasta-api-config=paasta_tools.api.api:make_app"],
},
)
| apache-2.0 | -3,272,263,591,176,207,400 | 45.5 | 107 | 0.682796 | false |
ahmed-mahran/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/terminal.py | 75 | 4012 | # -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('lightgray', 'darkgray'),
Comment: ('lightgray', 'darkgray'),
Comment.Preproc: ('teal', 'turquoise'),
Keyword: ('darkblue', 'blue'),
Keyword.Type: ('teal', 'turquoise'),
Operator.Word: ('purple', 'fuchsia'),
Name.Builtin: ('teal', 'turquoise'),
Name.Function: ('darkgreen', 'green'),
Name.Namespace: ('_teal_', '_turquoise_'),
Name.Class: ('_darkgreen_', '_green_'),
Name.Exception: ('teal', 'turquoise'),
Name.Decorator: ('darkgray', 'lightgray'),
Name.Variable: ('darkred', 'red'),
Name.Constant: ('darkred', 'red'),
Name.Attribute: ('teal', 'turquoise'),
Name.Tag: ('blue', 'blue'),
String: ('brown', 'brown'),
Number: ('darkblue', 'blue'),
Generic.Deleted: ('red', 'red'),
Generic.Inserted: ('darkgreen', 'green'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*purple*', '*fuchsia*'),
Generic.Error: ('red', 'red'),
Error: ('_red_', '_red_'),
}
class TerminalFormatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
"""
name = 'Terminal'
aliases = ['terminal', 'console']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty():
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ansiformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ansiformat(color, spl[-1]))
else:
outfile.write(value)
| apache-2.0 | -2,827,566,433,296,070,000 | 35.807339 | 79 | 0.54337 | false |
nrsimha/wagtail | wagtail/wagtailimages/rich_text.py | 9 | 1466 | from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
except Image.DoesNotExist:
return "<img>"
image_format = get_image_format(attrs['format'])
if for_editor:
return image_format.image_to_editor_html(image, attrs['alt'])
else:
return image_format.image_to_html(image, attrs['alt'])
| bsd-3-clause | 345,756,464,383,440,830 | 33.904762 | 86 | 0.615962 | false |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/fpformat.py | 322 | 4699 | """General floating point formatting functions.
Functions:
fix(x, digits_behind)
sci(x, digits_behind)
Each takes a number or a string and a number of digits as arguments.
Parameters:
x: number to be formatted; or a string resembling a number
digits_behind: number of digits behind the decimal point
"""
from warnings import warnpy3k
warnpy3k("the fpformat module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import re
__all__ = ["fix","sci","NotANumber"]
# Compiled regular expression to "decode" a number
decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \0 the whole thing
# \1 leading sign or empty
# \2 digits left of decimal point
# \3 fraction (empty or begins with point)
# \4 exponent part (empty or begins with 'e' or 'E')
try:
class NotANumber(ValueError):
pass
except TypeError:
NotANumber = 'fpformat.NotANumber'
def extract(s):
"""Return (sign, intpart, fraction, expo) or raise an exception:
sign is '+' or '-'
intpart is 0 or more digits beginning with a nonzero
fraction is 0 or more digits
expo is an integer"""
res = decoder.match(s)
if res is None: raise NotANumber, s
sign, intpart, fraction, exppart = res.group(1,2,3,4)
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
else: expo = 0
return sign, intpart, fraction, expo
def unexpo(intpart, fraction, expo):
"""Remove the exponent by changing intpart and fraction."""
if expo > 0: # Move the point left
f = len(fraction)
intpart, fraction = intpart + fraction[:expo], fraction[expo:]
if expo > f:
intpart = intpart + '0'*(expo-f)
elif expo < 0: # Move the point right
i = len(intpart)
intpart, fraction = intpart[:expo], intpart[expo:] + fraction
if expo < -i:
fraction = '0'*(-expo-i) + fraction
return intpart, fraction
def roundfrac(intpart, fraction, digs):
"""Round or extend the fraction to size digs."""
f = len(fraction)
if f <= digs:
return intpart, fraction + '0'*(digs-f)
i = len(intpart)
if i+digs < 0:
return '0'*-digs, ''
total = intpart + fraction
nextdigit = total[i+digs]
if nextdigit >= '5': # Hard case: increment last digit, may have carry!
n = i + digs - 1
while n >= 0:
if total[n] != '9': break
n = n-1
else:
total = '0' + total
i = i+1
n = 0
total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
intpart, fraction = total[:i], total[i:]
if digs >= 0:
return intpart, fraction[:digs]
else:
return intpart[:digs] + '0'*-digs, ''
def fix(x, digs):
"""Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed."""
if type(x) != type(''): x = repr(x)
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
def sci(x, digs):
"""Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed."""
if type(x) != type(''): x = repr(x)
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = repr(abs(expo))
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e
def test():
"""Interactive test run."""
try:
while 1:
x, digs = input('Enter (x, digs): ')
print x, fix(x, digs), sci(x, digs)
except (EOFError, KeyboardInterrupt):
pass
| gpl-2.0 | 3,394,050,482,289,884,700 | 31.406897 | 76 | 0.57608 | false |
gautamMalu/rootfs_xen_arndale | usr/lib/python3/dist-packages/dbus/mainloop/glib.py | 10 | 1773 | # Copyright (C) 2004 Anders Carlsson
# Copyright (C) 2004-2006 Red Hat Inc. <http://www.redhat.com/>
# Copyright (C) 2005-2006 Collabora Ltd. <http://www.collabora.co.uk/>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""GLib main loop integration using libdbus-glib."""
__all__ = ('DBusGMainLoop', 'threads_init')
from _dbus_glib_bindings import DBusGMainLoop, gthreads_init
_dbus_gthreads_initialized = False
def threads_init():
"""Initialize threads in dbus-glib, if this has not already been done.
This must be called before creating a second thread in a program that
uses this module.
"""
global _dbus_gthreads_initialized
if not _dbus_gthreads_initialized:
gthreads_init()
_dbus_gthreads_initialized = True
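# Typical wiring, shown for illustration (set_as_default is provided by the
# dbus-python bindings, not defined in this file):
#
#     from dbus.mainloop.glib import DBusGMainLoop, threads_init
#     threads_init()
#     DBusGMainLoop(set_as_default=True)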
| gpl-2.0 | 4,866,839,379,932,341,000 | 42.243902 | 74 | 0.748449 | false |
traveloka/ansible | lib/ansible/modules/cloud/rackspace/rax_keypair.py | 50 | 5128 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_keypair
short_description: Create a keypair for use with Rackspace Cloud Servers
description:
- Create a keypair for use with Rackspace Cloud Servers
version_added: 1.5
options:
name:
description:
- Name of keypair
required: true
public_key:
description:
- Public Key string to upload. Can be a file path or string
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: "Matt Martz (@sivel)"
notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate.
- The ability to specify a file path for the public key was added in 1.7
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
region: DFW
register: keypair
- name: Create local public key
local_action:
module: copy
content: "{{ keypair.keypair.public_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- name: Create local private key
local_action:
module: copy
content: "{{ keypair.keypair.private_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
region: DFW
register: keypair
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_keypair(module, name, public_key, state):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
keypair = {}
if state == 'present':
if public_key and os.path.isfile(public_key):
try:
f = open(public_key)
public_key = f.read()
f.close()
except Exception as e:
module.fail_json(msg='Failed to load %s' % public_key)
try:
keypair = cs.keypairs.find(name=name)
except cs.exceptions.NotFound:
try:
keypair = cs.keypairs.create(name, public_key)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
except Exception as e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
keypair = cs.keypairs.find(name=name)
except:
pass
if keypair:
try:
keypair.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
public_key = module.params.get('public_key')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_keypair(module, name, public_key, state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
if __name__ == '__main__':
main()
| gpl-3.0 | -8,319,619,266,170,348,000 | 27.488889 | 75 | 0.614275 | false |
PetePriority/home-assistant | homeassistant/components/xiaomi_aqara/binary_sensor.py | 3 | 17451 | """Support for Xiaomi aqara binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.xiaomi_aqara import (PY_XIAOMI_GATEWAY,
XiaomiDevice)
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
NO_CLOSE = 'no_close'
ATTR_OPEN_SINCE = 'Open since'
MOTION = 'motion'
NO_MOTION = 'no_motion'
ATTR_LAST_ACTION = 'last_action'
ATTR_NO_MOTION_SINCE = 'No motion since'
DENSITY = 'density'
ATTR_DENSITY = 'Density'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Perform the setup for Xiaomi devices."""
devices = []
for (_, gateway) in hass.data[PY_XIAOMI_GATEWAY].gateways.items():
for device in gateway.devices['binary_sensor']:
model = device['model']
if model in ['motion', 'sensor_motion', 'sensor_motion.aq2']:
devices.append(XiaomiMotionSensor(device, hass, gateway))
elif model in ['magnet', 'sensor_magnet', 'sensor_magnet.aq2']:
devices.append(XiaomiDoorSensor(device, gateway))
elif model == 'sensor_wleak.aq1':
devices.append(XiaomiWaterLeakSensor(device, gateway))
elif model in ['smoke', 'sensor_smoke']:
devices.append(XiaomiSmokeSensor(device, gateway))
elif model in ['natgas', 'sensor_natgas']:
devices.append(XiaomiNatgasSensor(device, gateway))
elif model in ['switch', 'sensor_switch',
'sensor_switch.aq2', 'sensor_switch.aq3',
'remote.b1acn01']:
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'button_0'
devices.append(XiaomiButton(device, 'Switch', data_key,
hass, gateway))
elif model in ['86sw1', 'sensor_86sw1', 'sensor_86sw1.aq1',
'remote.b186acn01']:
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'channel_0'
else:
data_key = 'button_0'
devices.append(XiaomiButton(device, 'Wall Switch', data_key,
hass, gateway))
elif model in ['86sw2', 'sensor_86sw2', 'sensor_86sw2.aq1',
'remote.b286acn01']:
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key_left = 'channel_0'
data_key_right = 'channel_1'
else:
data_key_left = 'button_0'
data_key_right = 'button_1'
devices.append(XiaomiButton(device, 'Wall Switch (Left)',
data_key_left, hass, gateway))
devices.append(XiaomiButton(device, 'Wall Switch (Right)',
data_key_right, hass, gateway))
devices.append(XiaomiButton(device, 'Wall Switch (Both)',
'dual_channel', hass, gateway))
elif model in ['cube', 'sensor_cube', 'sensor_cube.aqgl01']:
devices.append(XiaomiCube(device, hass, gateway))
elif model in ['vibration', 'vibration.aq1']:
devices.append(XiaomiVibration(device, 'Vibration',
'status', gateway))
else:
_LOGGER.warning('Unmapped Device Model %s', model)
add_entities(devices)
class XiaomiBinarySensor(XiaomiDevice, BinarySensorDevice):
"""Representation of a base XiaomiBinarySensor."""
def __init__(self, device, name, xiaomi_hub, data_key, device_class):
"""Initialize the XiaomiSmokeSensor."""
self._data_key = data_key
self._device_class = device_class
self._should_poll = False
self._density = 0
XiaomiDevice.__init__(self, device, name, xiaomi_hub)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return self._should_poll
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of binary sensor."""
return self._device_class
def update(self):
"""Update the sensor state."""
_LOGGER.debug('Updating xiaomi sensor (%s) by polling', self._sid)
self._get_from_hub(self._sid)
class XiaomiNatgasSensor(XiaomiBinarySensor):
"""Representation of a XiaomiNatgasSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiSmokeSensor."""
self._density = None
XiaomiBinarySensor.__init__(self, device, 'Natgas Sensor', xiaomi_hub,
'alarm', 'gas')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ('1', '2'):
if self._state:
return False
self._state = True
return True
if value == '0':
if self._state:
self._state = False
return True
return False
class XiaomiMotionSensor(XiaomiBinarySensor):
"""Representation of a XiaomiMotionSensor."""
def __init__(self, device, hass, xiaomi_hub):
"""Initialize the XiaomiMotionSensor."""
self._hass = hass
self._no_motion_since = 0
self._unsub_set_no_motion = None
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'motion_status'
XiaomiBinarySensor.__init__(self, device, 'Motion Sensor', xiaomi_hub,
data_key, 'motion')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_NO_MOTION_SINCE: self._no_motion_since}
attrs.update(super().device_state_attributes)
return attrs
@callback
def _async_set_no_motion(self, now):
"""Set state to False."""
self._unsub_set_no_motion = None
self._state = False
self.async_schedule_update_ha_state()
def parse_data(self, data, raw_data):
"""Parse data sent by gateway.
Polling (proto v1, firmware version 1.4.1_159.0143)
>> { "cmd":"read","sid":"158..."}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'read_ack', 'data': '{"voltage":3005}'}
Multicast messages (proto v1, firmware version 1.4.1_159.0143)
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"status":"motion"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"120"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"180"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"300"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'heartbeat', 'data': '{"voltage":3005}'}
"""
if raw_data['cmd'] == 'heartbeat':
_LOGGER.debug(
'Skipping heartbeat of the motion sensor. '
'It can introduce an incorrect state because of a firmware '
'bug (https://github.com/home-assistant/home-assistant/pull/'
'11631#issuecomment-357507744).')
return
if NO_MOTION in data:
self._no_motion_since = data[NO_MOTION]
self._state = False
return True
value = data.get(self._data_key)
if value is None:
return False
if value == MOTION:
if self._data_key == 'motion_status':
if self._unsub_set_no_motion:
self._unsub_set_no_motion()
self._unsub_set_no_motion = async_call_later(
self._hass,
120,
self._async_set_no_motion
)
if self.entity_id is not None:
self._hass.bus.fire('xiaomi_aqara.motion', {
'entity_id': self.entity_id
})
self._no_motion_since = 0
if self._state:
return False
self._state = True
return True
class XiaomiDoorSensor(XiaomiBinarySensor):
"""Representation of a XiaomiDoorSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiDoorSensor."""
self._open_since = 0
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'window_status'
XiaomiBinarySensor.__init__(self, device, 'Door Window Sensor',
xiaomi_hub, data_key, 'opening')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_OPEN_SINCE: self._open_since}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
if NO_CLOSE in data: # handle push from the hub
self._open_since = data[NO_CLOSE]
return True
value = data.get(self._data_key)
if value is None:
return False
if value == 'open':
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == 'close':
self._open_since = 0
if self._state:
self._state = False
return True
return False
class XiaomiWaterLeakSensor(XiaomiBinarySensor):
"""Representation of a XiaomiWaterLeakSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiWaterLeakSensor."""
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'wleak_status'
XiaomiBinarySensor.__init__(self, device, 'Water Leak Sensor',
xiaomi_hub, data_key, 'moisture')
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
value = data.get(self._data_key)
if value is None:
return False
if value == 'leak':
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == 'no_leak':
if self._state:
self._state = False
return True
return False
class XiaomiSmokeSensor(XiaomiBinarySensor):
"""Representation of a XiaomiSmokeSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiSmokeSensor."""
self._density = 0
XiaomiBinarySensor.__init__(self, device, 'Smoke Sensor', xiaomi_hub,
'alarm', 'smoke')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ('1', '2'):
if self._state:
return False
self._state = True
return True
if value == '0':
if self._state:
self._state = False
return True
return False
class XiaomiVibration(XiaomiBinarySensor):
"""Representation of a Xiaomi Vibration Sensor."""
def __init__(self, device, name, data_key, xiaomi_hub):
"""Initialize the XiaomiVibration."""
self._last_action = None
super().__init__(device, name, xiaomi_hub, data_key, None)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value not in ('vibrate', 'tilt', 'free_fall'):
_LOGGER.warning("Unsupported movement_type detected: %s",
value)
return False
self.hass.bus.fire('xiaomi_aqara.movement', {
'entity_id': self.entity_id,
'movement_type': value
})
self._last_action = value
return True
class XiaomiButton(XiaomiBinarySensor):
"""Representation of a Xiaomi Button."""
def __init__(self, device, name, data_key, hass, xiaomi_hub):
"""Initialize the XiaomiButton."""
self._hass = hass
self._last_action = None
XiaomiBinarySensor.__init__(self, device, name, xiaomi_hub,
data_key, None)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value == 'long_click_press':
self._state = True
click_type = 'long_click_press'
elif value == 'long_click_release':
self._state = False
click_type = 'hold'
elif value == 'click':
click_type = 'single'
elif value == 'double_click':
click_type = 'double'
elif value == 'both_click':
click_type = 'both'
elif value == 'double_both_click':
click_type = 'double_both'
elif value == 'shake':
click_type = 'shake'
elif value == 'long_click':
click_type = 'long'
elif value == 'long_both_click':
click_type = 'long_both'
else:
_LOGGER.warning("Unsupported click_type detected: %s", value)
return False
self._hass.bus.fire('xiaomi_aqara.click', {
'entity_id': self.entity_id,
'click_type': click_type
})
self._last_action = click_type
return True
class XiaomiCube(XiaomiBinarySensor):
"""Representation of a Xiaomi Cube."""
def __init__(self, device, hass, xiaomi_hub):
"""Initialize the Xiaomi Cube."""
self._hass = hass
self._last_action = None
self._state = False
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'cube_status'
XiaomiBinarySensor.__init__(self, device, 'Cube', xiaomi_hub,
data_key, None)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if self._data_key in data:
self._hass.bus.fire('xiaomi_aqara.cube_action', {
'entity_id': self.entity_id,
'action_type': data[self._data_key]
})
self._last_action = data[self._data_key]
if 'rotate' in data:
self._hass.bus.fire('xiaomi_aqara.cube_action', {
'entity_id': self.entity_id,
'action_type': 'rotate',
'action_value': float(data['rotate'].replace(",", "."))
})
self._last_action = 'rotate'
if 'rotate_degree' in data:
self._hass.bus.fire('xiaomi_aqara.cube_action', {
'entity_id': self.entity_id,
'action_type': 'rotate',
'action_value': float(data['rotate_degree'].replace(",", "."))
})
self._last_action = 'rotate'
return True
| apache-2.0 | -1,792,327,147,663,880,000 | 34.325911 | 78 | 0.528738 | false |
Lyleo/nupic | nupic/datafiles/extra/firstOrder/raw/makeDataset.py | 17 | 3488 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file import File
def createFirstOrderModel(numCategories=5, alpha=0.5):
categoryList = ['cat%02d' % i for i in range(numCategories)]
initProbability = numpy.ones(numCategories)/numCategories
transitionTable = numpy.random.dirichlet(alpha=[alpha]*numCategories,
size=numCategories)
return categoryList, initProbability, transitionTable
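# Note (added for illustration): each row of transitionTable is a Dirichlet sample
# and therefore sums to 1, so with the default numCategories=5 the model is a 5x5
# row-stochastic first-order Markov transition matrix.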
def generateFirstOrderData(model, numIterations=10000, seqLength=5,
resets=True, suffix='train'):
print "Creating %d iteration file with seqLength %d" % (numIterations, seqLength)
print "Filename",
categoryList, initProbability, transitionTable = model
initProbability = initProbability.cumsum()
transitionTable = transitionTable.cumsum(axis=1)
outputFile = 'fo_%d_%d_%s.csv' % (numIterations, seqLength, suffix)
print "Filename", outputFile
fields = [('reset', 'int', 'R'), ('name', 'string', '')]
o = File(outputFile, fields)
seqIdx = 0
rand = numpy.random.rand()
catIdx = numpy.searchsorted(initProbability, rand)
for i in xrange(numIterations):
rand = numpy.random.rand()
if seqIdx == 0 and resets:
catIdx = numpy.searchsorted(initProbability, rand)
reset = 1
else:
catIdx = numpy.searchsorted(transitionTable[catIdx], rand)
reset = 0
o.write([reset,categoryList[catIdx]])
seqIdx = (seqIdx+1)%seqLength
o.close()
if __name__=='__main__':
numpy.random.seed(1956)
model = createFirstOrderModel()
categoryList = model[0]
categoryFile = open("categories.txt", 'w')
for category in categoryList:
categoryFile.write(category+'\n')
categoryFile.close()
#import pylab
#pylab.imshow(model[2], interpolation='nearest')
#pylab.show()
for resets in [True, False]:
for seqLength in [2, 10]:
for numIterations in [1000, 10000, 100000]:
generateFirstOrderData(model,
numIterations=numIterations,
seqLength=seqLength,
resets=resets,
suffix='train_%s' % ('resets' if resets else 'noresets',))
generateFirstOrderData(model, numIterations=10000, seqLength=seqLength,
resets=resets,
suffix='test_%s' % ('resets' if resets else 'noresets',))
| gpl-3.0 | 4,705,039,321,854,961,000 | 32.538462 | 89 | 0.629874 | false |
kxliugang/edx-platform | common/lib/capa/capa/tests/test_hint_functionality.py | 41 | 34139 | # -*- coding: utf-8 -*-
"""
Tests of extended hints
"""
import unittest
from ddt import ddt, data, unpack
# With the use of ddt, some of the data expected_string cases below are naturally long stretches
# of text text without whitespace. I think it's best to leave such lines intact
# in the test code. Therefore:
# pylint: disable=line-too-long
# For out many ddt data cases, prefer a compact form of { .. }
# pylint: disable=bad-continuation
from . import new_loncapa_problem, load_fixture
class HintTest(unittest.TestCase):
"""Base class for tests of extended hinting functionality."""
def correctness(self, problem_id, choice):
"""Grades the problem and returns the 'correctness' string from cmap."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
return cmap[problem_id]['correctness']
def get_hint(self, problem_id, choice):
"""Grades the problem and returns its hint from cmap or the empty string."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
adict = cmap.cmap.get(problem_id)
if adict:
return adict['msg']
else:
return ''
# It is a little surprising how much more complicated TextInput is than all the other cases.
@ddt
class TextInputHintsTest(HintTest):
"""
Test Text Input Hints Test
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'Blue')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_2',
'trigger_type': 'single',
'hint_label': u'Correct',
'correctness': True,
'student_answer': [u'Blue'],
'question_type': 'stringresponse',
'hints': [{'text': 'The red light is scattered by water molecules leaving only blue light.'}]}
)
@data(
{'problem_id': u'1_2_1', u'choice': u'GermanyΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">I do not think so.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'franceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'FranceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'Mexico',
'expected_string': ''},
{'problem_id': u'1_2_1', u'choice': u'USAΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'usaΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'uSAxΩ',
'expected_string': u''},
{'problem_id': u'1_2_1', u'choice': u'NICKLANDΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">The country name does not end in LANDΩ</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'Blue',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">The red light is scattered by water molecules leaving only blue light.</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'blue',
'expected_string': u''},
{'problem_id': u'1_3_1', u'choice': u'b',
'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseInsensitive(HintTest):
"""Test Text Input Extended hints Case Insensitive"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_5_1', 'choice': 'abc', 'expected_string': ''}, # wrong answer yielding no hint
{'problem_id': u'1_5_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'a', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'b', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'c', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
# regexp cases
{'problem_id': u'1_5_1', 'choice': 'FGGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'fgG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseSensitive(HintTest):
"""Sometimes the semantics can be encoded in the class name."""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_6_1', 'choice': 'abc', 'expected_string': ''},
{'problem_id': u'1_6_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'a', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'b', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'c', 'expected_string': u''},
# regexp cases
{'problem_id': u'1_6_1', 'choice': 'FGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'fgG', 'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
@ddt
class TextInputExtendedHintsCompatible(HintTest):
"""
Compatibility test with mixed old and new style additional_answer tags.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_7_1', 'choice': 'A', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'B', 'correct': 'correct', 'expected_string': ''},
{'problem_id': u'1_7_1', 'choice': 'C', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'D', 'correct': 'incorrect', 'expected_string': ''},
# check going through conversion with difficult chars
{'problem_id': u'1_7_1', 'choice': """<&"'>""", 'correct': 'correct', 'expected_string': ''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class TextInputExtendedHintsRegex(HintTest):
"""
Extended hints where the answer is regex mode.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_8_1', 'choice': 'ABwrong', 'correct': 'incorrect', 'expected_string': ''},
{'problem_id': u'1_8_1', 'choice': 'ABC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'ABBBBC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'aBc', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'BBBB', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'bbb', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'C', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'c', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'D', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'd', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class NumericInputHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the numeric input problem represented by the XML below.
"""
xml = load_fixture('extended_hints_numeric_input.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
def test_tracking_log(self):
self.get_hint(u'1_2_1', u'1.141')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'hint_label': u'Nice',
'correctness': True,
'student_answer': [u'1.141'],
'question_type': 'numericalresponse',
'hints': [{'text': 'The square root of two turns up in the strangest places.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': '1.141',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
{'problem_id': u'1_3_1', 'choice': '4',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Pretty easy, uh?.</div></div>'},
# should get hint, when correct via numeric-tolerance
{'problem_id': u'1_2_1', 'choice': '1.15',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
# when they answer wrong, nothing
{'problem_id': u'1_2_1', 'choice': '2', 'expected_string': ''},
)
@unpack
def test_numeric_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class CheckboxHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the checkbox problem represented by the XML below.
"""
xml = load_fixture('extended_hints_checkbox.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
@data(
{'problem_id': u'1_2_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">Mushroom is a fungus, not a fruit.</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_4'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">I do not know what a Camero is but it is not a fruit.</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Almost right: </div><div class="hint-text">You are right that apple is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">You are right that grape is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprouts are vegetables.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Very funny: </div><div class="hint-text">Making a banana split?</div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
# check for interaction between compoundhint and correct/incorrect
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AB</div></div>'},
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AC</div></div>'},
# check for labeling where multiple child hints have labels
# These are some tricky cases
{'problem_id': '1_5_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">AA: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_1'],
'expected_string': ''},
{'problem_id': '1_5_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">BB: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-text">compoundo</div></div>'},
# The user selects *nothing*, but can still get "unselected" feedback
{'problem_id': '1_7_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
# 100% not match of sel/unsel feedback
{'problem_id': '1_7_1', 'choice': ['choice_1'],
'expected_string': ''},
# Here we have the correct combination, and that makes feedback too
{'problem_id': '1_7_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
)
@unpack
def test_checkbox_hints(self, problem_id, choice, expected_string):
self.maxDiff = None # pylint: disable=invalid-name
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class CheckboxHintsTestTracking(HintTest):
"""
Test the rather complicated tracking log output for checkbox cases.
"""
xml = """
<problem>
<p>question</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Apple
<choicehint selected="true">A true</choicehint>
<choicehint selected="false">A false</choicehint>
</choice>
<choice correct="false">Banana
</choice>
<choice correct="true">Cronut
<choicehint selected="true">C true</choicehint>
</choice>
<compoundhint value="A C">A C Compound</compoundhint>
</checkboxgroup>
</choiceresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test checkbox tracking log - by far the most complicated case"""
# A -> 1 hint
self.get_hint(u'1_2_1', [u'choice_0'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_0'],
'hints': [{'text': 'A true', 'trigger': [{'choice': 'choice_0', 'selected': True}]}],
'question_type': 'choiceresponse'}
)
# B C -> 2 hints
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_1', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_1', u'choice_2'],
'hints': [
{'text': 'A false', 'trigger': [{'choice': 'choice_0', 'selected': False}]},
{'text': 'C true', 'trigger': [{'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
# A C -> 1 Compound hint
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_0', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Correct',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': True,
'trigger_type': 'compound',
'student_answer': [u'choice_0', u'choice_2'],
'hints': [
{'text': 'A C Compound',
'trigger': [{'choice': 'choice_0', 'selected': True}, {'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
@ddt
class MultpleChoiceHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the multiple choice problem represented by the XML below.
"""
xml = load_fixture('extended_hints_multiple_choice.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'choice_2')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'choice_2'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'OOPS', 'hints': [{'text': 'Apple is a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-text">Mushroom is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': ''},
{'problem_id': u'1_3_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Potato is a root vegetable.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">OUTSTANDING: </div><div class="hint-text">Apple is indeed a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">OOPS: </div><div class="hint-text">Apple is a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_9',
'expected_string': ''},
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class MultpleChoiceHintsWithHtmlTest(HintTest):
"""
This class consists of a suite of test cases to be run on the multiple choice problem represented by the XML below.
"""
xml = load_fixture('extended_hints_multiple_choice_with_html.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_2_1', u'choice_0')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'student_answer': [u'choice_0'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'Incorrect', 'hints': [{'text': 'Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Potato is <img src="#" ale="#"/> not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text"><a href="#">Apple</a> is a fruit.</div></div>'}
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class DropdownHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the drop down problem represented by the XML below.
"""
xml = load_fixture('extended_hints_dropdown.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'FACES')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'FACES'], 'correctness': True, 'question_type': 'optionresponse',
'hint_label': 'Correct', 'hints': [{'text': 'With lots of makeup, doncha know?'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': 'Multiple Choice',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Good Job: </div><div class="hint-text">Yes, multiple choice is the right answer.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Text Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, text input problems do not present options.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Numerical Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, numerical input problems do not present options.</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'FACES',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">With lots of makeup, doncha know?</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'dogs',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">NOPE: </div><div class="hint-text">Not dogs, not cats, not toads</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'wrongo',
'expected_string': ''},
# Regression case where feedback includes answer substring
{'problem_id': u'1_4_1', 'choice': 'AAA',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AAABBB1</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'BBB',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AAABBB2</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'not going to match',
'expected_string': ''},
)
@unpack
def test_dropdown_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class ErrorConditionsTest(HintTest):
"""
Erroneous xml should raise exception.
"""
def test_error_conditions_illegal_element(self):
xml_with_errors = load_fixture('extended_hints_with_errors.xml')
with self.assertRaises(Exception):
new_loncapa_problem(xml_with_errors) # this problem is improperly constructed
| agpl-3.0 | 4,464,286,818,915,529,000 | 62.090573 | 439 | 0.614936 | false |
Esri/executive-dashboard | ExecutiveReportingScript/arcrest/webmap/renderer.py | 6 | 5241 | from __future__ import absolute_import
from __future__ import print_function
from .._abstract.abstract import BaseRenderer
import json
########################################################################
class SimpleRenderer(BaseRenderer):
""" A simple renderer is a renderer that uses one symbol only. The type
property for simple renderers is simple.
"""
_type = "simple"
_symbol = None
_label = None
_description = None
_rotationType = None
_rotationExpression = None
#----------------------------------------------------------------------
def __init__(self, symbol, label, description="",
rotationType="geographic", rotationExpression=""):
"""Constructor"""
self._symbol = symbol
self._label = label
self._description = description
self._rotationType = rotationType
self._rotationExpression = rotationExpression
#----------------------------------------------------------------------
def __str__(self):
""" provides a string reprsentation of the object """
return json.dumps(self.asDictionary)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" provides a dictionary representation of the object """
template = {
"type" : "simple",
"symbol" : self._symbol.asDictionary,
"label" : self._label,
"description" : self._description,
"rotationType": self._rotationType,
"rotationExpression": self._rotationExpression
}
return template
#----------------------------------------------------------------------
@property
def type(self):
""" gets the type value """
return self._type
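# Minimal construction sketch (illustrative, not taken from the ArcREST docs):
# 'symbol' below stands for any symbol object exposing an asDictionary
# property, which is assumed here.
#   renderer = SimpleRenderer(symbol=symbol, label="Cities")
#   renderer.asDictionary["type"]   # -> 'simple'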
########################################################################
class UniqueValueRenderer:
"""
A unique value renderer symbolizes groups of features that have
matching field values. The type property for unique value renderers
is uniqueValue. The rotationType property controls the origin and
direction of rotation. If the rotationType is defined as arithmetic,
the symbol is rotated from East in a counter-clockwise direction
where East is the 0 axis. If the rotationType is defined as
geographic, the symbol is rotated from North in a clockwise
direction where North is the 0 axis.
"""
_type = "uniqueValue"
_field1 = None
_field2 = None
_field3 = None
_fieldDelimiter = None
_defaultSymbol = None
_defaultLabel = None
_uniqueValueInfos = None
_rotationType = None
_rotationExpression = None
#----------------------------------------------------------------------
def __init__(self,
field1,
defaultSymbol,
defaultLabel="Other Values",
field2=None,
field3=None,
fieldDelimiter="",
uniqueValueInfos=[],
rotationType="geographic",
rotationExpression=""):
"""Constructor"""
self._field1 = field1
self._field2 = field2
self._field3 = field3
self._defaultSymbol = defaultSymbol
self._defaultLabel = defaultLabel
self._fieldDelimiter = fieldDelimiter
self._uniqueValueInfos = uniqueValueInfos
self._rotationType = rotationType
self._rotationExpression = rotationExpression
#----------------------------------------------------------------------
@property
def type(self):
""" returns the type """
return self._type
#----------------------------------------------------------------------
def __str__(self):
""" returns the object as string """
return json.dumps(self.asDictionary)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns object as dictionary """
template = {
"type" : "uniqueValue",
"field1" : self._field1,
"field2" : self._field2,
"field3" : self._field3,
"fieldDelimiter" : self._fieldDelimiter,
"defaultSymbol" : self._defaultSymbol.asDictionary,
"defaultLabel" : self._defaultLabel,
"uniqueValueInfos" : self._uniqueValueInfos,
"rotationType": self._rotationType,
"rotationExpression": self._rotationExpression
}
return template
########################################################################
class ClassBreaksRenderer:
"""
A class breaks renderer symbolizes each feature based on the value
of some numeric field. The type property for class breaks renderers
is classBreaks.
"""
_type = "classBreaks"
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
pass
#----------------------------------------------------------------------
@property
def type(self):
""" gets the object type """
return self._type
| apache-2.0 | -1,773,925,026,866,336,800 | 34.174497 | 75 | 0.487693 | false |
jhaux/tensorflow | tensorflow/python/client/client_lib.py | 111 | 1698 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for launching graphs and executing operations.
See the @{$python/client} guide.
@@Session
@@InteractiveSession
@@get_default_session
@@OpError
@@CancelledError
@@UnknownError
@@InvalidArgumentError
@@DeadlineExceededError
@@NotFoundError
@@AlreadyExistsError
@@PermissionDeniedError
@@UnauthenticatedError
@@ResourceExhaustedError
@@FailedPreconditionError
@@AbortedError
@@OutOfRangeError
@@UnimplementedError
@@InternalError
@@UnavailableError
@@DataLossError
@@exception_type_from_error_code
@@error_code_from_exception_type
@@raise_exception_on_not_ok_status
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.client.session import InteractiveSession
from tensorflow.python.client.session import Session
from tensorflow.python.framework import errors
from tensorflow.python.framework.errors import OpError
from tensorflow.python.framework.ops import get_default_session
| apache-2.0 | -1,982,769,063,421,749,000 | 29.321429 | 80 | 0.759717 | false |
itai12312/workspaces | hellodjango/venv/lib/python2.7/site-packages/django/utils/http.py | 35 | 8435 | from __future__ import unicode_literals
import calendar
import datetime
import re
import sys
try:
from urllib import parse as urllib_parse
except ImportError: # Python 2
import urllib as urllib_parse
import urlparse
urllib_parse.urlparse = urlparse.urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(urllib_parse.quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(urllib_parse.quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(urllib_parse.unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(urllib_parse.unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib_parse.urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
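# Illustrative example (not part of Django's source): passing a sequence of
# pairs keeps the parameter order deterministic.
#   urlencode([('a', 'b'), ('name', 'J Smith')])   # -> 'a=b&name=J+Smith'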
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is long than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if not six.PY3 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if not six.PY3:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
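# Illustrative round trip (not part of Django's source):
#   int_to_base36(1234)    # -> 'ya'
#   base36_to_int('ya')    # -> 1234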
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urllib_parse.urlparse(url1), urllib_parse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if not url:
return False
url_info = urllib_parse.urlparse(url)
return (not url_info.netloc or url_info.netloc == host) and \
(not url_info.scheme or url_info.scheme in ['http', 'https'])
| gpl-2.0 | 8,264,150,456,334,422,000 | 33.855372 | 84 | 0.637937 | false |
zjutjsj1004/third | boost/tools/build/src/util/set.py | 1 | 1486 | # (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
from b2.util import is_iterable
from .utility import to_seq
def difference (b, a):
""" Returns the elements of B that are not in A.
"""
assert is_iterable(b)
assert is_iterable(a)
result = []
for element in b:
if not element in a:
result.append (element)
return result
def intersection (set1, set2):
""" Removes from set1 any items which don't appear in set2 and returns the result.
"""
assert is_iterable(set1)
assert is_iterable(set2)
result = []
for v in set1:
if v in set2:
result.append (v)
return result
def contains (small, large):
""" Returns true iff all elements of 'small' exist in 'large'.
"""
small = to_seq (small)
large = to_seq (large)
for s in small:
if not s in large:
return False
return True
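# Illustrative examples (not part of Boost.Build itself):
#   difference([1, 2, 3], [2, 4])        # -> [1, 3]
#   intersection([1, 2, 3], [2, 3, 5])   # -> [2, 3]
#   contains([2, 1], [1, 2, 3])          # -> True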
def equal (a, b):
""" Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class.
"""
assert is_iterable(a)
assert is_iterable(b)
return contains (a, b) and contains (b, a)
| mit | 253,651,022,869,154,020 | 27.72 | 92 | 0.617093 | false |
krattai/noo-ebs | docs/zeroMQ-guide2/examples/Python/kvmsg.py | 1 | 4388 | """
=====================================================================
kvmsg - key-value message class for example applications
Author: Min RK <[email protected]>
"""
import struct # for packing integers
import sys
from uuid import uuid4
import zmq
# zmq.jsonapi ensures bytes, instead of unicode:
import zmq.utils.jsonapi as json
class KVMsg(object):
"""
Message is formatted on wire as 5 frames:
frame 0: key (0MQ string)
frame 1: sequence (8 bytes, network order)
frame 2: uuid (blob, 16 bytes)
frame 3: properties (0MQ string)
frame 4: body (blob)
"""
key = None
sequence = 0
uuid=None
properties = None
body = None
def __init__(self, sequence, uuid=None, key=None, properties=None, body=None):
assert isinstance(sequence, int)
self.sequence = sequence
if uuid is None:
uuid = uuid4().bytes
self.uuid = uuid
self.key = key
self.properties = {} if properties is None else properties
self.body = body
# dictionary access maps to properties:
def __getitem__(self, k):
return self.properties[k]
def __setitem__(self, k, v):
self.properties[k] = v
def get(self, k, default=None):
return self.properties.get(k, default)
def store(self, dikt):
"""Store me in a dict if I have anything to store"""
# this seems weird to check, but it's what the C example does
if self.key is not None and self.body is not None:
dikt[self.key] = self
def send(self, socket):
"""Send key-value message to socket; any empty frames are sent as such."""
key = '' if self.key is None else self.key
seq_s = struct.pack('!q', self.sequence)
body = '' if self.body is None else self.body
prop_s = json.dumps(self.properties)
socket.send_multipart([ key, seq_s, self.uuid, prop_s, body ])
@classmethod
def recv(cls, socket):
"""Reads key-value message from socket, returns new kvmsg instance."""
return cls.from_msg(socket.recv_multipart())
@classmethod
def from_msg(cls, msg):
"""Construct key-value message from a multipart message"""
key, seq_s, uuid, prop_s, body = msg
key = key if key else None
seq = struct.unpack('!q',seq_s)[0]
body = body if body else None
prop = json.loads(prop_s)
return cls(seq, uuid=uuid, key=key, properties=prop, body=body)
def dump(self):
if self.body is None:
size = 0
data='NULL'
else:
size = len(self.body)
data=repr(self.body)
print >> sys.stderr, "[seq:{seq}][key:{key}][size:{size}] {props} {data}".format(
seq=self.sequence,
# uuid=hexlify(self.uuid),
key=self.key,
size=size,
props=json.dumps(self.properties),
data=data,
)
# ---------------------------------------------------------------------
# Runs self test of class
def test_kvmsg (verbose):
print " * kvmsg: ",
# Prepare our context and sockets
ctx = zmq.Context()
output = ctx.socket(zmq.DEALER)
output.bind("ipc://kvmsg_selftest.ipc")
input = ctx.socket(zmq.DEALER)
input.connect("ipc://kvmsg_selftest.ipc")
kvmap = {}
# Test send and receive of simple message
kvmsg = KVMsg(1)
kvmsg.key = "key"
kvmsg.body = "body"
if verbose:
kvmsg.dump()
kvmsg.send(output)
kvmsg.store(kvmap)
kvmsg2 = KVMsg.recv(input)
if verbose:
kvmsg2.dump()
assert kvmsg2.key == "key"
kvmsg2.store(kvmap)
assert len(kvmap) == 1 # shouldn't be different
# test send/recv with properties:
kvmsg = KVMsg(2, key="key", body="body")
kvmsg["prop1"] = "value1"
kvmsg["prop2"] = "value2"
kvmsg["prop3"] = "value3"
assert kvmsg["prop1"] == "value1"
if verbose:
kvmsg.dump()
kvmsg.send(output)
kvmsg2 = KVMsg.recv(input)
if verbose:
kvmsg2.dump()
# ensure properties were preserved
assert kvmsg2.key == kvmsg.key
assert kvmsg2.body == kvmsg.body
assert kvmsg2.properties == kvmsg.properties
assert kvmsg2["prop2"] == kvmsg["prop2"]
print "OK"
if __name__ == '__main__':
test_kvmsg('-v' in sys.argv) | bsd-2-clause | 7,135,120,706,968,045,000 | 28.456376 | 89 | 0.569508 | false |
bkrukowski/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/find_files.py | 181 | 3872 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is used to find files used by run-webkit-tests and
perftestrunner. It exposes one public function - find() - which takes
an optional list of paths, optional set of skipped directories and optional
filter callback.
If a list is passed in, the returned list of files is constrained to those
found under the paths passed in. i.e. calling find(["LayoutTests/fast"])
will only return files under that directory.
If a set of skipped directories is passed in, the function will filter out
the files lying in these directories i.e. find(["LayoutTests"], set(["fast"]))
will return everything except files in fast subfolder.
If a callback is passed in, it will be called for the each file and the file
will be included into the result if the callback returns True.
The callback has to take three arguments: filesystem, dirname and filename."""
import itertools
def find(filesystem, base_dir, paths=None, skipped_directories=None, file_filter=None, directory_sort_key=None):
"""Finds the set of tests under a given list of sub-paths.
Args:
paths: a list of path expressions relative to base_dir
to search. Glob patterns are ok, as are path expressions with
forward slashes on Windows. If paths is empty, we look at
everything under the base_dir.
"""
paths = paths or ['*']
skipped_directories = skipped_directories or set(['.svn', '_svn'])
return _normalized_find(filesystem, _normalize(filesystem, base_dir, paths), skipped_directories, file_filter, directory_sort_key)
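# Illustrative call shape (a sketch; 'fs' stands for webkitpy's FileSystem
# wrapper object, which is assumed here rather than constructed):
#   files = find(fs, '/LayoutTests', paths=['fast/css'],
#                file_filter=lambda fs, dirname, name: name.endswith('.html'))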
def _normalize(filesystem, base_dir, paths):
return [filesystem.normpath(filesystem.join(base_dir, path)) for path in paths]
def _normalized_find(filesystem, paths, skipped_directories, file_filter, directory_sort_key):
"""Finds the set of tests under the list of paths.
Args:
paths: a list of absolute path expressions to search.
Glob patterns are ok.
"""
paths_to_walk = itertools.chain(*(filesystem.glob(path) for path in paths))
def sort_by_directory_key(files_list):
if directory_sort_key:
files_list.sort(key=directory_sort_key)
return files_list
all_files = itertools.chain(*(sort_by_directory_key(filesystem.files_under(path, skipped_directories, file_filter)) for path in paths_to_walk))
return all_files
| bsd-3-clause | 5,641,598,151,147,127,000 | 45.095238 | 147 | 0.746643 | false |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Tools/Scripts/pathfix.py | 66 | 4329 | #! /usr/bin/env python
# Change the #! line occurring in Python scripts. The new interpreter
# pathname must be given with a -i option.
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fix() you can turn this
# into a program for a different change to Python programs...
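# Example invocation (illustrative; the paths below are placeholders):
#   python pathfix.py -i /usr/local/bin/python scripts/
# rewrites the #! line of every *.py file found under scripts/ and keeps a
# backup of each original as <name>~.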
import sys
import re
import os
from stat import *
import getopt
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
new_interpreter = None
def main():
global new_interpreter
usage = ('usage: %s -i /interpreter file-or-directory ...\n' %
sys.argv[0])
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:')
except getopt.error, msg:
err(msg + '\n')
err(usage)
sys.exit(2)
for o, a in opts:
if o == '-i':
new_interpreter = a
if not new_interpreter or new_interpreter[0] != '/' or not args:
err('-i option or file-or-directory missing\n')
err(usage)
sys.exit(2)
bad = 0
for arg in args:
if os.path.isdir(arg):
if recursedown(arg): bad = 1
elif os.path.islink(arg):
err(arg + ': will not process symbolic links\n')
bad = 1
else:
if fix(arg): bad = 1
sys.exit(bad)
ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    return ispythonprog.match(name) is not None
def recursedown(dirname):
dbg('recursedown(%r)\n' % (dirname,))
bad = 0
try:
names = os.listdir(dirname)
except os.error, msg:
err('%s: cannot list directory: %r\n' % (dirname, msg))
return 1
names.sort()
subdirs = []
for name in names:
if name in (os.curdir, os.pardir): continue
fullname = os.path.join(dirname, name)
if os.path.islink(fullname): pass
elif os.path.isdir(fullname):
subdirs.append(fullname)
elif ispython(name):
if fix(fullname): bad = 1
for fullname in subdirs:
if recursedown(fullname): bad = 1
return bad
def fix(filename):
## dbg('fix(%r)\n' % (filename,))
try:
f = open(filename, 'r')
except IOError, msg:
err('%s: cannot open: %r\n' % (filename, msg))
return 1
line = f.readline()
fixed = fixline(line)
if line == fixed:
rep(filename+': no change\n')
f.close()
return
head, tail = os.path.split(filename)
tempname = os.path.join(head, '@' + tail)
try:
g = open(tempname, 'w')
except IOError, msg:
f.close()
err('%s: cannot create: %r\n' % (tempname, msg))
return 1
rep(filename + ': updating\n')
g.write(fixed)
BUFSIZE = 8*1024
while 1:
buf = f.read(BUFSIZE)
if not buf: break
g.write(buf)
g.close()
f.close()
# Finishing touch -- move files
# First copy the file's mode to the temp file
try:
statbuf = os.stat(filename)
os.chmod(tempname, statbuf[ST_MODE] & 07777)
except os.error, msg:
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
# Then make a backup of the original file as filename~
try:
os.rename(filename, filename + '~')
except os.error, msg:
err('%s: warning: backup failed (%r)\n' % (filename, msg))
# Now move the temp file to the original file
try:
os.rename(tempname, filename)
except os.error, msg:
err('%s: rename failed (%r)\n' % (filename, msg))
return 1
    # Return success
return 0
def fixline(line):
if not line.startswith('#!'):
return line
if "python" not in line:
return line
return '#! %s\n' % new_interpreter
if __name__ == '__main__':
main()
| mit | -8,844,020,723,375,267,000 | 28.053691 | 70 | 0.595288 | false |
MaxPoint/bayes_logistic | bayes_logistic/bayes_logistic.py | 2 | 19650 | # Copyright (c) 2015 MaxPoint Interactive, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, print_function
import numpy as np
from scipy.optimize import minimize
from scipy.stats import norm
def logistic_prob(X, w):
""" MAP (Bayes point) logistic regression probability with overflow prevention via exponent truncation
Parameters
----------
X : array-like, shape (N, p)
Feature matrix
w : array-like, shape (p, )
Parameter vector
Returns
-------
pr : array-like, shape (N, )
vector of logistic regression probabilities
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# set a truncation exponent.
trunc = 8. # exp(8)/(1+exp(8)) = 0.9997 which is close enough to 1 as to not matter in most cases.
# calculate argument of logit
z = np.dot(X, w)
# truncate to avoid numerical over/underflow
z = np.clip(z, -trunc, trunc)
    # calculate logistic probability
pr = np.exp(z)
pr = pr / (1. + pr)
return pr
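# Illustrative sketch (not part of the original module): logistic_prob on a
# tiny, made-up design matrix. Logit arguments beyond +/-8 are clipped, so the
# last probability saturates near 1 instead of overflowing.
def _example_logistic_prob():
    X = np.array([[1.0, 0.0], [1.0, 2.0], [1.0, 100.0]])  # toy features
    w = np.array([0.5, 1.0])                               # toy parameters
    return logistic_prob(X, w)  # roughly [0.62, 0.92, 0.9997]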
def f_log_posterior(w, wprior, H, y, X, weights=None):
"""Returns negative log posterior probability.
Parameters
----------
w : array-like, shape (p, )
vector of parameters at which the negative log posterior is to be evaluated
wprior : array-like, shape (p, )
vector of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
Array of prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
vector of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
vector of data point weights. Should be within [0,1]
Returns
-------
neg_log_post : float
negative log posterior probability
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate negative log posterior
    eps = 1e-6  # this is defined to ensure that we never take a log of zero
mu = logistic_prob(X, w)
if len(H.shape) == 2:
neg_log_post = (- (np.dot(y.T, weights * np.log(mu + eps))
+ np.dot((1. - y).T, weights * np.log(1. - mu + eps)))
+ 0.5 * np.dot((w - wprior).T, np.dot(H, (w - wprior))))
elif len(H.shape) == 1:
neg_log_post = (- (np.dot(y.T, weights * np.log(mu + eps))
+ np.dot((1. - y).T, weights * np.log(1. - mu + eps)))
+ 0.5 * np.dot((w - wprior).T, H * (w - wprior)))
else:
raise ValueError('Incompatible Hessian')
return float(neg_log_post)
def g_log_posterior(w, wprior, H, y, X, weights=None):
"""Returns gradient of the negative log posterior probability.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the gradient is to be evaluated
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
grad_log_post : array-like, shape (p, )
gradient of negative log posterior
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate gradient
mu_ = logistic_prob(X, w)
if len(H.shape) == 2:
grad_log_post = np.dot(X.T, weights * (mu_ - y)) + np.dot(H, (w - wprior))
elif len(H.shape) == 1:
grad_log_post = np.dot(X.T, weights * (mu_ - y)) + H * (w - wprior)
else:
raise ValueError('Incompatible Hessian')
return grad_log_post
def g_log_posterior_small(w, wprior, H, y, X, weights=None):
"""Returns normalized (to 1) gradient of the negative log posterior probability.
This is used for BFGS and L-BFGS-B solvers which tend to not converge unless
the gradient is normalized.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the gradient is to be evaluated
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
grad_log_post : array-like, shape (p, )
normalized (to 1) gradient of negative log posterior
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate gradient
mu = logistic_prob(X, w)
if len(H.shape) == 2:
grad_log_post = np.dot(X.T, weights * (mu - y)) + np.dot(H, (w - wprior))
elif len(H.shape) == 1:
grad_log_post = np.dot(X.T, weights * (mu - y)) + H * (w - wprior)
else:
raise ValueError('Incompatible Hessian')
# normalize gradient to length 1
grad_log_post = grad_log_post / np.sqrt(np.sum(grad_log_post * grad_log_post))
return grad_log_post
def H_log_posterior(w, wprior, H, y, X, weights=None):
"""Returns Hessian (either full or diagonal) of the negative log posterior probability.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the Hessian is to be evaluated
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of log prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
H_log_post : array-like, shape like `H`
Hessian of negative log posterior
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# calculate log posterior Hessian
mu = logistic_prob(X, w)
S = mu * (1. - mu) * weights
if len(H.shape) == 2:
H_log_post = np.dot(X.T, X * S[:, np.newaxis]) + H
elif len(H.shape) == 1:
H_log_post = np.diag(np.dot(X.T, X * S[:, np.newaxis])) + H
else:
raise ValueError('Incompatible Hessian')
return H_log_post
def HP_log_posterior(w, q, wprior, H, y, X, weights=None):
"""Returns diagonal Hessian of the negative log posterior probability multiplied by an arbitrary vector.
This is useful for the Newton-CG solver, particularly when we only want to store a diagonal Hessian.
Parameters
----------
w : array-like, shape (p, )
parameter vector at which the Hessian is to be evaluated
q : array-like, shape (p, )
arbitrary vector to multiply Hessian by
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, )
array of diagonal log prior Hessian (inverse covariance of prior distribution of parameters)
y : array-like, shape (N, )
array of binary ({0,1} responses)
X : array-like, shape (N, p)
array of features
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
Returns
-------
HP : array-like, shape (p, )
Hessian of log posterior (diagonal approx) multiplied by arbitrary vector
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
HP = H_log_posterior(w, wprior, H, y, X, weights)
HP = HP * q
return HP
def fit_bayes_logistic(y, X, wprior, H, weights=None, solver='Newton-CG', bounds=None, maxiter=100):
""" Bayesian Logistic Regression Solver. Assumes Laplace (Gaussian) Approximation
to the posterior of the fitted parameter vector. Uses scipy.optimize.minimize
Parameters
----------
y : array-like, shape (N, )
array of binary {0,1} responses
X : array-like, shape (N, p)
array of features
wprior : array-like, shape (p, )
array of prior means on the parameters to be fit
H : array-like, shape (p, p) or (p, )
array of prior Hessian (inverse covariance of prior distribution of parameters)
weights : array-like, shape (N, )
array of data point weights. Should be within [0,1]
solver : string
scipy optimize solver used. this should be either 'Newton-CG', 'BFGS' or 'L-BFGS-B'.
The default is Newton-CG.
bounds : iterable of length p
a length p list (or tuple) of tuples each of length 2.
This is only used if the solver is set to 'L-BFGS-B'. In that case, a tuple
(lower_bound, upper_bound), both floats, is defined for each parameter. See the
scipy.optimize.minimize docs for further information.
maxiter : int
maximum number of iterations for scipy.optimize.minimize solver.
Returns
-------
w_fit : array-like, shape (p, )
posterior parameters (MAP estimate)
H_fit : array-like, shape like `H`
posterior Hessian (Hessian of negative log posterior evaluated at MAP parameters)
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# Check that dimensionality of inputs agrees
# check X
if len(X.shape) != 2:
raise ValueError('X must be a N*p matrix')
(nX, pX) = X.shape
# check y
if len(y.shape) > 1:
        raise ValueError('y must be a vector of shape (N, )')
if len(np.atleast_1d(y)) != nX:
raise ValueError('y and X do not have the same number of rows')
# check wprior
if len(wprior.shape) > 1:
raise ValueError('prior should be a vector of shape (p, )')
if len(np.atleast_1d(wprior)) != pX:
raise ValueError('prior mean has incompatible length')
# check H
if len(H.shape) == 1:
if np.atleast_1d(H).shape[0] != pX:
raise ValueError('prior Hessian is diagonal but has incompatible length')
elif len(H.shape) == 2:
(h1,h2) = np.atleast_2d(H).shape
if h1 != h2:
            raise ValueError('prior Hessian must either be a p*p square matrix or a vector of shape (p, )')
if h1 != pX:
raise ValueError('prior Hessian is square but has incompatible size')
# fill in weights if need be
if weights is None:
weights = np.ones(len(np.atleast_1d(y)), )
if len(np.atleast_1d(weights)) != len(np.atleast_1d(y)):
raise ValueError(' weight vector must be same length as response vector')
# Do the regression
if solver == 'Newton-CG':
if len(H.shape) == 2:
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior,
hess=H_log_posterior, method='Newton-CG', options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
elif len(H.shape) == 1:
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior,
hessp=HP_log_posterior, method='Newton-CG', options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
else:
raise ValueError(' You must either use the full Hessian or its diagonal as a vector')
elif solver == 'BFGS':
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior_small,
method='BFGS', options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
elif solver == 'L-BFGS-B':
ww = minimize(f_log_posterior, wprior, args=(wprior, H, y, X, weights), jac=g_log_posterior_small,
method='L-BFGS-B', bounds=bounds, options={'maxiter': maxiter})
w_fit = ww.x
H_fit = H_log_posterior(w_fit, wprior, H, y, X, weights)
else:
raise ValueError('Unknown solver specified: "{0}"'.format(solver))
return w_fit, H_fit
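# Illustrative usage sketch (not part of the original module). The synthetic
# data and the weak zero-mean prior below are assumptions made for the example.
def _example_fit():
    rng = np.random.RandomState(0)
    X = np.hstack([np.ones((200, 1)), rng.randn(200, 2)])  # intercept + 2 features
    w_true = np.array([-0.3, 1.0, -2.0])
    y = (rng.rand(200) < logistic_prob(X, w_true)).astype(float)
    wprior = np.zeros(3)    # prior means
    H = 0.01 * np.ones(3)   # weak diagonal prior Hessian
    w_fit, H_fit = fit_bayes_logistic(y, X, wprior, H)  # MAP fit, Laplace Hessian
    return w_fit, H_fit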
def get_pvalues(w, H):
""" Calculates p-values on fitted parameters. This can be used for variable selection by,
for example, discarding every parameter with a p-value less than 0.05 (or some other cutoff)
Parameters
----------
w : array-like, shape (p, )
array of posterior means on the fitted parameters
H : array-like, shape (p, p) or (p, )
array of log posterior Hessian
Returns
-------
pvals : array-like, shape (p, )
array of p-values for each of the fitted parameters
References
----------
Chapter 2 of Pawitan, Y. 'In All Likelihood', Oxford University Press (2013)
Also see: Gerhard, F. 'Extraction of network topology from multi-electrode recordings: is there
a small world effect', Frontiers in Computational Neuroscience (2011) for a use case of
p-value based variable selection.
"""
# get inverse standard error of each parameter from the square root of the Hessian,
# which is equal to the Fisher information
if len(H.shape) == 2:
inv_std_err = np.sqrt(np.diag(H))
elif len(H.shape) == 1:
inv_std_err = np.sqrt(H)
else:
raise ValueError("Incompatible Hessian provided")
# calculate Wald statistic
z_ = w * inv_std_err
# get p-value by comparing Wald statistic to cdf of Normal distribution
pvals = 2. * (1. - norm.cdf(np.abs(z_)))
return pvals
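# Illustrative sketch (not part of the original module): Wald p-values for a
# toy fit. The parameter vector and diagonal posterior Hessian are invented.
def _example_pvalues():
    w_fit = np.array([0.05, 1.2, -2.3])
    H_fit = np.array([50.0, 40.0, 60.0])
    # parameters with large |w| * sqrt(H) get small p-values and would be kept
    return get_pvalues(w_fit, H_fit)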
def bayes_logistic_prob(X, w, H):
""" Posterior predictive logistic regression probability. Uses probit approximation
to the logistic regression sigmoid. Also has overflow prevention via exponent truncation.
Parameters
----------
X : array-like, shape (N, p)
array of covariates
w : array-like, shape (p, )
array of fitted MAP parameters
H : array-like, shape (p, p) or (p, )
array of log posterior Hessian (covariance matrix of fitted MAP parameters)
Returns
-------
pr : array-like, shape (N, )
moderated (by full distribution) logistic probability
References
----------
Chapter 8 of Murphy, K. 'Machine Learning a Probabilistic Perspective', MIT Press (2012)
Chapter 4 of Bishop, C. 'Pattern Recognition and Machine Learning', Springer (2006)
"""
# set a truncation exponent
trunc = 8. # exp(8)/(1+exp(8)) = 0.9997 which is close enough to 1 as to not matter in most cases.
# unmoderated argument of exponent
z_a = np.dot(X, w)
# find the moderation
if len(H.shape) == 2:
H_inv_ = np.linalg.inv(H)
sig2_a = np.sum(X * np.dot(H_inv_, X.T).T, axis=1)
elif len(H.shape) == 1:
H_inv_ = 1. / H
sig2_a = np.sum(X * (H_inv_ * X), axis=1)
else:
raise ValueError(' You must either use the full Hessian or its diagonal as a vector')
# get the moderation factor. Implicit in here is approximating the logistic sigmoid with
# a probit by setting the probit and sigmoid slopes to be equal at the origin. This is where
# the factor of pi/8 comes from.
kappa_sig2_a = 1. / np.sqrt(1. + 0.125 * np.pi * sig2_a)
# calculate the moderated argument of the logit
z = z_a * kappa_sig2_a
# do a truncation to prevent exp overflow
z = np.clip(z, -trunc, trunc)
# get the moderated logistic probability
pr = np.exp(z)
pr = pr / (1. + pr)
return pr
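# Illustrative sketch (not part of the original module): the probit-style
# moderation used above, computed by hand for a single invented data point.
def _example_moderation():
    z_a = 2.0     # unmoderated logit argument x.w (invented)
    sig2_a = 4.0  # predictive variance x.T H^-1 x (invented)
    kappa = 1. / np.sqrt(1. + 0.125 * np.pi * sig2_a)
    # the moderated probability is pulled towards 0.5 relative to the MAP value
    return np.exp(z_a * kappa) / (1. + np.exp(z_a * kappa))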
| bsd-3-clause | 4,326,502,064,166,138,400 | 35.187845 | 122 | 0.627583 | false |
Cinntax/home-assistant | homeassistant/components/stream/hls.py | 1 | 3398 | """Provide functionality to stream HLS."""
from aiohttp import web
from homeassistant.core import callback
from homeassistant.util.dt import utcnow
from .const import FORMAT_CONTENT_TYPE
from .core import StreamView, StreamOutput, PROVIDERS
@callback
def async_setup_hls(hass):
"""Set up api endpoints."""
hass.http.register_view(HlsPlaylistView())
hass.http.register_view(HlsSegmentView())
return "/api/hls/{}/playlist.m3u8"
class HlsPlaylistView(StreamView):
"""Stream view to serve a M3U8 stream."""
url = r"/api/hls/{token:[a-f0-9]+}/playlist.m3u8"
name = "api:stream:hls:playlist"
cors_allowed = True
async def handle(self, request, stream, sequence):
"""Return m3u8 playlist."""
renderer = M3U8Renderer(stream)
track = stream.add_provider("hls")
stream.start()
# Wait for a segment to be ready
if not track.segments:
await track.recv()
headers = {"Content-Type": FORMAT_CONTENT_TYPE["hls"]}
return web.Response(
body=renderer.render(track, utcnow()).encode("utf-8"), headers=headers
)
class HlsSegmentView(StreamView):
"""Stream view to serve a MPEG2TS segment."""
url = r"/api/hls/{token:[a-f0-9]+}/segment/{sequence:\d+}.ts"
name = "api:stream:hls:segment"
cors_allowed = True
async def handle(self, request, stream, sequence):
"""Return mpegts segment."""
track = stream.add_provider("hls")
segment = track.get_segment(int(sequence))
if not segment:
return web.HTTPNotFound()
headers = {"Content-Type": "video/mp2t"}
return web.Response(body=segment.segment.getvalue(), headers=headers)
class M3U8Renderer:
"""M3U8 Render Helper."""
def __init__(self, stream):
"""Initialize renderer."""
self.stream = stream
@staticmethod
def render_preamble(track):
"""Render preamble."""
return ["#EXT-X-VERSION:3", f"#EXT-X-TARGETDURATION:{track.target_duration}"]
@staticmethod
def render_playlist(track, start_time):
"""Render playlist."""
segments = track.segments
if not segments:
return []
playlist = ["#EXT-X-MEDIA-SEQUENCE:{}".format(segments[0])]
for sequence in segments:
segment = track.get_segment(sequence)
playlist.extend(
[
"#EXTINF:{:.04f},".format(float(segment.duration)),
f"./segment/{segment.sequence}.ts",
]
)
return playlist
def render(self, track, start_time):
"""Render M3U8 file."""
lines = (
["#EXTM3U"]
+ self.render_preamble(track)
+ self.render_playlist(track, start_time)
)
return "\n".join(lines) + "\n"
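# Illustrative sketch (not part of Home Assistant): rendering a playlist for a
# fake track holding two 4-second segments. _FakeSegment and _FakeTrack are
# invented stand-ins for the real stream objects, used only to show the layout.
class _FakeSegment:
    def __init__(self, sequence, duration):
        self.sequence = sequence
        self.duration = duration
class _FakeTrack:
    target_duration = 4
    segments = [1, 2]
    def get_segment(self, sequence):
        return _FakeSegment(sequence, 4.0)
def _example_playlist():
    # produces "#EXTM3U", the version/target-duration tags, then one
    # "#EXTINF:4.0000," + "./segment/N.ts" pair per segment
    return M3U8Renderer(None).render(_FakeTrack(), start_time=None)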
@PROVIDERS.register("hls")
class HlsStreamOutput(StreamOutput):
"""Represents HLS Output formats."""
@property
def name(self) -> str:
"""Return provider name."""
return "hls"
@property
def format(self) -> str:
"""Return container format."""
return "mpegts"
@property
def audio_codec(self) -> str:
"""Return desired audio codec."""
return "aac"
@property
def video_codec(self) -> str:
"""Return desired video codec."""
return "h264"
| apache-2.0 | 3,944,976,066,007,308,300 | 26.852459 | 85 | 0.59123 | false |
hcsturix74/django | tests/gis_tests/geos_tests/test_io.py | 282 | 3918 | from __future__ import unicode_literals
import binascii
import unittest
from unittest import skipUnless
from django.contrib.gis.geos import (
HAS_GEOS, GEOSGeometry, WKBReader, WKBWriter, WKTReader, WKTWriter,
)
from django.utils.six import memoryview
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSIOTest(unittest.TestCase):
def test01_wktreader(self):
# Creating a WKTReader instance
wkt_r = WKTReader()
wkt = 'POINT (5 23)'
# read() should return a GEOSGeometry
ref = GEOSGeometry(wkt)
g1 = wkt_r.read(wkt.encode())
g2 = wkt_r.read(wkt)
for geom in (g1, g2):
self.assertEqual(ref, geom)
# Should only accept six.string_types objects.
self.assertRaises(TypeError, wkt_r.read, 1)
self.assertRaises(TypeError, wkt_r.read, memoryview(b'foo'))
def test02_wktwriter(self):
# Creating a WKTWriter instance, testing its ptr property.
wkt_w = WKTWriter()
self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
ref = GEOSGeometry('POINT (5 23)')
ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)'
self.assertEqual(ref_wkt, wkt_w.write(ref).decode())
def test03_wkbreader(self):
# Creating a WKBReader instance
wkb_r = WKBReader()
hex = b'000000000140140000000000004037000000000000'
wkb = memoryview(binascii.a2b_hex(hex))
ref = GEOSGeometry(hex)
# read() should return a GEOSGeometry on either a hex string or
# a WKB buffer.
g1 = wkb_r.read(wkb)
g2 = wkb_r.read(hex)
for geom in (g1, g2):
self.assertEqual(ref, geom)
bad_input = (1, 5.23, None, False)
for bad_wkb in bad_input:
self.assertRaises(TypeError, wkb_r.read, bad_wkb)
def test04_wkbwriter(self):
wkb_w = WKBWriter()
# Representations of 'POINT (5 23)' in hex -- one normal and
# the other with the byte order changed.
g = GEOSGeometry('POINT (5 23)')
hex1 = b'010100000000000000000014400000000000003740'
wkb1 = memoryview(binascii.a2b_hex(hex1))
hex2 = b'000000000140140000000000004037000000000000'
wkb2 = memoryview(binascii.a2b_hex(hex2))
self.assertEqual(hex1, wkb_w.write_hex(g))
self.assertEqual(wkb1, wkb_w.write(g))
# Ensuring bad byteorders are not accepted.
for bad_byteorder in (-1, 2, 523, 'foo', None):
# Equivalent of `wkb_w.byteorder = bad_byteorder`
self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
# Setting the byteorder to 0 (for Big Endian)
wkb_w.byteorder = 0
self.assertEqual(hex2, wkb_w.write_hex(g))
self.assertEqual(wkb2, wkb_w.write(g))
# Back to Little Endian
wkb_w.byteorder = 1
# Now, trying out the 3D and SRID flags.
g = GEOSGeometry('POINT (5 23 17)')
g.srid = 4326
hex3d = b'0101000080000000000000144000000000000037400000000000003140'
wkb3d = memoryview(binascii.a2b_hex(hex3d))
hex3d_srid = b'01010000A0E6100000000000000000144000000000000037400000000000003140'
wkb3d_srid = memoryview(binascii.a2b_hex(hex3d_srid))
# Ensuring bad output dimensions are not accepted
for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None):
# Equivalent of `wkb_w.outdim = bad_outdim`
self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
# Now setting the output dimensions to be 3
wkb_w.outdim = 3
self.assertEqual(hex3d, wkb_w.write_hex(g))
self.assertEqual(wkb3d, wkb_w.write(g))
# Telling the WKBWriter to include the srid in the representation.
wkb_w.srid = True
self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
self.assertEqual(wkb3d_srid, wkb_w.write(g))
| bsd-3-clause | 8,529,692,043,066,229,000 | 34.297297 | 90 | 0.633231 | false |
rjschwei/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/models/win_rm_listener.py | 1 | 1321 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WinRMListener(Model):
"""Describes Protocol and thumbprint of Windows Remote Management listener.
:param protocol: The Protocol used by the WinRM listener. Http and Https
are supported. Possible values include: 'Http', 'Https'
:type protocol: str or :class:`ProtocolTypes
<azure.mgmt.compute.models.ProtocolTypes>`
:param certificate_url: The Certificate URL in KMS for Https listeners.
Should be null for Http listeners.
:type certificate_url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'ProtocolTypes'},
'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
}
def __init__(self, protocol=None, certificate_url=None):
self.protocol = protocol
self.certificate_url = certificate_url
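# Illustrative sketch (not part of the SDK): constructing a listener. The key
# vault URL below is a made-up placeholder.
def _example_listener():
    return WinRMListener(
        protocol='Https',
        certificate_url='https://myvault.vault.azure.net/secrets/winrm-cert')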
| mit | -569,361,415,192,360,770 | 37.852941 | 79 | 0.622256 | false |
mferenca/HMS-ecommerce | ecommerce/extensions/dashboard/orders/views.py | 1 | 3461 | from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from oscar.apps.dashboard.orders.views import (
OrderListView as CoreOrderListView, OrderDetailView as CoreOrderDetailView
)
from oscar.core.loading import get_model
from ecommerce.extensions.dashboard.views import FilterFieldsMixin
Order = get_model('order', 'Order')
Partner = get_model('partner', 'Partner')
Refund = get_model('refund', 'Refund')
def queryset_orders_for_user(user): # pylint: disable=unused-argument
"""
Returns a queryset of all orders that a user is allowed to access.
A staff user may access all orders.
To allow access to an order for a non-staff user, at least one line's
partner has to have the user in the partner's list.
    This customization removes the selection of the related address data, which drastically decreases
    query response time. Support for non-staff users is also removed.
"""
return Order._default_manager.select_related('user').prefetch_related('lines') # pylint: disable=protected-access
class OrderListView(FilterFieldsMixin, CoreOrderListView):
base_queryset = None
form = None
def dispatch(self, request, *args, **kwargs):
# NOTE: This method is overridden so that we can use our override of `queryset_orders_for_user`.
# base_queryset is equal to all orders the user is allowed to access
self.base_queryset = queryset_orders_for_user(request.user).order_by('-date_placed')
# Bypass the CoreOrderListView.dispatch()
return super(CoreOrderListView, self).dispatch(request, *args, **kwargs) # pylint: disable=bad-super-call
def get_queryset(self):
queryset = super(OrderListView, self).get_queryset()
# Note (CCB): We set self.form here because the super method does not always pass request.GET
# to the form constructor. This results in the form not being populated when re-rendered.
self.form = self.form_class(self.request.GET)
if self.form.is_valid():
for field, value in self.form.cleaned_data.iteritems():
if value:
_filter = self.get_filter_fields().get(field)
if _filter:
queryset = queryset.filter(**{_filter['query_filter']: value})
return queryset
class OrderDetailView(CoreOrderDetailView):
line_actions = ('change_line_statuses', 'create_shipping_event', 'create_payment_event', 'create_refund')
def create_refund(self, request, order, lines, _quantities): # pylint: disable=unused-argument
refund = Refund.create_with_lines(order, lines)
if refund:
data = {
'link_start': '<a href="{}" target="_blank">'.format(
reverse('dashboard:refunds:detail', kwargs={'pk': refund.pk})),
'link_end': '</a>',
'refund_id': refund.pk
}
message = _('{link_start}Refund #{refund_id}{link_end} created! '
'Click {link_start}here{link_end} to view it.').format(**data)
messages.success(request, mark_safe(message))
else:
message = _('A refund cannot be created for these lines. They may have already been refunded.')
messages.error(request, message)
return self.reload_page()
| agpl-3.0 | -1,514,523,753,496,620,500 | 42.2625 | 118 | 0.663103 | false |
rosswhitfield/mantid | Framework/PythonInterface/mantid/plots/resampling_image/samplingimage.py | 3 | 10843 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import matplotlib.colors
import numpy as np
from mantid.plots.datafunctions import get_matrix_2d_ragged, get_normalize_by_bin_width
from mantid.plots.mantidimage import MantidImage
from mantid.api import MatrixWorkspace
MAX_HISTOGRAMS = 5000
class SamplingImage(MantidImage):
def __init__(self,
ax,
workspace,
transpose=False,
cmap=None,
norm=None,
interpolation=None,
origin=None,
extent=None,
filternorm=1,
filterrad=4.0,
resample=False,
normalize_by_bin_width=None,
**kwargs):
super().__init__(ax,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
extent=extent,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs)
self.ws = workspace
try:
self.spectrum_info = workspace.spectrumInfo()
except Exception:
self.spectrum_info = None
self.transpose = transpose
self.normalize_by_bin_width = normalize_by_bin_width
self._resize_cid, self._xlim_cid, self._ylim_cid = None, None, None
self._resample_required = True
self._full_extent = extent
self.orig_shape = (workspace.getDimension(0).getNBins(),
workspace.getDimension(1).getNBins())
self._xbins, self._ybins = 100, 100
self.origin = origin
self._update_maxpooling_option()
def connect_events(self):
axes = self.axes
self._resize_cid = axes.get_figure().canvas.mpl_connect('resize_event', self._resize)
self._xlim_cid = axes.callbacks.connect('xlim_changed', self._xlim_changed)
self._ylim_cid = axes.callbacks.connect('ylim_changed', self._ylim_changed)
def disconnect_events(self):
axes = self.axes
axes.get_figure().canvas.mpl_disconnect(self._resize_cid)
axes.callbacks.disconnect(self._xlim_cid)
axes.callbacks.disconnect(self._ylim_cid)
def draw(self, renderer, *args, **kwargs):
if self._resample_required:
self._resample_image()
self._resample_required = False
super().draw(renderer, *args, **kwargs)
def remove(self):
self.disconnect_events()
super().remove()
def _xlim_changed(self, ax):
if self._update_extent():
self._resample_required = True
def _ylim_changed(self, ax):
if self._update_extent():
self._resample_required = True
def _resize(self, canvas):
xbins, ybins = self._calculate_bins_from_extent()
if xbins > self._xbins or ybins > self._ybins:
self._resample_required = True
def _calculate_bins_from_extent(self):
bbox = self.get_window_extent().transformed(
self.axes.get_figure().dpi_scale_trans.inverted())
dpi = self.axes.get_figure().dpi
xbins = int(np.ceil(bbox.width * dpi))
ybins = int(np.ceil(bbox.height * dpi))
return xbins, ybins
def _resample_image(self, xbins=None, ybins=None):
if self._resample_required:
extent = self.get_extent()
if xbins is None or ybins is None:
xbins, ybins = self._calculate_bins_from_extent()
x, y, data = get_matrix_2d_ragged(self.ws,
self.normalize_by_bin_width,
histogram2D=True,
transpose=self.transpose,
extent=extent,
xbins=xbins,
ybins=ybins,
spec_info=self.spectrum_info,
maxpooling=self._maxpooling)
# Data is an MxN matrix.
            # If origin = upper, extent is set as [xmin, xmax, ymax, ymin].
# Data[M,0] is the data at [xmin, ymin], which should be drawn at the top left corner,
# whereas Data[0,0] is the data at [xmin, ymax], which should be drawn at the bottom left corner.
# Origin upper starts drawing the data from top-left, which means we need to horizontally flip the matrix
if self.origin == "upper":
data = np.flip(data, 0)
self.set_data(data)
self._xbins = xbins
self._ybins = ybins
def _update_extent(self):
"""
Update the extent base on xlim and ylim, should be called after pan or zoom action,
this limits the range that the data will be sampled. Return True or False if extents have changed.
"""
new_extent = self.axes.get_xlim() + self.axes.get_ylim()
if new_extent != self.get_extent():
self.set_extent(new_extent)
return True
else:
return False
def get_full_extent(self):
return self._full_extent
def _update_maxpooling_option(self):
"""
Updates the maxpooling option, used when the image is downsampled
If the workspace is large, or ragged, we skip this maxpooling step and set the option as False
"""
axis = self.ws.getAxis(1)
self._maxpooling = (self.ws.getNumberHistograms() <= MAX_HISTOGRAMS and axis.isSpectra()
and not self.ws.isRaggedWorkspace())
def imshow_sampling(axes,
workspace,
cmap=None,
alpha=None,
vmin=None,
vmax=None,
shape=None,
filternorm=1,
filterrad=4.0,
imlim=None,
url=None,
**kwargs):
"""Copy of imshow but replaced AxesImage with SamplingImage and added
callbacks and Mantid Workspace stuff.
See :meth:`matplotlib.axes.Axes.imshow`
To test:
from mantidqt.widgets.sliceviewer.samplingimage import imshow_sampling
fig, ax = plt.subplots()
im = imshow_sampling(ax, workspace, aspect='auto', origin='lower')
fig.show()
"""
normalize_by_bin_width, kwargs = get_normalize_by_bin_width(workspace, axes, **kwargs)
transpose = kwargs.pop('transpose', False)
extent = kwargs.pop('extent', None)
interpolation = kwargs.pop('interpolation', None)
origin = kwargs.pop('origin', None)
norm = kwargs.pop('norm', None)
resample = kwargs.pop('resample', False)
kwargs.pop('distribution', None)
if not extent:
x0, x1, y0, y1 = (workspace.getDimension(0).getMinimum(),
workspace.getDimension(0).getMaximum(),
workspace.getDimension(1).getMinimum(),
workspace.getDimension(1).getMaximum())
if isinstance(workspace, MatrixWorkspace) and not workspace.isCommonBins():
# for MatrixWorkspace the x extent obtained from dimension 0 corresponds to the first spectrum
            # this is not correct in the case of ragged workspaces, where we need to obtain the global xmin and xmax
# moreover the axis might be in ascending or descending order, so x[0] is not necessarily the minimum
xmax, xmin = None, None # don't initialise with values from first spectrum as could be a monitor
si = workspace.spectrumInfo()
for i in range(workspace.getNumberHistograms()):
if si.hasDetectors(i) and not si.isMonitor(i):
x_axis = workspace.readX(i)
x_i_first = x_axis[0]
x_i_last = x_axis[-1]
x_i_min = min([x_i_first, x_i_last])
x_i_max = max([x_i_first, x_i_last])
# effectively ignore spectra with nan or inf values
if np.isfinite(x_i_min):
xmin = min([x_i_min, xmin]) if xmin else x_i_min
if np.isfinite(x_i_max):
xmax = max([x_i_max, xmax]) if xmax else x_i_max
x0 = xmin if xmin else x0
x1 = xmax if xmax else x1
if workspace.getDimension(1).getNBins() == workspace.getAxis(1).length():
width = workspace.getDimension(1).getBinWidth()
y0 -= width / 2
y1 += width / 2
if origin == "upper":
y0, y1 = y1, y0
extent = (x0, x1, y0, y1)
if transpose:
e1, e2, e3, e4 = extent
extent = e3, e4, e1, e2
# from matplotlib.axes.Axes.imshow
if norm is not None and not isinstance(norm, matplotlib.colors.Normalize):
raise ValueError("'norm' must be an instance of 'mcolors.Normalize'")
aspect = kwargs.pop('aspect', matplotlib.rcParams['image.aspect'])
axes.set_aspect(aspect)
im = SamplingImage(axes,
workspace,
transpose,
cmap,
norm,
interpolation,
origin,
extent,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
normalize_by_bin_width=normalize_by_bin_width,
**kwargs)
im._resample_image(100, 100)
im.set_alpha(alpha)
im.set_url(url)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(axes.patch)
if vmin is not None or vmax is not None:
if norm is not None and isinstance(norm, matplotlib.colors.LogNorm):
if vmin <= 0:
vmin = 0.0001
if vmax <= 0:
vmax = 1
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
axes.add_image(im)
if extent:
axes.set_xlim(extent[0], extent[1])
axes.set_ylim(extent[2], extent[3])
im.connect_events()
return im
| gpl-3.0 | 8,601,503,731,573,481,000 | 38.572993 | 117 | 0.545052 | false |
mrquim/repository.mrquim | script.module.schism.common/lib/requests/packages/urllib3/contrib/appengine.py | 224 | 10865 | """
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from urllib3 import PoolManager
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import logging
import os
import warnings
from ..packages.six.moves.urllib.parse import urljoin
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
pass
class AppEnginePlatformError(HTTPError):
pass
class AppEngineManager(RequestMethods):
"""
Connection manager for Google App Engine sandbox applications.
This manager uses the URLFetch service directly instead of using the
emulated httplib, and is subject to URLFetch limitations as described in
the App Engine documentation `here
<https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Notably it will raise an :class:`AppEnginePlatformError` if:
* URLFetch is not available.
* If you attempt to use this on App Engine Flexible, as full socket
support is available.
* If a request size is more than 10 megabytes.
    * If a response size is more than 32 megabytes.
* If you use an unsupported request method such as OPTIONS.
Beyond those cases, it will raise normal urllib3 errors.
"""
def __init__(self, headers=None, retries=None, validate_certificate=True,
urlfetch_retries=True):
if not urlfetch:
raise AppEnginePlatformError(
"URLFetch is not available in this environment.")
if is_prod_appengine_mvms():
raise AppEnginePlatformError(
"Use normal urllib3.PoolManager instead of AppEngineManager"
"on Managed VMs, as using URLFetch is not necessary in "
"this environment.")
warnings.warn(
"urllib3 is using URLFetch on Google App Engine sandbox instead "
"of sockets. To use sockets directly instead of URLFetch see "
"https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
AppEnginePlatformWarning)
RequestMethods.__init__(self, headers)
self.validate_certificate = validate_certificate
self.urlfetch_retries = urlfetch_retries
self.retries = retries or Retry.DEFAULT
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Return False to re-raise any potential exceptions
return False
def urlopen(self, method, url, body=None, headers=None,
retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
**response_kw):
retries = self._get_retries(retries, redirect)
try:
follow_redirects = (
redirect and
retries.redirect != 0 and
retries.total)
response = urlfetch.fetch(
url,
payload=body,
method=method,
headers=headers or {},
allow_truncated=False,
follow_redirects=self.urlfetch_retries and follow_redirects,
deadline=self._get_absolute_timeout(timeout),
validate_certificate=self.validate_certificate,
)
except urlfetch.DeadlineExceededError as e:
raise TimeoutError(self, e)
except urlfetch.InvalidURLError as e:
if 'too large' in str(e):
raise AppEnginePlatformError(
"URLFetch request too large, URLFetch only "
"supports requests up to 10mb in size.", e)
raise ProtocolError(e)
except urlfetch.DownloadError as e:
if 'Too many redirects' in str(e):
raise MaxRetryError(self, url, reason=e)
raise ProtocolError(e)
except urlfetch.ResponseTooLargeError as e:
raise AppEnginePlatformError(
"URLFetch response too large, URLFetch only supports"
"responses up to 32mb in size.", e)
except urlfetch.SSLCertificateError as e:
raise SSLError(e)
except urlfetch.InvalidMethodError as e:
raise AppEnginePlatformError(
"URLFetch does not support method: %s" % method, e)
http_response = self._urlfetch_response_to_http_response(
response, retries=retries, **response_kw)
# Handle redirect?
redirect_location = redirect and http_response.get_redirect_location()
if redirect_location:
# Check for redirect response
if (self.urlfetch_retries and retries.raise_on_redirect):
raise MaxRetryError(self, url, "too many redirects")
else:
if http_response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=http_response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise MaxRetryError(self, url, "too many redirects")
return http_response
retries.sleep_for_retry(http_response)
log.debug("Redirecting %s -> %s", url, redirect_location)
redirect_url = urljoin(url, redirect_location)
return self.urlopen(
method, redirect_url, body, headers,
retries=retries, redirect=redirect,
timeout=timeout, **response_kw)
# Check if we should retry the HTTP response.
has_retry_after = bool(http_response.getheader('Retry-After'))
if retries.is_retry(method, http_response.status, has_retry_after):
retries = retries.increment(
method, url, response=http_response, _pool=self)
log.debug("Retry: %s", url)
retries.sleep(http_response)
return self.urlopen(
method, url,
body=body, headers=headers,
retries=retries, redirect=redirect,
timeout=timeout, **response_kw)
return http_response
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
if is_prod_appengine():
# Production GAE handles deflate encoding automatically, but does
# not remove the encoding header.
content_encoding = urlfetch_resp.headers.get('content-encoding')
if content_encoding == 'deflate':
del urlfetch_resp.headers['content-encoding']
transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
# We have a full response's content,
# so let's make sure we don't report ourselves as chunked data.
if transfer_encoding == 'chunked':
encodings = transfer_encoding.split(",")
encodings.remove('chunked')
urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
return HTTPResponse(
# In order for decoding to work, we must present the content as
# a file-like object.
body=BytesIO(urlfetch_resp.content),
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
**response_kw
)
def _get_absolute_timeout(self, timeout):
if timeout is Timeout.DEFAULT_TIMEOUT:
return None # Defer to URLFetch's default.
if isinstance(timeout, Timeout):
if timeout._read is not None or timeout._connect is not None:
warnings.warn(
"URLFetch does not support granular timeout settings, "
"reverting to total or default URLFetch timeout.",
AppEnginePlatformWarning)
return timeout.total
return timeout
def _get_retries(self, retries, redirect):
if not isinstance(retries, Retry):
retries = Retry.from_int(
retries, redirect=redirect, default=self.retries)
if retries.connect or retries.read or retries.redirect:
warnings.warn(
"URLFetch only supports total retries and does not "
"recognize connect, read, or redirect retry parameters.",
AppEnginePlatformWarning)
return retries
def is_appengine():
return (is_local_appengine() or
is_prod_appengine() or
is_prod_appengine_mvms())
def is_appengine_sandbox():
return is_appengine() and not is_prod_appengine_mvms()
def is_local_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Development/' in os.environ['SERVER_SOFTWARE'])
def is_prod_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
not is_prod_appengine_mvms())
def is_prod_appengine_mvms():
return os.environ.get('GAE_VM', False) == 'true'
| gpl-2.0 | 3,361,181,563,194,577,000 | 35.706081 | 96 | 0.628072 | false |
vuchau/ansible | lib/ansible/inventory/vars_plugins/noop.py | 317 | 1632 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2014, Serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class VarsModule(object):
"""
Loads variables for groups and/or hosts
"""
def __init__(self, inventory):
""" constructor """
self.inventory = inventory
self.inventory_basedir = inventory.basedir()
def run(self, host, vault_password=None):
""" For backwards compatibility, when only vars per host were retrieved
This method should return both host specific vars as well as vars
calculated from groups it is a member of """
return {}
def get_host_vars(self, host, vault_password=None):
""" Get host specific variables. """
return {}
def get_group_vars(self, group, vault_password=None):
""" Get group specific variables. """
return {}
| gpl-3.0 | -1,120,924,675,314,709,100 | 31.64 | 79 | 0.683824 | false |
nhenezi/kuma | vendor/packages/pyparsing/examples/sparser.py | 16 | 13196 | #!/usr/bin/env python
"""
NAME:
sparser.py
SYNOPSIS:
sparser.py [options] filename
DESCRIPTION:
The sparser.py script is a Specified PARSER. It is unique (as far as I can
tell) because it doesn't care about the delimiter(s). The user specifies
what is expected, and the order, for each line of text. All of the heavy
lifting is handled by pyparsing (http://pyparsing.sf.net).
OPTIONS:
-h,--help this message
-v,--version version
-d,--debug turn on debug messages
EXAMPLES:
1. As standalone
sparser.py myfile
2. As library
import sparser
...
#Copyright (C) 2006 Tim Cera [email protected]
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 675 Mass Ave, Cambridge, MA 02139, USA.
"""
#===imports======================
import sys
import os
import getopt
import re
import gzip
from pyparsing import *
#===globals======================
modname = "sparser"
__version__ = "0.1"
#--option args--
debug_p = 0
#opt_b=None #string arg, default is undefined
#---positional args, default is empty---
pargs = []
#---other---
#===utilities====================
def msg(txt):
"""Send message to stdout."""
sys.stdout.write(txt)
sys.stdout.flush()
def debug(ftn, txt):
"""Used for debugging."""
if debug_p:
sys.stdout.write("%s.%s:%s\n" % (modname, ftn, txt))
sys.stdout.flush()
def fatal(ftn, txt):
"""If can't continue."""
msg = "%s.%s:FATAL:%s\n" % (modname, ftn, txt)
raise SystemExit, msg
def usage():
"""Prints the docstring."""
print __doc__
#====================================
class ToInteger(TokenConverter):
"""Converter to make token into an integer."""
def postParse( self, instring, loc, tokenlist ):
return int(tokenlist[0])
class ToFloat(TokenConverter):
"""Converter to make token into a float."""
def postParse( self, instring, loc, tokenlist ):
return float(tokenlist[0])
class ParseFileLineByLine:
"""
Bring data from text files into a program, optionally parsing each line
according to specifications in a parse definition file.
ParseFileLineByLine instances can be used like normal file objects (i.e. by
calling readline(), readlines(), and write()), but can also be used as
sequences of lines in for-loops.
ParseFileLineByLine objects also handle compression transparently. i.e. it
is possible to read lines from a compressed text file as if it were not
compressed. Compression is deduced from the file name suffixes '.Z'
(compress/uncompress), '.gz' (gzip/gunzip), and '.bz2' (bzip2).
The parse definition file name is developed based on the input file name.
If the input file name is 'basename.ext', then the definition file is
'basename_def.ext'. If a definition file specific to the input file is not
found, then the program searches for the file 'sparse.def' which would be
the definition file for all files in that directory without a file specific
definition file.
Finally, ParseFileLineByLine objects accept file names that start with '~'
or '~user' to indicate a home directory, as well as URLs (for reading
only).
Constructor:
ParseFileLineByLine(|filename|, |mode|='"r"'), where |filename| is the name
of the file (or a URL) and |mode| is one of '"r"' (read), '"w"' (write) or
'"a"' (append, not supported for .Z files).
"""
def __init__(self, filename, mode = 'r'):
"""Opens input file, and if available the definition file. If the
definition file is available __init__ will then create some pyparsing
helper variables. """
if mode not in ['r', 'w', 'a']:
raise IOError, (0, 'Illegal mode: ' + repr(mode))
if string.find(filename, ':/') > 1: # URL
if mode == 'w':
raise IOError, "can't write to a URL"
import urllib
self.file = urllib.urlopen(filename)
else:
filename = os.path.expanduser(filename)
if mode == 'r' or mode == 'a':
if not os.path.exists(filename):
raise IOError, (2, 'No such file or directory: ' + filename)
filen, file_extension = os.path.splitext(filename)
command_dict = {
('.Z', 'r'):
"self.file = os.popen('uncompress -c ' + filename, mode)",
('.gz', 'r'):
"self.file = gzip.GzipFile(filename, 'rb')",
('.bz2', 'r'):
"self.file = os.popen('bzip2 -dc ' + filename, mode)",
('.Z', 'w'):
"self.file = os.popen('compress > ' + filename, mode)",
('.gz', 'w'):
"self.file = gzip.GzipFile(filename, 'wb')",
('.bz2', 'w'):
"self.file = os.popen('bzip2 > ' + filename, mode)",
('.Z', 'a'):
"raise IOError, (0, 'Can\'t append to .Z files')",
('.gz', 'a'):
"self.file = gzip.GzipFile(filename, 'ab')",
('.bz2', 'a'):
"raise IOError, (0, 'Can\'t append to .bz2 files')",
}
exec command_dict.get((file_extension, mode),
'self.file = open(filename, mode)')
self.grammar = None
# Try to find a parse ('*_def.ext') definition file. First try to find
# a file specific parse definition file, then look for 'sparse.def'
# that would be the definition file for all files within the directory.
# The definition file is pure Python. The one variable that needs to
# be specified is 'parse'. The 'parse' variable is a list of tuples
# defining the name, type, and because it is a list, the order of
# variables on each line in the data file. The variable name is a
# string, the type variable is defined as integer, real, and qString.
# parse = [
# ('year', integer),
# ('month', integer),
# ('day', integer),
# ('value', real),
# ]
definition_file_one = filen + "_def" + file_extension
definition_file_two = os.path.dirname(filen) + os.sep + "sparse.def"
if os.path.exists(definition_file_one):
self.parsedef = definition_file_one
elif os.path.exists(definition_file_two):
self.parsedef = definition_file_two
else:
self.parsedef = None
return None
# Create some handy pyparsing constructs. I kept 'decimal_sep' so that
        # it can easily be changed if the decimal separator is a ",".
decimal_sep = "."
sign = oneOf("+ -")
# part of printables without decimal_sep, +, -
special_chars = string.replace('!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~',
decimal_sep, "")
integer = ToInteger(
Combine(Optional(sign) +
Word(nums))).setName("integer")
positive_integer = ToInteger(
Combine(Optional("+") +
Word(nums))).setName("integer")
negative_integer = ToInteger(
Combine("-" +
Word(nums))).setName("integer")
real = ToFloat(
Combine(Optional(sign) +
Word(nums) +
decimal_sep +
Optional(Word(nums)) +
Optional(oneOf("E e") +
Word(nums)))).setName("real")
positive_real = ToFloat(
Combine(Optional("+") +
Word(nums) +
decimal_sep +
Optional(Word(nums)) +
Optional(oneOf("E e") +
Word(nums)))).setName("real")
negative_real = ToFloat(
Combine("-" +
Word(nums) +
decimal_sep +
Optional(Word(nums)) +
Optional(oneOf("E e") +
Word(nums)))).setName("real")
qString = ( sglQuotedString | dblQuotedString ).setName("qString")
# add other characters we should skip over between interesting fields
integer_junk = Optional(
Suppress(
Word(alphas +
special_chars +
decimal_sep))).setName("integer_junk")
real_junk = Optional(
Suppress(
Word(alphas +
special_chars))).setName("real_junk")
qString_junk = SkipTo(qString).setName("qString_junk")
# Now that 'integer', 'real', and 'qString' have been assigned I can
# execute the definition file.
execfile(self.parsedef)
# Build the grammar, combination of the 'integer', 'real, 'qString',
# and '*_junk' variables assigned above in the order specified in the
# definition file.
grammar = []
for nam, expr in parse:
grammar.append( eval(expr.name + "_junk"))
grammar.append( expr.setResultsName(nam) )
self.grammar = And( grammar[1:] + [restOfLine] )
def __del__(self):
"""Delete (close) the file wrapper."""
self.close()
def __getitem__(self, item):
"""Used in 'for line in fp:' idiom."""
line = self.readline()
if not line:
raise IndexError
return line
def readline(self):
"""Reads (and optionally parses) a single line."""
line = self.file.readline()
if self.grammar and line:
try:
return self.grammar.parseString(line).asDict()
except ParseException:
return self.readline()
else:
return line
def readlines(self):
"""Returns a list of all lines (optionally parsed) in the file."""
if self.grammar:
tot = []
# Used this way instead of a 'for' loop against
            # self.file.readlines() so that there aren't two copies of the file
# in memory.
while 1:
line = self.file.readline()
if not line:
break
tot.append(line)
return tot
return self.file.readlines()
def write(self, data):
"""Write to a file."""
self.file.write(data)
def writelines(self, list):
"""Write a list to a file. Each item in the list is a line in the
file.
"""
for line in list:
self.file.write(line)
def close(self):
"""Close the file."""
self.file.close()
def flush(self):
"""Flush in memory contents to file."""
self.file.flush()
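# Illustrative sketch (not part of the original script): for a data file named
# 'streamflow.dat', the parse definition file would be 'streamflow_def.dat'
# (or 'sparse.def' for the whole directory).  It is plain Python, for example:
#
#     parse = [
#         ('year', integer),
#         ('month', integer),
#         ('day', integer),
#         ('flow', real),
#         ('station', qString),
#     ]
#
# Each data line is then returned by readline() as a dict keyed by these
# names, no matter what delimiters separate the fields.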
#=============================
def main(pargs):
"""This should only be used for testing. The primary mode of operation is
as an imported library.
"""
input_file = sys.argv[1]
fp = ParseFileLineByLine(input_file)
for i in fp:
print i
#-------------------------
if __name__ == '__main__':
ftn = "main"
opts, pargs = getopt.getopt(sys.argv[1:], 'hvd',
['help', 'version', 'debug', 'bb='])
for opt in opts:
if opt[0] == '-h' or opt[0] == '--help':
print modname+": version="+__version__
usage()
sys.exit(0)
elif opt[0] == '-v' or opt[0] == '--version':
print modname+": version="+__version__
sys.exit(0)
elif opt[0] == '-d' or opt[0] == '--debug':
debug_p = 1
elif opt[0] == '--bb':
opt_b = opt[1]
#---make the object and run it---
main(pargs)
#===Revision Log===
#Created by mkpythonproj:
#2006-02-06 Tim Cera
#
| mpl-2.0 | -1,991,065,526,212,388,000 | 34.153425 | 80 | 0.514323 | false |
dbfr3qs/moto | tests/test_ec2/test_spot_instances.py | 11 | 9152 | from __future__ import unicode_literals
from nose.tools import assert_raises
import datetime
import boto
import boto3
from boto.exception import EC2ResponseError
from botocore.exceptions import ClientError
import pytz
import sure # noqa
from moto import mock_ec2, mock_ec2_deprecated
from moto.backends import get_model
from moto.core.utils import iso_8601_datetime_with_milliseconds
@mock_ec2
def test_request_spot_instances():
conn = boto3.client('ec2', 'us-east-1')
vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
subnet = conn.create_subnet(
VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
subnet_id = subnet['SubnetId']
conn.create_security_group(GroupName='group1', Description='description')
conn.create_security_group(GroupName='group2', Description='description')
start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc)
end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc)
start = iso_8601_datetime_with_milliseconds(start_dt)
end = iso_8601_datetime_with_milliseconds(end_dt)
with assert_raises(ClientError) as ex:
request = conn.request_spot_instances(
SpotPrice="0.5", InstanceCount=1, Type='one-time',
ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
AvailabilityZoneGroup='my-group',
LaunchSpecification={
"ImageId": 'ami-abcd1234',
"KeyName": "test",
"SecurityGroups": ['group1', 'group2'],
"UserData": "some test data",
"InstanceType": 'm1.small',
"Placement": {
"AvailabilityZone": 'us-east-1c',
},
"KernelId": "test-kernel",
"RamdiskId": "test-ramdisk",
"Monitoring": {
"Enabled": True,
},
"SubnetId": subnet_id,
},
DryRun=True,
)
ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
ex.exception.response['ResponseMetadata'][
'HTTPStatusCode'].should.equal(400)
ex.exception.response['Error']['Message'].should.equal(
'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set')
request = conn.request_spot_instances(
SpotPrice="0.5", InstanceCount=1, Type='one-time',
ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
AvailabilityZoneGroup='my-group',
LaunchSpecification={
"ImageId": 'ami-abcd1234',
"KeyName": "test",
"SecurityGroups": ['group1', 'group2'],
"UserData": "some test data",
"InstanceType": 'm1.small',
"Placement": {
"AvailabilityZone": 'us-east-1c',
},
"KernelId": "test-kernel",
"RamdiskId": "test-ramdisk",
"Monitoring": {
"Enabled": True,
},
"SubnetId": subnet_id,
},
)
requests = conn.describe_spot_instance_requests()['SpotInstanceRequests']
requests.should.have.length_of(1)
request = requests[0]
request['State'].should.equal("open")
request['SpotPrice'].should.equal("0.5")
request['Type'].should.equal('one-time')
request['ValidFrom'].should.equal(start_dt)
request['ValidUntil'].should.equal(end_dt)
request['LaunchGroup'].should.equal("the-group")
request['AvailabilityZoneGroup'].should.equal('my-group')
launch_spec = request['LaunchSpecification']
security_group_names = [group['GroupName']
for group in launch_spec['SecurityGroups']]
set(security_group_names).should.equal(set(['group1', 'group2']))
launch_spec['ImageId'].should.equal('ami-abcd1234')
launch_spec['KeyName'].should.equal("test")
launch_spec['InstanceType'].should.equal('m1.small')
launch_spec['KernelId'].should.equal("test-kernel")
launch_spec['RamdiskId'].should.equal("test-ramdisk")
launch_spec['SubnetId'].should.equal(subnet_id)
@mock_ec2
def test_request_spot_instances_default_arguments():
"""
    Test that moto sets the correct default arguments
"""
conn = boto3.client('ec2', 'us-east-1')
request = conn.request_spot_instances(
SpotPrice="0.5",
LaunchSpecification={
"ImageId": 'ami-abcd1234',
}
)
requests = conn.describe_spot_instance_requests()['SpotInstanceRequests']
requests.should.have.length_of(1)
request = requests[0]
request['State'].should.equal("open")
request['SpotPrice'].should.equal("0.5")
request['Type'].should.equal('one-time')
request.shouldnt.contain('ValidFrom')
request.shouldnt.contain('ValidUntil')
request.shouldnt.contain('LaunchGroup')
request.shouldnt.contain('AvailabilityZoneGroup')
launch_spec = request['LaunchSpecification']
security_group_names = [group['GroupName']
for group in launch_spec['SecurityGroups']]
security_group_names.should.equal(["default"])
launch_spec['ImageId'].should.equal('ami-abcd1234')
request.shouldnt.contain('KeyName')
launch_spec['InstanceType'].should.equal('m1.small')
request.shouldnt.contain('KernelId')
request.shouldnt.contain('RamdiskId')
request.shouldnt.contain('SubnetId')
@mock_ec2_deprecated
def test_cancel_spot_instance_request():
conn = boto.connect_ec2()
conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
with assert_raises(EC2ResponseError) as ex:
conn.cancel_spot_instance_requests([requests[0].id], dry_run=True)
ex.exception.error_code.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set')
conn.cancel_spot_instance_requests([requests[0].id])
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(0)
@mock_ec2_deprecated
def test_request_spot_instances_fulfilled():
"""
    Test that moto correctly fulfills a spot instance request
"""
conn = boto.ec2.connect_to_region("us-east-1")
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("open")
get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active'
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
request.state.should.equal("active")
@mock_ec2_deprecated
def test_tag_spot_instance_request():
"""
Test that moto correctly tags a spot instance request
"""
conn = boto.connect_ec2()
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request[0].add_tag('tag1', 'value1')
request[0].add_tag('tag2', 'value2')
requests = conn.get_all_spot_instance_requests()
requests.should.have.length_of(1)
request = requests[0]
tag_dict = dict(request.tags)
tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'})
@mock_ec2_deprecated
def test_get_all_spot_instance_requests_filtering():
"""
Test that moto correctly filters spot instance requests
"""
conn = boto.connect_ec2()
request1 = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request2 = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234',
)
request1[0].add_tag('tag1', 'value1')
request1[0].add_tag('tag2', 'value2')
request2[0].add_tag('tag1', 'value1')
request2[0].add_tag('tag2', 'wrong')
requests = conn.get_all_spot_instance_requests(filters={'state': 'active'})
requests.should.have.length_of(0)
requests = conn.get_all_spot_instance_requests(filters={'state': 'open'})
requests.should.have.length_of(3)
requests = conn.get_all_spot_instance_requests(
filters={'tag:tag1': 'value1'})
requests.should.have.length_of(2)
requests = conn.get_all_spot_instance_requests(
filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'})
requests.should.have.length_of(1)
@mock_ec2_deprecated
def test_request_spot_instances_setting_instance_id():
conn = boto.ec2.connect_to_region("us-east-1")
request = conn.request_spot_instances(
price=0.5, image_id='ami-abcd1234')
req = get_model('SpotInstanceRequest', 'us-east-1')[0]
req.state = 'active'
req.instance_id = 'i-12345678'
request = conn.get_all_spot_instance_requests()[0]
assert request.state == 'active'
assert request.instance_id == 'i-12345678'
| apache-2.0 | -1,760,716,982,439,585,300 | 33.149254 | 147 | 0.64292 | false |
LLNL/spack | var/spack/repos/builtin/packages/py-csvkit/package.py | 5 | 1739 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCsvkit(PythonPackage):
"""A library of utilities for working with CSV, the king of tabular file
formats"""
homepage = 'http://csvkit.rtfd.org/'
url = "https://pypi.io/packages/source/c/csvkit/csvkit-0.9.1.tar.gz"
version('1.0.4', sha256='1353a383531bee191820edfb88418c13dfe1cdfa9dd3dc46f431c05cd2a260a0')
version('0.9.1', sha256='92f8b8647becb5cb1dccb3af92a13a4e85702d42ba465ce8447881fb38c9f93a')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='^python@:2.6')
depends_on('[email protected]:', type=('build', 'run'), when='^python@:2.6')
depends_on('[email protected]:', type=('build', 'run'), when='^python@:2.6')
depends_on('[email protected]', type=('build', 'run'), when='@0.9.1')
depends_on('[email protected]', type=('build', 'run'), when='@0.9.1')
depends_on('[email protected]:', type=('build', 'run'), when='@0.9.1')
depends_on('[email protected]:', type=('build', 'run'), when='@0.9.1')
depends_on('[email protected]', type=('build', 'run'), when='@0.9.1')
depends_on('[email protected]:', type=('build', 'run'), when='@1:')
depends_on('[email protected]:', type=('build', 'run'), when='@1:')
depends_on('[email protected]:', type=('build', 'run'), when='@1:')
depends_on('[email protected]:', type=('build', 'run'), when='@1:')
| lgpl-2.1 | -2,445,207,249,556,002,000 | 53.34375 | 95 | 0.608971 | false |
yiakwy/numpy | tools/swig/test/testSuperTensor.py | 100 | 16492 | #! /usr/bin/env python
from __future__ import division
# System imports
from distutils.util import get_platform
from math import sqrt
import os
import sys
import unittest
# Import NumPy
import numpy as np
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0: BadListError = TypeError
else: BadListError = ValueError
import SuperTensor
######################################################################
class SuperTensorTestCase(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNorm(self):
"Test norm function"
print >>sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
        # Note: kludge to get an answer of the same type as supertensor.
#Answer is simply sqrt(sum(supertensor*supertensor)/16)
answer = np.array([np.sqrt(np.sum(supertensor.astype('d')*supertensor)/16.)], dtype=self.typeCode)[0]
self.assertAlmostEqual(norm(supertensor), answer, 6)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormBadList(self):
"Test norm function with bad list"
print >>sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = [[[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]], [[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]]]
self.assertRaises(BadListError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongDim(self):
"Test norm function with wrong dimensions"
print >>sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongSize(self):
"Test norm function with wrong size"
print >>sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(3*2*2, dtype=self.typeCode).reshape((3, 2, 2))
self.assertRaises(TypeError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormNonContainer(self):
"Test norm function with non-container"
print >>sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
self.assertRaises(TypeError, norm, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMax(self):
"Test max function"
print >>sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
supertensor = [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]
self.assertEquals(max(supertensor), 8)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
"Test max function with bad list"
print >>sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
supertensor = [[[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]], [[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]]]
self.assertRaises(BadListError, max, supertensor)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
print >>sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
print >>sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, -1, 2, -3])
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMin(self):
"Test min function"
print >>sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
supertensor = [[[[9, 8], [7, 6]], [[5, 4], [3, 2]]], [[[9, 8], [7, 6]], [[5, 4], [3, 2]]]]
self.assertEquals(min(supertensor), 2)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
"Test min function with bad list"
print >>sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
supertensor = [[[["nine", 8], [7, 6]], [["five", 4], [3, 2]]], [[["nine", 8], [7, 6]], [["five", 4], [3, 2]]]]
self.assertRaises(BadListError, min, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinNonContainer(self):
"Test min function with non-container"
print >>sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, True)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
print >>sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [[1, 3], [5, 7]])
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScale(self):
"Test scale function"
print >>sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.arange(3*3*3*3, dtype=self.typeCode).reshape((3, 3, 3, 3))
answer = supertensor.copy()*4
scale(supertensor, 4)
self.assertEquals((supertensor == answer).all(), True)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
print >>sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]],
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], 'c')
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
print >>sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1],
[0, 1, 0], [1, 0, 1], [0, 1, 0]], self.typeCode)
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
print >>sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[[1, 0], [0, 1], [1, 0]],
[[0, 1], [1, 0], [0, 1]],
[[1, 0], [0, 1], [1, 0]]], self.typeCode)
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
print >>sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
self.assertRaises(TypeError, scale, True)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloor(self):
"Test floor function"
print >>sys.stderr, self.typeStr, "... ",
supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
answer = supertensor.copy()
answer[answer < 4] = 4
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
floor(supertensor, 4)
np.testing.assert_array_equal(supertensor, answer)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
print >>sys.stderr, self.typeStr, "... ",
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
supertensor = np.ones(2*2*2*2, dtype='c').reshape((2, 2, 2, 2))
self.assertRaises(TypeError, floor, supertensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongDim(self):
"Test floor function with wrong type"
print >>sys.stderr, self.typeStr, "... ",
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, floor, supertensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
print >>sys.stderr, self.typeStr, "... ",
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
self.assertRaises(TypeError, floor, object)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeil(self):
"Test ceil function"
print >>sys.stderr, self.typeStr, "... ",
supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
answer = supertensor.copy()
answer[answer > 5] = 5
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
ceil(supertensor, 5)
np.testing.assert_array_equal(supertensor, answer)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongType(self):
"Test ceil function with wrong type"
print >>sys.stderr, self.typeStr, "... ",
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
supertensor = np.ones(2*2*2*2, 'c').reshape((2, 2, 2, 2))
self.assertRaises(TypeError, ceil, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongDim(self):
"Test ceil function with wrong dimensions"
print >>sys.stderr, self.typeStr, "... ",
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
supertensor = np.arange(2*2*2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, ceil, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilNonArray(self):
"Test ceil function with non-array"
print >>sys.stderr, self.typeStr, "... ",
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
supertensor = np.arange(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2)).tolist()
self.assertRaises(TypeError, ceil, supertensor)
# Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
print >>sys.stderr, self.typeStr, "... ",
luSplit = SuperTensor.__dict__[self.typeStr + "LUSplit"]
supertensor = np.ones(2*2*2*2, dtype=self.typeCode).reshape((2, 2, 2, 2))
answer_upper = [[[[0, 0], [0, 1]], [[0, 1], [1, 1]]], [[[0, 1], [1, 1]], [[1, 1], [1, 1]]]]
answer_lower = [[[[1, 1], [1, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]]
lower, upper = luSplit(supertensor)
self.assertEquals((lower == answer_lower).all(), True)
self.assertEquals((upper == answer_upper).all(), True)
######################################################################
class scharTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
#self.result = int(self.result)
######################################################################
class ucharTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
#self.result = int(self.result)
######################################################################
class shortTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "short"
self.typeCode = "h"
#self.result = int(self.result)
######################################################################
class ushortTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "ushort"
self.typeCode = "H"
#self.result = int(self.result)
######################################################################
class intTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "int"
self.typeCode = "i"
#self.result = int(self.result)
######################################################################
class uintTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "uint"
self.typeCode = "I"
#self.result = int(self.result)
######################################################################
class longTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
#self.result = int(self.result)
######################################################################
class ulongTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "ulong"
self.typeCode = "L"
#self.result = int(self.result)
######################################################################
class longLongTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "longLong"
self.typeCode = "q"
#self.result = int(self.result)
######################################################################
class ulongLongTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "ulongLong"
self.typeCode = "Q"
#self.result = int(self.result)
######################################################################
class floatTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "float"
self.typeCode = "f"
######################################################################
class doubleTestCase(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite( scharTestCase))
suite.addTest(unittest.makeSuite( ucharTestCase))
suite.addTest(unittest.makeSuite( shortTestCase))
suite.addTest(unittest.makeSuite( ushortTestCase))
suite.addTest(unittest.makeSuite( intTestCase))
suite.addTest(unittest.makeSuite( uintTestCase))
suite.addTest(unittest.makeSuite( longTestCase))
suite.addTest(unittest.makeSuite( ulongTestCase))
suite.addTest(unittest.makeSuite( longLongTestCase))
suite.addTest(unittest.makeSuite(ulongLongTestCase))
suite.addTest(unittest.makeSuite( floatTestCase))
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
print "Testing 4D Functions of Module SuperTensor"
print "NumPy version", np.__version__
print
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
| bsd-3-clause | 5,269,493,086,502,433,000 | 41.505155 | 118 | 0.565365 | false |
cala/portaltbc | dep/libmpq/bindings/python/mpq.py | 501 | 10430 | """wrapper for libmpq"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import ctypes
import ctypes.util
import os
libmpq = ctypes.CDLL(ctypes.util.find_library("mpq"))
class Error(Exception):
pass
errors = {
-1: (IOError, "open"),
-2: (IOError, "close"),
-3: (IOError, "seek"),
-4: (IOError, "read"),
-5: (IOError, "write"),
-6: (MemoryError,),
-7: (Error, "file is not an mpq or is corrupted"),
-8: (AssertionError, "not initialized"),
-9: (AssertionError, "buffer size too small"),
-10: (IndexError, "file not in archive"),
-11: (AssertionError, "decrypt"),
-12: (AssertionError, "unpack"),
}
def check_error(result, func, arguments, errors=errors):
try:
error = errors[result]
except KeyError:
return result
else:
raise error[0](*error[1:])
libmpq.libmpq__version.restype = ctypes.c_char_p
libmpq.libmpq__archive_open.errcheck = check_error
libmpq.libmpq__archive_close.errcheck = check_error
libmpq.libmpq__archive_packed_size.errcheck = check_error
libmpq.libmpq__archive_unpacked_size.errcheck = check_error
libmpq.libmpq__archive_offset.errcheck = check_error
libmpq.libmpq__archive_version.errcheck = check_error
libmpq.libmpq__archive_files.errcheck = check_error
libmpq.libmpq__file_packed_size.errcheck = check_error
libmpq.libmpq__file_unpacked_size.errcheck = check_error
libmpq.libmpq__file_offset.errcheck = check_error
libmpq.libmpq__file_blocks.errcheck = check_error
libmpq.libmpq__file_encrypted.errcheck = check_error
libmpq.libmpq__file_compressed.errcheck = check_error
libmpq.libmpq__file_imploded.errcheck = check_error
libmpq.libmpq__file_number.errcheck = check_error
libmpq.libmpq__file_read.errcheck = check_error
libmpq.libmpq__block_open_offset.errcheck = check_error
libmpq.libmpq__block_close_offset.errcheck = check_error
libmpq.libmpq__block_unpacked_size.errcheck = check_error
libmpq.libmpq__block_read.errcheck = check_error
__version__ = libmpq.libmpq__version()
class Reader(object):
def __init__(self, file, libmpq=libmpq):
self._file = file
self._pos = 0
self._buf = []
self._cur_block = 0
libmpq.libmpq__block_open_offset(self._file._archive._mpq,
self._file.number)
def __iter__(self):
return self
def __repr__(self):
return "iter(%r)" % self._file
def seek(self, offset, whence=os.SEEK_SET, os=os):
if whence == os.SEEK_SET:
pass
elif whence == os.SEEK_CUR:
offset += self._pos
elif whence == os.SEEK_END:
offset += self._file.unpacked_size
else:
raise ValueError, "invalid whence"
if offset >= self._pos:
self.read(offset - self._pos)
else:
self._pos = 0
self._buf = []
self._cur_block = 0
self.read(offset)
def tell(self):
return self._pos
def _read_block(self, ctypes=ctypes, libmpq=libmpq):
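        # Ask libmpq for the unpacked size of the current block, read it into
        # a ctypes buffer and append the raw bytes to the internal buffer list.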
block_size = ctypes.c_uint64()
libmpq.libmpq__block_unpacked_size(self._file._archive._mpq,
self._file.number, self._cur_block, ctypes.byref(block_size))
block_data = ctypes.create_string_buffer(block_size.value)
libmpq.libmpq__block_read(self._file._archive._mpq,
self._file.number, self._cur_block,
block_data, ctypes.c_uint64(len(block_data)), None)
self._buf.append(block_data.raw)
self._cur_block += 1
def read(self, size=-1):
while size < 0 or sum(map(len, self._buf)) < size:
if self._cur_block == self._file.blocks:
break
self._read_block()
buf = "".join(self._buf)
if size < 0:
ret = buf
self._buf = []
else:
ret = buf[:size]
self._buf = [buf[size:]]
self._pos += len(ret)
return ret
def readline(self, os=os):
line = []
while True:
char = self.read(1)
if char == "":
break
if char not in '\r\n' and line and line[-1] in '\r\n':
self.seek(-1, os.SEEK_CUR)
break
line.append(char)
return ''.join(line)
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, sizehint=-1):
res = []
while sizehint < 0 or sum(map(len, res)) < sizehint:
line = self.readline()
if not line:
break
res.append(line)
return res
xreadlines = __iter__
def __del__(self, libmpq=libmpq):
libmpq.libmpq__block_close_offset(self._file._archive._mpq,
self._file.number)
class File(object):
def __init__(self, archive, number, ctypes=ctypes, libmpq=libmpq):
self._archive = archive
self.number = number
for name, atype in [
("packed_size", ctypes.c_uint64),
("unpacked_size", ctypes.c_uint64),
("offset", ctypes.c_uint64),
("blocks", ctypes.c_uint32),
("encrypted", ctypes.c_uint32),
("compressed", ctypes.c_uint32),
("imploded", ctypes.c_uint32),
]:
data = atype()
func = getattr(libmpq, "libmpq__file_"+name)
func(self._archive._mpq, self.number, ctypes.byref(data))
setattr(self, name, data.value)
def __str__(self, ctypes=ctypes, libmpq=libmpq):
data = ctypes.create_string_buffer(self.unpacked_size)
libmpq.libmpq__file_read(self._archive._mpq, self.number,
data, ctypes.c_uint64(len(data)), None)
return data.raw
def __repr__(self):
return "%r[%i]" % (self._archive, self.number)
def __iter__(self, Reader=Reader):
return Reader(self)
class Archive(object):
def __init__(self, source, ctypes=ctypes, File=File, libmpq=libmpq):
self._source = source
if isinstance(source, File):
assert not source.encrypted
assert not source.compressed
assert not source.imploded
self.filename = source._archive.filename
offset = source._archive.offset + source.offset
else:
self.filename = source
offset = -1
self._mpq = ctypes.c_void_p()
libmpq.libmpq__archive_open(ctypes.byref(self._mpq), self.filename,
ctypes.c_uint64(offset))
self._opened = True
for field_name, field_type in [
("packed_size", ctypes.c_uint64),
("unpacked_size", ctypes.c_uint64),
("offset", ctypes.c_uint64),
("version", ctypes.c_uint32),
("files", ctypes.c_uint32),
]:
func = getattr(libmpq, "libmpq__archive_" + field_name)
data = field_type()
func(self._mpq, ctypes.byref(data))
setattr(self, field_name, data.value)
def __del__(self, libmpq=libmpq):
if getattr(self, "_opened", False):
libmpq.libmpq__archive_close(self._mpq)
def __len__(self):
return self.files
def __contains__(self, item, ctypes=ctypes, libmpq=libmpq):
if isinstance(item, str):
data = ctypes.c_uint32()
try:
libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item),
ctypes.byref(data))
except IndexError:
return False
return True
return 0 <= item < self.files
def __getitem__(self, item, ctypes=ctypes, File=File, libmpq=libmpq):
if isinstance(item, str):
data = ctypes.c_int()
libmpq.libmpq__file_number(self._mpq, ctypes.c_char_p(item),
ctypes.byref(data))
item = data.value
else:
if not 0 <= item < self.files:
raise IndexError, "file not in archive"
return File(self, item)
def __repr__(self):
return "mpq.Archive(%r)" % self._source
# Remove clutter - everything except Error and Archive.
del os, check_error, ctypes, errors, File, libmpq, Reader
if __name__ == "__main__":
import sys, random
archive = Archive(sys.argv[1])
print repr(archive)
for k, v in archive.__dict__.iteritems():
#if k[0] == '_': continue
print " " * (4 - 1), k, v
assert '(listfile)' in archive
assert 0 in archive
assert len(archive) == archive.files
files = [x.strip() for x in archive['(listfile)']]
files.extend(xrange(archive.files))
for key in files: #sys.argv[2:] if sys.argv[2:] else xrange(archive.files):
file = archive[key]
print
print " " * (4 - 1), repr(file)
for k, v in file.__dict__.iteritems():
#if k[0] == '_': continue
print " " * (8 - 1), k, v
a = str(file)
b = iter(file).read()
reader = iter(file)
c = []
while True:
l = random.randrange(1, 10)
d = reader.read(l)
if not d: break
assert len(d) <= l
c.append(d)
c = "".join(c)
d = []
reader.seek(0)
for line in reader:
d.append(line)
d = "".join(d)
assert a == b == c == d, map(hash, [a,b,c,d])
assert len(a) == file.unpacked_size
repr(iter(file))
reader.seek(0)
a = reader.readlines()
reader.seek(0)
b = list(reader)
assert a == b
| gpl-2.0 | -2,264,022,977,074,431,500 | 31.391304 | 79 | 0.56232 | false |
lampwins/netbox | netbox/ipam/api/views.py | 1 | 11245 | from django.conf import settings
from django.db.models import Count
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from extras.api.views import CustomFieldModelViewSet
from ipam import filters
from ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF
from utilities.api import FieldChoicesViewSet, ModelViewSet
from utilities.utils import get_subquery
from . import serializers
#
# Field choices
#
class IPAMFieldChoicesViewSet(FieldChoicesViewSet):
fields = (
(Aggregate, ['family']),
(Prefix, ['family', 'status']),
(IPAddress, ['family', 'status', 'role']),
(VLAN, ['status']),
(Service, ['protocol']),
)
#
# VRFs
#
class VRFViewSet(CustomFieldModelViewSet):
queryset = VRF.objects.select_related('tenant').prefetch_related('tags').annotate(
ipaddress_count=get_subquery(IPAddress, 'vrf'),
prefix_count=get_subquery(Prefix, 'vrf')
)
serializer_class = serializers.VRFSerializer
filterset_class = filters.VRFFilter
#
# RIRs
#
class RIRViewSet(ModelViewSet):
queryset = RIR.objects.annotate(
aggregate_count=Count('aggregates')
)
serializer_class = serializers.RIRSerializer
filterset_class = filters.RIRFilter
#
# Aggregates
#
class AggregateViewSet(CustomFieldModelViewSet):
queryset = Aggregate.objects.select_related('rir').prefetch_related('tags')
serializer_class = serializers.AggregateSerializer
filterset_class = filters.AggregateFilter
#
# Roles
#
class RoleViewSet(ModelViewSet):
queryset = Role.objects.annotate(
prefix_count=get_subquery(Prefix, 'role'),
vlan_count=get_subquery(VLAN, 'role')
)
serializer_class = serializers.RoleSerializer
filterset_class = filters.RoleFilter
#
# Prefixes
#
class PrefixViewSet(CustomFieldModelViewSet):
queryset = Prefix.objects.select_related(
'site', 'vrf__tenant', 'tenant', 'vlan', 'role'
).prefetch_related(
'tags'
)
serializer_class = serializers.PrefixSerializer
filterset_class = filters.PrefixFilter
@action(detail=True, url_path='available-prefixes', methods=['get', 'post'])
def available_prefixes(self, request, pk=None):
"""
A convenience method for returning available child prefixes within a parent.
"""
prefix = get_object_or_404(Prefix, pk=pk)
available_prefixes = prefix.get_available_prefixes()
if request.method == 'POST':
# Permissions check
if not request.user.has_perm('ipam.add_prefix'):
raise PermissionDenied()
# Normalize to a list of objects
requested_prefixes = request.data if isinstance(request.data, list) else [request.data]
# Allocate prefixes to the requested objects based on availability within the parent
for i, requested_prefix in enumerate(requested_prefixes):
# Validate requested prefix size
prefix_length = requested_prefix.get('prefix_length')
if prefix_length is None:
return Response(
{
"detail": "Item {}: prefix_length field missing".format(i)
},
status=status.HTTP_400_BAD_REQUEST
)
try:
prefix_length = int(prefix_length)
except ValueError:
return Response(
{
"detail": "Item {}: Invalid prefix length ({})".format(i, prefix_length),
},
status=status.HTTP_400_BAD_REQUEST
)
if prefix.family == 4 and prefix_length > 32:
return Response(
{
"detail": "Item {}: Invalid prefix length ({}) for IPv4".format(i, prefix_length),
},
status=status.HTTP_400_BAD_REQUEST
)
elif prefix.family == 6 and prefix_length > 128:
return Response(
{
"detail": "Item {}: Invalid prefix length ({}) for IPv6".format(i, prefix_length),
},
status=status.HTTP_400_BAD_REQUEST
)
# Find the first available prefix equal to or larger than the requested size
for available_prefix in available_prefixes.iter_cidrs():
if requested_prefix['prefix_length'] >= available_prefix.prefixlen:
allocated_prefix = '{}/{}'.format(available_prefix.network, requested_prefix['prefix_length'])
requested_prefix['prefix'] = allocated_prefix
requested_prefix['vrf'] = prefix.vrf.pk if prefix.vrf else None
break
else:
return Response(
{
"detail": "Insufficient space is available to accommodate the requested prefix size(s)"
},
status=status.HTTP_204_NO_CONTENT
)
# Remove the allocated prefix from the list of available prefixes
available_prefixes.remove(allocated_prefix)
# Initialize the serializer with a list or a single object depending on what was requested
context = {'request': request}
if isinstance(request.data, list):
serializer = serializers.PrefixSerializer(data=requested_prefixes, many=True, context=context)
else:
serializer = serializers.PrefixSerializer(data=requested_prefixes[0], context=context)
# Create the new Prefix(es)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
else:
serializer = serializers.AvailablePrefixSerializer(available_prefixes.iter_cidrs(), many=True, context={
'request': request,
'vrf': prefix.vrf,
})
return Response(serializer.data)
@action(detail=True, url_path='available-ips', methods=['get', 'post'])
def available_ips(self, request, pk=None):
"""
A convenience method for returning available IP addresses within a prefix. By default, the number of IPs
returned will be equivalent to PAGINATE_COUNT. An arbitrary limit (up to MAX_PAGE_SIZE, if set) may be passed,
        however, results will not be paginated.
"""
prefix = get_object_or_404(Prefix, pk=pk)
# Create the next available IP within the prefix
if request.method == 'POST':
# Permissions check
if not request.user.has_perm('ipam.add_ipaddress'):
raise PermissionDenied()
# Normalize to a list of objects
requested_ips = request.data if isinstance(request.data, list) else [request.data]
# Determine if the requested number of IPs is available
available_ips = prefix.get_available_ips()
if available_ips.size < len(requested_ips):
return Response(
{
"detail": "An insufficient number of IP addresses are available within the prefix {} ({} "
"requested, {} available)".format(prefix, len(requested_ips), len(available_ips))
},
status=status.HTTP_204_NO_CONTENT
)
# Assign addresses from the list of available IPs and copy VRF assignment from the parent prefix
available_ips = iter(available_ips)
prefix_length = prefix.prefix.prefixlen
for requested_ip in requested_ips:
requested_ip['address'] = '{}/{}'.format(next(available_ips), prefix_length)
requested_ip['vrf'] = prefix.vrf.pk if prefix.vrf else None
# Initialize the serializer with a list or a single object depending on what was requested
context = {'request': request}
if isinstance(request.data, list):
serializer = serializers.IPAddressSerializer(data=requested_ips, many=True, context=context)
else:
serializer = serializers.IPAddressSerializer(data=requested_ips[0], context=context)
# Create the new IP address(es)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Determine the maximum number of IPs to return
else:
try:
limit = int(request.query_params.get('limit', settings.PAGINATE_COUNT))
except ValueError:
limit = settings.PAGINATE_COUNT
if settings.MAX_PAGE_SIZE:
limit = min(limit, settings.MAX_PAGE_SIZE)
# Calculate available IPs within the prefix
ip_list = []
for index, ip in enumerate(prefix.get_available_ips(), start=1):
ip_list.append(ip)
if index == limit:
break
serializer = serializers.AvailableIPSerializer(ip_list, many=True, context={
'request': request,
'prefix': prefix.prefix,
'vrf': prefix.vrf,
})
return Response(serializer.data)
#
# IP addresses
#
class IPAddressViewSet(CustomFieldModelViewSet):
queryset = IPAddress.objects.select_related(
'vrf__tenant', 'tenant', 'nat_inside', 'interface__device__device_type', 'interface__virtual_machine'
).prefetch_related(
'nat_outside', 'tags',
)
serializer_class = serializers.IPAddressSerializer
filterset_class = filters.IPAddressFilter
#
# VLAN groups
#
class VLANGroupViewSet(ModelViewSet):
queryset = VLANGroup.objects.select_related('site').annotate(
vlan_count=Count('vlans')
)
serializer_class = serializers.VLANGroupSerializer
filterset_class = filters.VLANGroupFilter
#
# VLANs
#
class VLANViewSet(CustomFieldModelViewSet):
queryset = VLAN.objects.select_related(
'site', 'group', 'tenant', 'role'
).prefetch_related(
'tags'
).annotate(
prefix_count=get_subquery(Prefix, 'role')
)
serializer_class = serializers.VLANSerializer
filterset_class = filters.VLANFilter
#
# Services
#
class ServiceViewSet(ModelViewSet):
queryset = Service.objects.select_related('device').prefetch_related('tags')
serializer_class = serializers.ServiceSerializer
filterset_class = filters.ServiceFilter
| apache-2.0 | -5,679,642,878,907,050,000 | 35.274194 | 118 | 0.596443 | false |
kit-cel/wt | nt2/modulation_pulsformung/Spektren_digitale_Modulation.py | 1 | 2940 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 13 10:31:13 2014
NTII Demo - source coding - effects on the spectrum of the transmitted signal
System model: source --> QPSK --> pulse shaping
@author: Michael Schwall
"""
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import scipy.signal as sig
import rrc as rrc
plt.close("all")
###############################################################################
## System parameters
###############################################################################
# Number of simulated symbols
K = 65536
# Probability of a 1-bit
P_b_1 = np.array([0.5,0.1])
# Oversampling (samples per symbol)
N = 4
# RRC filter, roll-off factor, number of filter coefficients
alpha = 0
N_rrc = N*16+1
# FFT Length
N_FFT = 1024
# Check inputs
assert (K > 0 and (K & (K - 1)) == 0), 'K muss eine Potenz von 2 sein'
assert (N > 0 and N%2 == 0), 'N muss groesser Null sein und gerade'
assert (alpha >= 0 and alpha <= 1), 'Fuer den Rolloff-Faktor gilt: 0 <= alpha <= 1'
assert (N_rrc > 0 and N_rrc%2 != 0), 'N_rrc muss groesser Null sein und ungerade'
###############################################################################
## Transmitter
###############################################################################
idx=0
s_tx_rrc = np.zeros((K*N,len(P_b_1)))
while idx < len(P_b_1):
    # Generate bits
b = (P_b_1[idx]+np.random.uniform(-1.0,0.0,size=K) >= 0).astype(int)
    # Generate BPSK symbols
I = (2*b-1)
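    # For symbols I in {-1, +1} with P(I=+1) = P_b_1, theory gives E{I} = 2*P_b_1 - 1
    # and Var{I} = 1 - E{I}^2; the empirical values printed below approach these.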
print "P(b=1)=%0.2f --> E{I} = %0.2f --> Var{I} = %0.2f" % (P_b_1[idx], I.mean(), I.var())
    # Upsample by factor N
s_up = np.zeros(K*N)
s_up[::N] = I;
# Root-Raised-Cosine (RRC) Filter
h_rrc = rrc.get_rrc_ir(N_rrc,N,1.0,alpha)
s_tx_rrc[:,idx] = sig.lfilter(h_rrc,1.0,s_up)
idx += 1
##############################################################################
# Output
##############################################################################
# Settling time of the RRC filter (helper quantity)
N_osc = (N_rrc-1)/2
fig1 = plt.figure()
fig1.suptitle("Pulsformung (RRC, alpha=%0.2f)" % alpha, fontsize=14, fontweight='bold')
ax1 = fig1.add_subplot(1,2,1)
ax1.set_title('Impulsantwort RRC')
ax1.stem(np.array(np.arange(-N_osc,N_osc+1)),h_rrc)
ax1.set_xlim(-N_osc,N_osc+1)
ax1.grid(True)
ax1.set_xlabel('k (t/Ts/N)')
ax1.set_ylabel('Amplitude')
ax2 = fig1.add_subplot(1,2,2)
ax2.set_title('PSD QPSK mit RRC-Pulsformung')
idx=0
while idx < len(P_b_1):
Pxx_rrc = 1/N_FFT*(np.abs(np.fft.fftshift(np.fft.fft(np.reshape(s_tx_rrc[:,idx],(-1,N_FFT)),axis=1)))**2).sum(0)
f = np.linspace(-0.5,0.5,len(Pxx_rrc))
ax2.plot(f, 10*np.log10(Pxx_rrc))
idx += 1
start, end = ax2.get_ylim()
ax2.yaxis.set_ticks(np.arange(start, end, 10))
ax2.set_xlim(-0.5,0.5)
ax2.grid(True)
ax2.set_xlabel('n (f/N/Ts)')
ax2.set_ylabel('Amplitude [dB]')
plt.show()
| gpl-2.0 | -8,960,033,279,247,890,000 | 25.486486 | 116 | 0.52585 | false |
314ish/StudbookToolkit | GTT/SPARKS.py | 1 | 1270 | from dbfread import DBF
class SPARKSReader:
"""These objects can be used to read SPARKS formatted files. SPARKS uses
the dBase format for data storage
For more information on SPARKS see
http://www2.isis.org/support/SPARKS/Pages/home.aspx
For more information on dBase see https://en.wikipedia.org/wiki/.dbf
"""
def __init__(self, filename):
"""Initialize an ExcelReader: object.
Args:
filename (str): name of the file to read from
Returns:
SPARKSReader:
"""
self.db = DBF(filename)
def get_header_as_list(self):
"""return header/metadata as a python list.
Returns:
            list: a list with each element representing a single field (column)
                name, similar in format to the Excel reader's header
"""
return self.db.field_names
def get_records_as_list(self):
"""read all records from this file into a (2-dimensional) list. Each
        element of the list is a list representing an entire row of data. In
        each inner list, each element is a single column value.
Returns:
list:
"""
return_me = []
for record in self.db:
return_me.append(record.values())
return return_me
| mit | 5,157,971,634,103,487,000 | 25.458333 | 79 | 0.608661 | false |
pombredanne/1trillioneuros | libs/relevance/__init__.py | 3 | 1449 | #!/usr/bin/env python
# Encoding: utf-8
# -----------------------------------------------------------------------------
# Project : OKF - Spending Stories
# -----------------------------------------------------------------------------
# Author : Edouard Richard <[email protected]>
# -----------------------------------------------------------------------------
# License : GNU General Public License
# -----------------------------------------------------------------------------
# Creation : 21-Aug-2013
# Last mod : 21-Aug-2013
# -----------------------------------------------------------------------------
# This file is part of Spending Stories.
#
# Spending Stories is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Spending Stories is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Spending Stories. If not, see <http://www.gnu.org/licenses/>.
from relevance import Relevance
from processor import Processor
# EOF
| gpl-3.0 | -4,150,838,082,147,942,400 | 44.28125 | 79 | 0.509317 | false |
pauloricardomg/cassandra-dtest | fqltool_test.py | 6 | 6364 | import pytest
import logging
import os
import subprocess
import tempfile
from dtest import Tester
from shutil import rmtree
since = pytest.mark.since
logger = logging.getLogger(__name__)
@since('4.0')
class TestFQLTool(Tester):
"""
Makes sure fqltool replay and fqltool compare work
@jira_ticket CASSANDRA-14690
"""
def test_replay(self):
"""
Generates a full query log, wipes the nodes and replays the
query log, then makes sure that the data is correct.
@jira_ticket CASSANDRA-14690
"""
self.cluster.populate(2).start(wait_for_binary_proto=True)
node1, node2 = self.cluster.nodelist()
with tempfile.TemporaryDirectory() as temp_dir:
tmpdir = tempfile.mkdtemp(dir=temp_dir)
tmpdir2 = tempfile.mkdtemp(dir=temp_dir)
node1.nodetool("enablefullquerylog --path={}".format(tmpdir))
node2.nodetool("enablefullquerylog --path={}".format(tmpdir2))
node1.stress(['write', 'n=1000'])
node1.flush()
node2.flush()
node1.nodetool("disablefullquerylog")
node2.nodetool("disablefullquerylog")
node1.stop(wait_other_notice=True)
node2.stop(wait_other_notice=True)
node1.clear()
node2.clear()
node1.start(wait_for_binary_proto=True)
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
# make sure the node is empty:
got_exception = False
try:
node1.stress(['read', 'n=1000'])
except Exception:
got_exception = True
assert got_exception
# replay the log files
self._run_fqltool_replay(node1, [tmpdir, tmpdir2], "127.0.0.1", None, None)
# and verify the data is there
node1.stress(['read', 'n=1000'])
def test_compare(self):
"""
uses fqltool replay to compare two runs of the same query log and makes
sure that the results match
@jira_ticket CASSANDRA-14690
"""
self.cluster.populate(1).start(wait_for_binary_proto=True)
node1 = self.cluster.nodelist()[0]
with tempfile.TemporaryDirectory() as temp_dir:
results1 = tempfile.mkdtemp(dir=temp_dir)
queries1 = tempfile.mkdtemp(dir=temp_dir)
results2 = tempfile.mkdtemp(dir=temp_dir)
queries2 = tempfile.mkdtemp(dir=temp_dir)
fqldir = tempfile.mkdtemp(dir=temp_dir)
node1.stress(['write', 'n=1000'])
node1.flush()
node1.nodetool("enablefullquerylog --path={}".format(fqldir))
node1.stress(['read', 'n=1000'])
node1.nodetool("disablefullquerylog")
self._run_fqltool_replay(node1, [fqldir], "127.0.0.1", queries1, results1)
self._run_fqltool_replay(node1, [fqldir], "127.0.0.1", queries2, results2)
output = self._run_fqltool_compare(node1, queries1, [results1, results2])
assert b"MISMATCH" not in output # running the same reads against the same data
def test_compare_mismatch(self):
"""
generates two fql log files with different data (seq is different when running stress)
then asserts that the replays of each generates a mismatch
@jira_ticket CASSANDRA-14690
"""
self.cluster.populate(1).start(wait_for_binary_proto=True)
node1 = self.cluster.nodelist()[0]
with tempfile.TemporaryDirectory() as temp_dir:
fqldir1 = tempfile.mkdtemp(dir=temp_dir)
fqldir2 = tempfile.mkdtemp(dir=temp_dir)
results1 = tempfile.mkdtemp(dir=temp_dir)
queries1 = tempfile.mkdtemp(dir=temp_dir)
results2 = tempfile.mkdtemp(dir=temp_dir)
queries2 = tempfile.mkdtemp(dir=temp_dir)
node1.nodetool("enablefullquerylog --path={}".format(fqldir1))
node1.stress(['write', 'n=1000'])
node1.flush()
node1.stress(['read', 'n=1000'])
node1.nodetool("disablefullquerylog")
node1.stop()
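            # Wipe the data directories so the node restarts from an empty state.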
for d in node1.data_directories():
rmtree(d)
os.mkdir(d)
node1.start(wait_for_binary_proto=True)
node1.nodetool("enablefullquerylog --path={}".format(fqldir2))
node1.stress(['write', 'n=1000', '-pop', 'seq=1000..2000'])
node1.flush()
node1.stress(['read', 'n=1000', '-pop', 'seq=1000..2000'])
node1.nodetool("disablefullquerylog")
node1.stop()
for d in node1.data_directories():
rmtree(d)
os.mkdir(d)
node1.start(wait_for_binary_proto=True)
self._run_fqltool_replay(node1, [fqldir1], "127.0.0.1", queries1, results1)
node1.stop()
for d in node1.data_directories():
rmtree(d)
os.mkdir(d)
node1.start(wait_for_binary_proto=True)
self._run_fqltool_replay(node1, [fqldir2], "127.0.0.1", queries2, results2)
output = self._run_fqltool_compare(node1, queries1, [results1, results2])
assert b"MISMATCH" in output # compares two different stress runs, should mismatch
def _run_fqltool_replay(self, node, logdirs, target, queries, results):
fqltool = self.fqltool(node)
args = [fqltool, "replay", "--target {}".format(target)]
if queries is not None:
args.append("--store-queries {}".format(queries))
if results is not None:
args.append("--results {}".format(results))
args.extend(logdirs)
rc = subprocess.call(args)
assert rc == 0
def _run_fqltool_compare(self, node, queries, results):
fqltool = self.fqltool(node)
args = [fqltool, "compare", "--queries {}".format(queries)]
args.extend([os.path.join(r, "127.0.0.1") for r in results])
logger.info(args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
logger.info(stdout)
return stdout
def fqltool(self, node):
cdir = node.get_install_dir()
fqltool = os.path.join(cdir, 'tools', 'bin', 'fqltool')
return fqltool
| apache-2.0 | -3,819,239,366,997,937,000 | 39.025157 | 95 | 0.588938 | false |
rooi/CouchPotatoServer | libs/pyutil/test/out_of_shape/test_strutil.py | 106 | 1713 | #!/usr/bin/env python
# Copyright (c) 2004-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import unittest
from pyutil.assertutil import _assert
from pyutil import strutil
class Teststrutil(unittest.TestCase):
def test_short_input(self):
self.failUnless(strutil.pop_trailing_newlines("\r\n") == "")
self.failUnless(strutil.pop_trailing_newlines("\r") == "")
self.failUnless(strutil.pop_trailing_newlines("x\r\n") == "x")
self.failUnless(strutil.pop_trailing_newlines("x\r") == "x")
def test_split(self):
_assert(strutil.split_on_newlines("x\r\ny") == ["x", "y",], strutil.split_on_newlines("x\r\ny"))
_assert(strutil.split_on_newlines("x\r\ny\r\n") == ["x", "y", '',], strutil.split_on_newlines("x\r\ny\r\n"))
_assert(strutil.split_on_newlines("x\n\ny\n\n") == ["x", '', "y", '', '',], strutil.split_on_newlines("x\n\ny\n\n"))
def test_commonprefix(self):
_assert(strutil.commonprefix(["foo","foobarooo", "foosplat",]) == 'foo', strutil.commonprefix(["foo","foobarooo", "foosplat",]))
_assert(strutil.commonprefix(["foo","afoobarooo", "foosplat",]) == '', strutil.commonprefix(["foo","afoobarooo", "foosplat",]))
def test_commonsuffix(self):
_assert(strutil.commonsuffix(["foo","foobarooo", "foosplat",]) == '', strutil.commonsuffix(["foo","foobarooo", "foosplat",]))
_assert(strutil.commonsuffix(["foo","foobarooo", "foosplato",]) == 'o', strutil.commonsuffix(["foo","foobarooo", "foosplato",]))
_assert(strutil.commonsuffix(["foo","foobarooofoo", "foosplatofoo",]) == 'foo', strutil.commonsuffix(["foo","foobarooofoo", "foosplatofoo",]))
| gpl-3.0 | 380,719,847,886,831,940 | 56.1 | 150 | 0.638062 | false |
sirex/Misago | misago/core/tests/test_migrationutils.py | 8 | 1103 | from django.apps import apps
from django.test import TestCase
from misago.core import migrationutils
from misago.core.models import CacheVersion
class CacheBusterUtilsTests(TestCase):
def test_cachebuster_register_cache(self):
"""
cachebuster_register_cache registers cache on migration successfully
"""
cache_name = 'eric_licenses'
migrationutils.cachebuster_register_cache(apps, cache_name)
CacheVersion.objects.get(cache=cache_name)
def test_cachebuster_unregister_cache(self):
"""
cachebuster_unregister_cache removes cache on migration successfully
"""
cache_name = 'eric_licenses'
migrationutils.cachebuster_register_cache(apps, cache_name)
CacheVersion.objects.get(cache=cache_name)
migrationutils.cachebuster_unregister_cache(apps, cache_name)
with self.assertRaises(CacheVersion.DoesNotExist):
CacheVersion.objects.get(cache=cache_name)
with self.assertRaises(ValueError):
migrationutils.cachebuster_unregister_cache(apps, cache_name)
| gpl-2.0 | 4,655,710,105,555,687,000 | 34.580645 | 76 | 0.710789 | false |
luuvish/libvio | script/test/model/__init__.py | 1 | 6065 | # -*- coding: utf-8 -*-
'''
================================================================================
This confidential and proprietary software may be used only
as authorized by a licensing agreement from Thumb o'Cat Inc.
In the event of publication, the following notice is applicable:
Copyright (C) 2013 - 2014 Thumb o'Cat
All right reserved.
The entire notice above must be reproduced on all authorized copies.
================================================================================
File : model.py
Author(s) : Luuvish
Version : 2.0
Revision :
2.0 May 12, 2014 Executor classify
================================================================================
'''
__all__ = ('rootpath', 'ModelExecutor')
__version__ = '2.0.0'
from sys import stdout, stderr
from os.path import join, normpath, dirname, exists, splitext, basename
rootpath = normpath(join(dirname(__file__), '../../..'))
class Executor(object):
codecs = ('h264', 'hevc', 'vc1', 'vp8', 'vp9')
actions = ('decode', 'encode', 'digest', 'digest_by_frames', 'compare')
def __init__(self, **kwargs):
self.stdout = kwargs.get('stdout', stdout)
self.stderr = kwargs.get('stderr', stderr)
self.defaults = {'decode':[], 'encode':[], 'digest':[]}
def execute(self):
return ''
def options(self, source, target):
return []
def mkdir(self, target):
from os import makedirs
outdir = normpath(dirname(target))
if not exists(outdir):
makedirs(outdir)
def decode(self, source, target):
from subprocess import call
execute = self.execute()
options = self.defaults['decode'] + self.options(source, target)
self.mkdir(target)
try:
call([execute] + options, stdout=self.stdout, stderr=self.stderr)
except Exception as e:
raise e
if not exists(target):
raise Exception('decode error: %s' % basename(source))
def encode(self, source, target, option):
from subprocess import call
execute = self.execute()
options = self.defaults['encode'] + self.options(source, target)
self.mkdir(target)
try:
call([execute] + options, stdout=self.stdout, stderr=self.stderr)
except Exception as e:
raise e
if not exists(target):
raise Exception('encode error: %s' % basename(source))
def digest(self, source, target=None):
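        # Run the decoder with its digest options; the tool writes MD5 digest
        # lines to 'output', which are read back, returned and (when no target
        # file was requested) removed again.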
from subprocess import call
from os import remove
outname, outext = splitext(basename(source))
output = '.' + outname + '.yuv.md5' if target is None else target
execute = self.execute()
options = self.defaults['digest'] + self.options(source, output)
if target is not None:
self.mkdir(target)
try:
call([execute] + options, stdout=self.stdout, stderr=self.stderr)
except Exception as e:
raise e
if not exists(output):
raise Exception('digest error: %s' % basename(source))
lines = []
with open(output, 'rt') as f:
lines = [line.rstrip().lower() for line in f]
if target is None:
remove(output)
return lines
def digest_by_frames(self, source, target=None, frames=0):
from hashlib import md5
from os import remove
from os.path import getsize
if frames <= 0:
raise Exception('digest error: %s' % basename(source))
outname, outext = splitext(basename(source))
output = '.' + outname + '.yuv'
try:
self.decode(source, output)
except Exception as e:
raise e
if not exists(output):
raise Exception('digest error: %s' % basename(source))
lines = []
with open(output, 'rb') as f:
size = getsize(output) / frames
while True:
data = f.read(size)
if len(data) <= 0:
break
md5hash = md5()
md5hash.update(data)
lines.append(md5hash.hexdigest().lower())
remove(output)
if target is not None:
with open(target, 'wt') as f:
for line in lines:
f.write('%s\n' % line)
return lines
def compare(self, source, target):
if not exists(target):
raise Exception('digest no exists: %s' % basename(target))
hashs = []
with open(target, 'rt') as f:
hashs = [line.rstrip().lower() for line in f]
nhash = len(hashs)
lines = []
try:
if 'digest' in self.actions:
lines = self.digest(source, None)
else:
lines = self.digest_by_frames(source, None, nhash)
except Exception as e:
raise e
nline = len(lines)
if nline != nhash:
raise Exception('decoded frames is different: %s' % basename(source))
for i in xrange(nline):
line = lines[i]
hash = hashs[i]
if line != hash:
raise Exception('mismatch %d %s: %s != %s' % (i, basename(source), line, hash))
return lines
class ModelExecutor(Executor):
model = ''
def __init__(self, codec, **kwargs):
super(ModelExecutor, self).__init__(**kwargs)
if codec not in self.codecs:
raise Exception('codec must be one of %s' % list(self.codecs))
for action in Executor.actions:
if action not in self.actions:
setattr(self, action, self.noslot)
def noslot(self, *largs, **kwargs):
raise Exception('action must be one of %s' % list(self.actions))
def bind(self, globber):
from .. import Globber
if not isinstance(globber, Globber):
raise Exception('bind require Globber class')
return globber.bind(self)
| mit | -1,093,440,940,457,488,400 | 28.158654 | 95 | 0.535697 | false |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/pylab_examples/toggle_images.py | 12 | 1277 | #!/usr/bin/env python
""" toggle between two images by pressing "t"
The basic idea is to load two images (they can be different shapes) and plot
them to the same axes with hold "on". Then, toggle the visible property of
them using keypress event handling
If you want two images with different shapes to be plotted with the same
extent, they must have the same "extent" property
As usual, we'll define some random images for demo. Real data is much more
exciting!
Note, on the wx backend on some platforms (eg linux), you have to
first click on the figure before the keypress events are activated.
If you know how to fix this, please email us!
"""
from pylab import *
# two images x1 is initially visible, x2 is not
x1 = rand(100, 100)
x2 = rand(150, 175)
# arbitrary extent - both images must have same extent if you want
# them to be resampled into the same axes space
extent = (0,1,0,1)
im1 = imshow(x1, extent=extent)
im2 = imshow(x2, extent=extent, hold=True)
im2.set_visible(False)
def toggle_images(event):
'toggle the visible state of the two images'
if event.key != 't': return
b1 = im1.get_visible()
b2 = im2.get_visible()
im1.set_visible(not b1)
im2.set_visible(not b2)
draw()
connect('key_press_event', toggle_images)
show()
| mit | -2,291,458,851,836,220,000 | 28.022727 | 76 | 0.722005 | false |
marratj/ansible | lib/ansible/plugins/terminal/dellos6.py | 18 | 2789 | # 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error: (?:(?!\bdoes not exist\b)(?!\balready exists\b)(?!\bHost not found\b)(?!\bnot active\b).)*$"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
]
def on_authorize(self, passwd=None):
if self._get_prompt().endswith('#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
        # on dellos6 the terminal settings are only accepted after entering privileged mode
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_deauthorize(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| gpl-3.0 | -1,420,981,539,664,300,000 | 36.689189 | 126 | 0.628899 | false |
2014c2g12/c2g12 | c2wp/wsgi/static/Brython2.1.0-20140419-113919/Lib/re.py | 54 | 11411 | #
# Copyright (c) 2014 Olemis Lang. All rights reserved.
#
# Choose either Javascript (faster) or Python engine based on regex complexity
# with a noticeable preference for the former.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode digits.
\D Matches any non-digit character; equivalent to [^\d].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode whitespace characters.
\S Matches any non-whitespace character; equivalent to [^\s].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
in bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the
range of Unicode alphanumeric characters (letters plus digits
plus underscore).
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
match the corresponding ASCII character categories
(rather than the whole Unicode categories, which is the
default).
For bytes patterns, this flag is the only available
behaviour and needn't be specified.
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE For compatibility only. Ignored for string patterns (it
is the default), and forbidden for bytes patterns.
This module also defines an exception 'error'.
"""
import sys
import _jsre
_pymdl = [None]
if not _jsre._is_valid():
from pyre import *
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X",
"U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE",
# TODO: brython - same exception class in sre_constants and _jsre
#"error"
]
__version__ = "2.2.1"
# flags
A = ASCII = _jsre.A # assume ascii "locale"
I = IGNORECASE = _jsre.I # ignore case
L = LOCALE = _jsre.L # assume current 8-bit locale
U = UNICODE = _jsre.U # assume unicode "locale"
M = MULTILINE = _jsre.M # make anchors look for newline
S = DOTALL = _jsre.S # make dot match newline
X = VERBOSE = _jsre.X # ignore whitespace and comments
# sre exception
# TODO: brython - same exception class in sre_constants and _jsre
#error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def _pyre():
mdl = _pymdl[0]
if mdl is None:
import pyre
_pymdl[0] = pyre
return pyre
else:
return mdl
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
if _jsre._is_valid(pattern):
return _jsre.match(pattern, string, flags)
else:
return _pyre().match(pattern, string, flags)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
if _jsre._is_valid(pattern):
return _jsre.search(pattern, string, flags)
else:
return _pyre().search(pattern, string, flags)
def sub(pattern, repl, string, count=0, flags=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a string, backslash escapes in it are processed. If it is
a callable, it's passed the match object and must return
a replacement string to be used."""
if _jsre._is_valid(pattern):
return _jsre.sub(pattern, repl, string, count, flags)
else:
return _pyre().sub(pattern, repl, string, count, flags)
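# A small illustration (comment added for clarity, not part of the original module):
# repl may be a literal string or a callable that receives the match object, e.g.
#
#     >>> sub(r'\d+', '#', 'a1b22c')
#     'a#b#c'
#     >>> sub(r'\d+', lambda m: str(int(m.group(0)) * 2), 'a1b22c')
#     'a2b44c'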
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
if _jsre._is_valid(pattern):
return _jsre.subn(pattern, repl, string, count, flags)
else:
return _pyre().subn(pattern, repl, string, count, flags)
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings. If
capturing parentheses are used in pattern, then the text of all
groups in the pattern are also returned as part of the resulting
list. If maxsplit is nonzero, at most maxsplit splits occur,
and the remainder of the string is returned as the final element
of the list."""
if _jsre._is_valid(pattern):
return _jsre.split(pattern, string, maxsplit, flags)
else:
return _pyre().split(pattern, string, maxsplit, flags)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more capturing groups are present in the pattern, return
a list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
if _jsre._is_valid(pattern):
return _jsre.findall(pattern, string, flags)
else:
return _pyre().findall(pattern, string, flags)
if sys.hexversion >= 0x02020000:
__all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _pyre().finditer(pattern, string, flags)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
if _jsre._is_valid(pattern):
return _jsre.compile(pattern, flags)
else:
return _pyre().compile(pattern, flags)
def purge():
"Clear the regular expression caches"
if _pymdl[0] is not None:
return _pymdl[0].purge()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _pyre().template(pattern, flags)
def escape(pattern):
"""
Escape all the characters in pattern except ASCII letters, numbers and '_'.
"""
# FIXME: Do not load _re module
return _pyre().escape(pattern)
| gpl-2.0 | -6,828,986,926,551,470,000 | 43.74902 | 79 | 0.656472 | false |
LingxiaoShawn/Shawn-fusion2017 | lib/werkzeug/contrib/fixers.py | 464 | 9949 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
    This module includes various helpers that fix bugs in web servers.  They may
    be necessary for some versions of a buggy web server but not for others.  We
    try to stay up to date with the status of these bugs as well as possible, but
    you have to check yourself whether they fix the problem you encounter.
    If you notice bugs in webservers that are not fixed in this module, consider
    contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
"""Wrap the application in this middleware if you are using FastCGI or CGI
and you have problems with your app root being set to the cgi script's path
instead of the path users are going to visit
.. versionchanged:: 0.9
Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.
:param app: the WSGI application
:param app_root: Defaulting to ``'/'``, you can set this to something else
if your app is mounted somewhere else.
"""
def __init__(self, app, app_root='/'):
self.app = app
self.app_root = app_root
def __call__(self, environ, start_response):
# only set PATH_INFO for older versions of Lighty or if no
# server software is provided. That's because the test was
# added in newer Werkzeug versions and we don't want to break
# people's code if they are using this fixer in a test that
# does not set the SERVER_SOFTWARE key.
if 'SERVER_SOFTWARE' not in environ or \
environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
environ.get('PATH_INFO', '')
environ['SCRIPT_NAME'] = self.app_root.strip('/')
return self.app(environ, start_response)
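# Usage sketch (comment added for illustration; ``application`` is a hypothetical WSGI
# callable):
#
#     from werkzeug.contrib.fixers import CGIRootFix
#     application = CGIRootFix(application, app_root='/myapp')
#
# The wrapped app then behaves as if it were mounted at ``app_root`` instead of the CGI
# script's own path.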
# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
class PathInfoFromRequestUriFix(object):
"""On windows environment variables are limited to the system charset
which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get('SCRIPT_NAME', ''))
if request_uri.startswith(script_name):
environ['PATH_INFO'] = request_uri[len(script_name):] \
.split('?', 1)[0]
break
return self.app(environ, start_response)
class ProxyFix(object):
"""This middleware can be applied to add HTTP proxy support to an
application that was not designed with HTTP proxies in mind. It
sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers.
If you have more than one proxy server in front of your app, set
`num_proxies` accordingly.
Do not use this middleware in non-proxy setups for security reasons.
The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
`werkzeug.proxy_fix.orig_http_host`.
:param app: the WSGI application
:param num_proxies: the number of proxy servers in front of the app.
"""
def __init__(self, app, num_proxies=1):
self.app = app
self.num_proxies = num_proxies
def get_remote_addr(self, forwarded_for):
"""Selects the new remote addr from the given list of ips in
X-Forwarded-For. By default it picks the one that the `num_proxies`
proxy server provides. Before 0.9 it would always pick the first.
.. versionadded:: 0.8
"""
if len(forwarded_for) >= self.num_proxies:
return forwarded_for[-1 * self.num_proxies]
def __call__(self, environ, start_response):
getter = environ.get
forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
environ.update({
'werkzeug.proxy_fix.orig_wsgi_url_scheme': getter('wsgi.url_scheme'),
'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
})
forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
remote_addr = self.get_remote_addr(forwarded_for)
if remote_addr is not None:
environ['REMOTE_ADDR'] = remote_addr
if forwarded_host:
environ['HTTP_HOST'] = forwarded_host
if forwarded_proto:
environ['wsgi.url_scheme'] = forwarded_proto
return self.app(environ, start_response)
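# Usage sketch (comment added for illustration, not part of the original module): with two
# proxy servers in front of the application -- e.g. a load balancer plus nginx -- and
# assuming ``app`` is a Flask-style object that exposes ``wsgi_app``:
#
#     from werkzeug.contrib.fixers import ProxyFix
#     app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=2)
#
# A plain WSGI callable can be wrapped directly: ``application = ProxyFix(application)``.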
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
is for example useful to remove the `Date` header from responses if you
are using a server that adds that header, no matter if it's present or
not or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
- removing of `Vary` headers for unsupported mimetypes which
causes troubles with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: http://support.microsoft.com/kb/824847/en-us
- removes offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect affected Internet Explorer versions it won't touch
the request / response.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
# by Michael Axiak and is available as part of the Django project:
# http://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get('content-type', '')
mimetype, options = parse_options_header(header)
if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
headers.pop('vary', None)
if self.fix_attach and 'content-disposition' in headers:
pragma = parse_set_header(headers.get('pragma', ''))
pragma.discard('no-cache')
header = pragma.to_header()
if not header:
headers.pop('pragma', '')
else:
headers['Pragma'] = header
header = headers.get('cache-control', '')
if header:
cc = parse_cache_control_header(header,
cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop('cache-control', '')
else:
headers['Cache-Control'] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
headers = Headers(headers)
self.fix_headers(environ, headers, status)
return start_response(status, headers.to_wsgi_list(), exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != 'msie':
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)
| apache-2.0 | 533,639,904,456,775,500 | 39.77459 | 82 | 0.625792 | false |
nvoron23/socialite | jython/Lib/test/test_closuregen.py | 24 | 1235 | """Jython bug with cell variables and yield"""
from __future__ import generators
def single_closure_single_value():
value = 0
a_closure = lambda : value
yield a_closure()
yield a_closure()
def single_closure_multiple_values():
value = 0
a_closure = lambda : value
yield a_closure()
value = 1
yield a_closure()
def multiple_closures_single_value():
value = 0
a_closure = lambda : value
yield a_closure()
a_closure = lambda : value
yield a_closure()
def multiple_closures_multiple_values():
value = 0
a_closure = lambda : value
yield a_closure()
value = 1
a_closure = lambda : value
yield a_closure()
tests={}
for name in dir():
if 'closure' in name:
test = eval(name)
if name.endswith('single_value'):
expected = [0,0]
else:
expected = [0,1]
tests[test] = expected
def test_main(verbose=None):
from test.test_support import verify
import sys
for func in tests:
expected = tests[func]
result = list(func())
verify(result == expected, "%s: expected %s, got %s" % (
func.__name__, expected, result))
if __name__ == '__main__': test_main(1)
| apache-2.0 | -597,127,672,516,855,300 | 23.215686 | 64 | 0.587854 | false |
variablehair/Eggplantato | cogs/productivity.py | 1 | 14184 | import discord
from discord.ext import commands
import sqlite3
from cogs.utils import errors
from ast import literal_eval
import re
import datetime
import aiohttp
from urllib.parse import parse_qs
from lxml import etree
# Google functions are copied from RoboDanny, accessed on April 6, 2017. https://github.com/Rapptz/RoboDanny/blob/master/cogs/buttons.py#L78
def DayParser(str_arg):
"""Parses the day of the week from a string."""
compiled = re.compile(r"^(?:(?P<monday>(?:m(?:on)?))|(?P<tuesday>tu(?:es?)?)|(?P<wednesday>w(?:ed)?)|"
"(?P<thursday>th(?:u(?:rs)?)?)|(?P<friday>f(?:ri)?)|(?P<saturday>sat?)|(?P<sunday>sun?))$|^(?P<dayfull>"
"(?:(?:mon)|(?:tues)|(?:wednes)|(?:thurs)|(?:fri)|(?:satur)|(?:sun))day)$")
match = compiled.match(str_arg.lower())
if match is None or not match.group(0):
raise commands.BadArgument('Failed to parse day.')
elif match.group("dayfull"):
return match.group("dayfull")
else:
return [k for k, v in match.groupdict().items() if v][0]
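# Illustrative behaviour (comment added for clarity, not part of the original cog):
# abbreviations are expanded to the full lowercase day name, and anything else raises
# commands.BadArgument, e.g.
#
#     >>> DayParser("Wed")
#     'wednesday'
#     >>> DayParser("saturday")
#     'saturday'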
def TimeParser(str_arg):
"""Parses an amount of seconds from a string of *w*d*h*m*s. Max time of 1 year.
Does not require knowledge of the user's current time. Returns timedelta object."""
compiled = re.compile(r"^(?P<weeks>[0-9]{1,2}[Ww])?(?P<days>[0-9]{1,3}[Dd])?"
"(?P<hours>[0-9]{1,4}[Hh])?(?P<minutes>[0-9]{1,6}[Mm])?(?P<seconds>[0-9]{1,8}[Ss])?$")
match = compiled.match(str_arg)
if match is None or not match.group(0):
raise commands.BadArgument('Failed to parse time.')
int_seconds = 0
if match.group("weeks") is not None:
int_seconds += int(match.group("weeks")) * 604800
if match.group("days") is not None:
int_seconds += int(match.group("days")) * 86400
if match.group("hours") is not None:
int_seconds += int(match.group("hours")) * 3600
if match.group("minutes") is not None:
int_seconds += int(match.group("minutes")) * 60
if match.group("seconds") is not None:
int_seconds += int(match.group("seconds"))
if int_seconds < 31540000: #1 year
return datetime.timedelta(seconds=int_seconds)
else:
raise commands.BadArgument('Time specified cannot exceed 1 year.')
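# Illustrative behaviour (comment added for clarity; assumes the unit letters are stripped
# before conversion as above):
#
#     >>> TimeParser("1d2h30m").total_seconds()
#     95400.0
#     >>> TimeParser("2w").total_seconds()
#     1209600.0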
def AmPmParser(str_arg):
"""Parses a 4 digit time literal from user input with optional AM or PM"""
compiled = re.compile(r"(?P<timelit>^[0-9]{1,4})(?P<am>[Aa][Mm]?$)?(?P<pm>[Pp][Mm]?$)?")
match = compiled.match(str_arg)
if match is None or not match.group(0):
raise commands.BadArgument('Failed to parse time.')
int_timelit = int(match.group("timelit"))
# check if <=12 with ampm (e.g. 12am), if >24 when it's a 2 char input, or >2399
if int_timelit > 2399 or (len(match.group("timelit")) <= 2 and int_timelit > 24) \
or (int_timelit > 12 and (match.group("pm") or match.group("am"))):
        raise commands.BadArgument('Invalid time specified.')
class Productivity():
"""Tools to make your life easier (?)"""
def __init__(self, bot):
self.bot = bot
self.todoconn = sqlite3.connect('data/todo.db')
try:
c = self.todoconn.cursor()
c.execute("SELECT user FROM lists WHERE user='debug'")
except sqlite3.OperationalError as e:
print(str(e))
raise errors.DatabaseError("Error loading todo database; did you run initdb?")
finally:
self.todoconn.close()
@commands.command(pass_context=True, aliases=[""])
async def settime(self, ctx, str_usertime):
"""Set your time zone. You can tell me your UTC offset (`-8` or `+350`) **or** the most recent multiple of 30 \
minutes (e.g. if it is currently `1:54PM`, you use `settime 1330` or `settime 130pm`)"""
pass #todo: this
@commands.command(pass_context=True, aliases=["remind", "timer"])
    async def remindme(self, ctx, str_left: str, str_right: str = ''):
if str_right:
# do stuff
await ctx.send("2 args")
else:
try:
await ctx.send(DayParser(str_left))
except commands.BadArgument:
await ctx.send("Your day is not formatted correctly!"
" You can use shorthand `(m or TU or wEd)` or the full name of the day `(sunday)`")
    @commands.group(pass_context=True, invoke_without_command=True)
async def todo(self, ctx):
"""A simple todo list. `todo add [task]` to add something, `todo` to look at your list. `todo help` for more commands"""
if ctx.invoked_subcommand is None:
await ctx.invoke(self._todoget)
    @todo.command(name="get", aliases=["list"])
async def _todoget(self, ctx):
"""Prints your todo list"""
self.todoconn = sqlite3.connect('data/todo.db')
c = self.todoconn.cursor()
c.execute("SELECT tasks FROM lists WHERE user=?", (str(ctx.message.author.id),))
tup_data = c.fetchone()
if tup_data is None:
await ctx.send("You don't have anything in your todo list! Add something with `todo add [task]`.")
else:
list_user_tasks = literal_eval(tup_data[0])
str_ret = ":eggplant: Your todo list:\n"
for i in range(len(list_user_tasks)):
str_ret = "".join([str_ret, str(i+1), ": ", list_user_tasks[i], "\n"])
await ctx.send(str_ret)
self.todoconn.close()
@todo.command(pass_context=True, name="add")
async def _todoadd(self, ctx, *args):
"""Adds a task to your list"""
        if not args:  # *args arrives as a tuple, so test truthiness rather than comparing to a list
await ctx.send("You must specify a task to add!")
return
else:
self.todoconn = sqlite3.connect('data/todo.db')
c = self.todoconn.cursor()
c.execute("SELECT tasks FROM lists WHERE user=?", (str(ctx.message.author.id),))
tup_data = c.fetchone()
str_task = " ".join(args)
if tup_data is None:
c.execute("INSERT INTO lists VALUES (?,?)", (str(ctx.message.author.id), "[\'" + str_task + "\']"))
else:
list_user_tasks = literal_eval(tup_data[0])
list_user_tasks.append(str_task)
c.execute("UPDATE lists SET tasks=? WHERE user=?", (str(list_user_tasks), str(ctx.message.author.id)))
self.todoconn.commit()
self.todoconn.close()
await ctx.send("{} added successfully!".format(str_task))
@todo.command(name="remove", aliases=["delete", "rm", "del"])
async def _todoremove(self, ctx, *args):
"""Removes a task from your list"""
if len(args) == 0:
await ctx.send("You must specify the number of a task to remove.")
return
self.todoconn = sqlite3.connect('data/todo.db')
c = self.todoconn.cursor()
c.execute("SELECT tasks FROM lists WHERE user=?", (str(ctx.message.author.id),))
tup_data = c.fetchone()
if tup_data is None:
await ctx.send("Your todo list is empty.")
else:
list_user_tasks = literal_eval(tup_data[0])
try:
int_taskid = int(args[0])
if int_taskid <= 0 or int_taskid > len(list_user_tasks):
await ctx.send("Please enter a valid index!")
else:
await ctx.send("Task `{}` removed successfully.".format(list_user_tasks.pop(int_taskid - 1)))
c.execute("UPDATE lists SET tasks=? WHERE user=?", (str(list_user_tasks), str(ctx.message.author.id)))
self.todoconn.commit()
except ValueError:
str_task = " ".join(args)
try:
list_user_tasks.remove(str_task)
await ctx.send("Task `{}` removed successfully.".format(str_task))
c.execute("UPDATE lists SET tasks=? WHERE user=?", (str(list_user_tasks), str(ctx.message.author.id)))
self.todoconn.commit()
except ValueError:
await ctx.send("Task `{}` not found.".format(str_task))
self.todoconn.close()
def parse_google_card(self, node):
if node is None:
return None
e = discord.Embed(colour=0x738bd7)
# check if it's a calculator card:
calculator = node.find(".//table/tr/td/span[@class='nobr']/h2[@class='r']")
if calculator is not None:
e.title = 'Calculator'
e.description = ''.join(calculator.itertext())
return e
parent = node.getparent()
# check for unit conversion card
unit = parent.find(".//ol//div[@class='_Tsb']")
if unit is not None:
e.title = 'Unit Conversion'
e.description = ''.join(''.join(n.itertext()) for n in unit)
return e
# check for currency conversion card
currency = parent.find(".//ol/table[@class='std _tLi']/tr/td/h2")
if currency is not None:
e.title = 'Currency Conversion'
e.description = ''.join(currency.itertext())
return e
# check for release date card
release = parent.find(".//div[@id='_vBb']")
if release is not None:
try:
e.description = ''.join(release[0].itertext()).strip()
e.title = ''.join(release[1].itertext()).strip()
return e
except:
return None
# check for definition card
words = parent.find(".//ol/div[@class='g']/div/h3[@class='r']/div")
if words is not None:
try:
definition_info = words.getparent().getparent()[1] # yikes
except:
pass
else:
try:
# inside is a <div> with two <span>
# the first is the actual word, the second is the pronunciation
e.title = words[0].text
e.description = words[1].text
except:
return None
# inside the table there's the actual definitions
# they're separated as noun/verb/adjective with a list
# of definitions
for row in definition_info:
if len(row.attrib) != 0:
# definitions are empty <tr>
# if there is something in the <tr> then we're done
# with the definitions
break
try:
data = row[0]
lexical_category = data[0].text
body = []
for index, definition in enumerate(data[1], 1):
body.append('%s. %s' % (index, definition.text))
e.add_field(name=lexical_category, value='\n'.join(body), inline=False)
except:
continue
return e
# check for "time in" card
time_in = parent.find(".//ol//div[@class='_Tsb _HOb _Qeb']")
if time_in is not None:
try:
time_place = ''.join(time_in.find("span[@class='_HOb _Qeb']").itertext()).strip()
the_time = ''.join(time_in.find("div[@class='_rkc _Peb']").itertext()).strip()
the_date = ''.join(time_in.find("div[@class='_HOb _Qeb']").itertext()).strip()
except:
return None
else:
e.title = time_place
e.description = '%s\n%s' % (the_time, the_date)
return e
# check for weather card
# this one is the most complicated of the group lol
# everything is under a <div class="e"> which has a
# <h3>{{ weather for place }}</h3>
# string, the rest is fucking table fuckery.
weather = parent.find(".//ol//div[@class='e']")
if weather is None:
return None
location = weather.find('h3')
if location is None:
return None
e.title = ''.join(location.itertext())
table = weather.find('table')
if table is None:
return None
# This is gonna be a bit fucky.
# So the part we care about is on the second data
# column of the first tr
try:
tr = table[0]
img = tr[0].find('img')
category = img.get('alt')
image = 'https:' + img.get('src')
temperature = tr[1].xpath("./span[@class='wob_t']//text()")[0]
except:
return None # RIP
else:
e.set_thumbnail(url=image)
e.description = '*%s*' % category
e.add_field(name='Temperature', value=temperature)
# On the 4th column it tells us our wind speeds
try:
wind = ''.join(table[3].itertext()).replace('Wind: ', '')
except:
return None
else:
e.add_field(name='Wind', value=wind)
# On the 5th column it tells us our humidity
try:
humidity = ''.join(table[4][0].itertext()).replace('Humidity: ', '')
except:
return None
else:
e.add_field(name='Humidity', value=humidity)
return e
async def get_google_entries(self, query):
params = {
'q': query,
'safe': 'on'
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'
}
# list of URLs
entries = []
# the result of a google card, an embed
card = None
async with aiohttp.get('https://www.google.com/search', params=params, headers=headers) as resp:
if resp.status != 200:
raise RuntimeError('Google somehow failed to respond.')
root = etree.fromstring(await resp.text(), etree.HTMLParser())
# with open('google.html', 'w', encoding='utf-8') as f:
# f.write(etree.tostring(root, pretty_print=True).decode('utf-8'))
"""
Tree looks like this.. sort of..
<div class="g">
...
<h3>
<a href="/url?q=<url>" ...>title</a>
</h3>
...
<span class="st">
<span class="f">date here</span>
summary here, can contain <em>tag</em>
</span>
</div>
"""
card_node = root.find(".//div[@id='topstuff']")
card = self.parse_google_card(card_node)
search_nodes = root.findall(".//div[@class='g']")
for node in search_nodes:
url_node = node.find('.//h3/a')
if url_node is None:
continue
url = url_node.attrib['href']
if not url.startswith('/url?'):
continue
url = parse_qs(url[5:])['q'][0] # get the URL from ?q query string
# if I ever cared about the description, this is how
entries.append(url)
# short = node.find(".//span[@class='st']")
# if short is None:
# entries.append((url, ''))
# else:
# text = ''.join(short.itertext())
# entries.append((url, text.replace('...', '')))
return card, entries
@commands.command(aliases=['google'], pass_context=True)
async def g(self, ctx, *, query):
"""Searches google and gives you top result."""
try:
card, entries = await self.get_google_entries(query)
except RuntimeError as e:
await ctx.send(str(e))
else:
if card:
value = '\n'.join(entries[:3])
if value:
card.add_field(name='Search Results', value=value, inline=False)
return await ctx.send(embed=card)
if len(entries) == 0:
return await ctx.send('No results found... sorry.')
next_two = entries[1:3]
first_entry = entries[0]
if first_entry[-1] == ')':
first_entry = first_entry[:-1] + '%29'
if next_two:
formatted = '\n'.join(map(lambda x: '<%s>' % x, next_two))
msg = '{}\n\n**See also:**\n{}'.format(first_entry, formatted)
else:
msg = first_entry
await ctx.send(msg)
def setup(bot):
bot.add_cog(Productivity(bot)) | mit | 1,983,462,240,837,057,800 | 31.695962 | 140 | 0.616822 | false |
bob-white/UnityIronPythonConsole | Assets/IronPythonConsole/Plugins/Lib/zipfile.py | 81 | 54020 | """
Read and write ZIP files.
"""
import struct, os, time, sys, shutil
import binascii, cStringIO, stat
import io
import re
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
"ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
class BadZipfile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile # The exception raised by this module
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"
stringEndArchive = "PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
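# Quick illustration (comment added for clarity; 'archive.zip' is a hypothetical path):
# is_zipfile only inspects the magic number, so either a filesystem path or an already
# open binary file object can be passed, e.g.
#
#     >>> is_zipfile('archive.zip')
#     True
#     >>> with open('archive.zip', 'rb') as fp:
#     ...     is_zipfile(fp)
#     True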
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipfile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append("")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
endrec = list(struct.unpack(structEndArchive, recData))
comment = data[start+sizeEndCentDir:]
# check that comment length is correct
if endrec[_ECD_COMMENT_SIZE] == len(comment):
# Append the archive comment and start offset
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = "" # Comment for each file
self.extra = "" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = 20 # Version which created ZIP archive
self.extract_version = 20 # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
self.extract_version = max(45, self.extract_version)
self.create_version = max(45, self.extract_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
if isinstance(self.filename, unicode):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
else:
return self.filename, self.flag_bits
def _decodeFilename(self):
if self.flag_bits & 0x800:
return self.filename.decode('utf-8')
else:
return self.filename
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError, "Corrupt extra field %s"%(ln,)
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFFL:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffffL:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
def __call__(self, c):
"""Decrypt a single character."""
c = ord(c)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
c = chr(c)
self._UpdateKeys(c)
return c
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(r'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None):
self._fileobj = fileobj
self._decrypter = decrypter
self._compress_type = zipinfo.compress_type
self._compress_size = zipinfo.compress_size
self._compress_left = zipinfo.compress_size
if self._compress_type == ZIP_DEFLATED:
self._decompressor = zlib.decompressobj(-15)
self._unconsumed = ''
self._readbuffer = ''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find('\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = ''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == '':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + '\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
buf = ''
if n is None:
n = -1
while True:
if n < 0:
data = self.read1(n)
elif n > len(buf):
data = self.read1(n - len(buf))
else:
return buf
if len(data) == 0:
return buf
buf += data
def _update_crc(self, newdata, eof):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if eof and self._running_crc != self._expected_crc:
raise BadZipfile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
# Simplify algorithm (branching) by transforming negative n to large n.
if n < 0 or n is None:
n = self.MAX_N
# Bytes available in read buffer.
len_readbuffer = len(self._readbuffer) - self._offset
# Read from file.
if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
nbytes = n - len_readbuffer - len(self._unconsumed)
nbytes = max(nbytes, self.MIN_READ_SIZE)
nbytes = min(nbytes, self._compress_left)
data = self._fileobj.read(nbytes)
self._compress_left -= len(data)
if data and self._decrypter is not None:
data = ''.join(map(self._decrypter, data))
if self._compress_type == ZIP_STORED:
self._update_crc(data, eof=(self._compress_left==0))
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
else:
# Prepare deflated bytes for decompression.
self._unconsumed += data
# Handle unconsumed data.
if (len(self._unconsumed) > 0 and n > len_readbuffer and
self._compress_type == ZIP_DEFLATED):
data = self._decompressor.decompress(
self._unconsumed,
max(n - len_readbuffer, self.MIN_READ_SIZE)
)
self._unconsumed = self._decompressor.unconsumed_tail
eof = len(self._unconsumed) == 0 and self._compress_left == 0
if eof:
data += self._decompressor.flush()
self._update_crc(data, eof=eof)
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
# Read from buffer.
data = self._readbuffer[self._offset: self._offset + n]
self._offset += len(data)
return data
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError,\
"Compression requires the (missing) zlib module"
else:
raise RuntimeError, "That compression method is not supported"
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self.comment = ''
# Check if we were passed a file-like object
if isinstance(file, basestring):
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
if key == 'r':
self._GetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipfile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
if not self._filePassed:
self.fp.close()
self.fp = None
raise RuntimeError, 'Mode must be "r", "w" or "a"'
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _GetContents(self):
"""Read the directory, making sure we close the file if the format
is bad."""
try:
self._RealGetContents()
except BadZipfile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipfile("File is not a zip file")
if not endrec:
raise BadZipfile, "File is not a zip file"
if self.debug > 1:
print endrec
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self.comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print "given, inferred, offset", offset_cd, inferred, concat
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = cStringIO.StringIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if centdir[0:4] != stringCentralDir:
raise BadZipfile, "Bad magic number for central directory"
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print centdir
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
x.filename = x._decodeFilename()
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print "total", total
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self):
"""Print a table of contents for the zip file."""
print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
f = self.open(zinfo.filename, "r")
while f.read(chunk_size): # Check CRC-32
pass
except BadZipfile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
self.pwd = pwd
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
return self.open(name, "r", pwd).read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
if not self.fp:
raise RuntimeError, \
"Attempt to read ZIP archive that was already closed"
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = open(self.filename, 'rb')
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if fheader[0:4] != stringFileHeader:
raise BadZipfile, "Bad magic number for file header"
fheader = struct.unpack(structFileHeader, fheader)
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if fname != zinfo.orig_filename:
raise BadZipfile, \
'File name in directory "%s" and header "%s" differ.' % (
zinfo.orig_filename, fname)
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError, "File %s is encrypted, " \
"password required for extraction" % name
zd = _ZipDecrypter(pwd)
            # The first 12 bytes in the cypher stream are an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
bytes = zef_file.read(12)
h = map(zd, bytes[0:12])
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if ord(h[11]) != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd)
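    # Illustrative sketch (not in the original source): reading an encrypted
    # member through open(), assuming the archive holds "secret.txt" encrypted
    # with the password "s3cret".
    #
    #   zf.setpassword("s3cret")             # default for all members
    #   data = zf.open("secret.txt").read()  # or pass pwd= per call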
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
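    # Illustrative sketch (not in the original source): extracting only the
    # text members into a hypothetical "out" directory.
    #
    #   zf.extractall(path="out",
    #                 members=[n for n in zf.namelist() if n.endswith(".txt")])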
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
# Strip trailing path separator, unless it represents the root.
if (targetpath[-1:] in (os.path.sep, os.path.altsep)
and len(os.path.splitdrive(targetpath)[1]) > 1):
targetpath = targetpath[:-1]
# don't include leading "/" from file name if present
if member.filename[0] == '/':
targetpath = os.path.join(targetpath, member.filename[1:])
else:
targetpath = os.path.join(targetpath, member.filename)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
source = self.open(member, pwd=pwd)
target = file(targetpath, "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print "Duplicate name:", zinfo.filename
if self.mode not in ("w", "a"):
raise RuntimeError, 'write() requires mode "w" or "a"'
if not self.fp:
raise RuntimeError, \
"Attempt to write ZIP archive that was already closed"
if zinfo.compress_type == ZIP_DEFLATED and not zlib:
raise RuntimeError, \
"Compression requires the (missing) zlib module"
if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
raise RuntimeError, \
"That compression method is not supported"
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader())
return
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
zinfo.file_size = file_size = 0
self.fp.write(zinfo.FileHeader())
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
# Seek backwards and write CRC and file sizes
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset + 14, 0)
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
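    # Illustrative sketch (not in the original source): adding a file from disk
    # under a different archive name and forcing per-member compression.
    #
    #   zf.write("/tmp/report.csv", arcname="data/report.csv",
    #            compress_type=ZIP_DEFLATED)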
def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
"""Write a file into the archive. The contents is the string
'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.file_size = len(bytes) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
bytes = co.compress(bytes) + co.flush()
zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self.fp.write(zinfo.FileHeader())
self.fp.write(bytes)
self.fp.flush()
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
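    # Illustrative sketch (not in the original source): writestr() accepts
    # either an archive name or a prepared ZipInfo carrying its own metadata.
    #
    #   info = ZipInfo("logs/run.log", date_time=(2012, 1, 1, 0, 0, 0))
    #   info.compress_type = ZIP_DEFLATED
    #   zf.writestr(info, "log body")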
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffffL
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version)
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print >>sys.stderr, (structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
# check for valid comment length
if len(self.comment) >= ZIP_MAX_COMMENT:
if self.debug > 0:
                    msg = 'Archive comment is too long; truncating to %d bytes' \
                          % ZIP_MAX_COMMENT
                    print msg
self.comment = self.comment[:ZIP_MAX_COMMENT]
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def writepy(self, pathname, basename = ""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print "Adding package in", pathname, "as", basename
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print "Adding files from directory", pathname
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError, \
'Files added with writepy() must end with ".py"'
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print "Adding file", arcname
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
if os.path.isfile(file_pyo) and \
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
fname = file_pyo # Use .pyo file
elif not os.path.isfile(file_pyc) or \
os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
import py_compile
if self.debug:
print "Compiling", file_py
try:
py_compile.compile(file_py, file_pyc, None, True)
except py_compile.PyCompileError,err:
print err.msg
fname = file_pyc
else:
fname = file_pyc
archivename = os.path.split(fname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print USAGE
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
zf.printdir()
zf.close()
elif args[0] == '-t':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print "Done testing"
elif args[0] == '-e':
if len(args) != 3:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
zf.close()
elif args[0] == '-c':
if len(args) < 3:
print USAGE
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
zf = ZipFile(args[1], 'w', allowZip64=True)
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
zf.close()
if __name__ == "__main__":
main()
| mpl-2.0 | -4,242,678,457,800,287,000 | 36.592206 | 103 | 0.555091 | false |
gioman/QGIS | python/plugins/processing/algs/saga/SagaAlgorithmProvider.py | 2 | 5490 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SagaAlgorithmProvider.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsProcessingProvider,
QgsProcessingUtils,
QgsMessageLog)
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.tools.system import isWindows, isMac
from .SagaAlgorithm import SagaAlgorithm
from .SplitRGBBands import SplitRGBBands
from . import SagaUtils
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class SagaAlgorithmProvider(QgsProcessingProvider):
def __init__(self):
super().__init__()
self.algs = []
def load(self):
ProcessingConfig.settingIcons[self.name()] = self.icon()
ProcessingConfig.addSetting(Setting("SAGA", 'ACTIVATE_SAGA',
self.tr('Activate'), True))
ProcessingConfig.addSetting(Setting("SAGA",
SagaUtils.SAGA_IMPORT_EXPORT_OPTIMIZATION,
self.tr('Enable SAGA Import/Export optimizations'), False))
ProcessingConfig.addSetting(Setting("SAGA",
SagaUtils.SAGA_LOG_COMMANDS,
self.tr('Log execution commands'), True))
ProcessingConfig.addSetting(Setting("SAGA",
SagaUtils.SAGA_LOG_CONSOLE,
self.tr('Log console output'), True))
ProcessingConfig.readSettings()
self.refreshAlgorithms()
return True
def unload(self):
ProcessingConfig.removeSetting('ACTIVATE_SAGA')
ProcessingConfig.removeSetting(SagaUtils.SAGA_LOG_CONSOLE)
ProcessingConfig.removeSetting(SagaUtils.SAGA_LOG_COMMANDS)
def isActive(self):
return ProcessingConfig.getSetting('ACTIVATE_SAGA')
def setActive(self, active):
ProcessingConfig.setSettingValue('ACTIVATE_SAGA', active)
def loadAlgorithms(self):
version = SagaUtils.getInstalledVersion(True)
if version is None:
QgsMessageLog.logMessage(self.tr('Problem with SAGA installation: SAGA was not found or is not correctly installed'),
self.tr('Processing'), QgsMessageLog.CRITICAL)
return
if not version.startswith('2.3.'):
QgsMessageLog.logMessage(self.tr('Problem with SAGA installation: unsupported SAGA version found.'),
self.tr('Processing'),
QgsMessageLog.CRITICAL)
return
self.algs = []
folder = SagaUtils.sagaDescriptionPath()
for descriptionFile in os.listdir(folder):
if descriptionFile.endswith('txt'):
try:
alg = SagaAlgorithm(os.path.join(folder, descriptionFile))
if alg.name().strip() != '':
self.algs.append(alg)
else:
QgsMessageLog.logMessage(self.tr('Could not open SAGA algorithm: {}'.format(descriptionFile)),
self.tr('Processing'), QgsMessageLog.CRITICAL)
except Exception as e:
QgsMessageLog.logMessage(self.tr('Could not open SAGA algorithm: {}\n{}'.format(descriptionFile, str(e))),
self.tr('Processing'), QgsMessageLog.CRITICAL)
self.algs.append(SplitRGBBands())
for a in self.algs:
self.addAlgorithm(a)
def name(self):
version = SagaUtils.getInstalledVersion()
return 'SAGA ({})'.format(version) if version is not None else 'SAGA'
def id(self):
return 'saga'
def supportedOutputVectorLayerExtensions(self):
return ['shp']
def supportedOutputRasterLayerExtensions(self):
return ['sdat']
def getSupportedOutputTableLayerExtensions(self):
return ['dbf']
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'saga.png'))
def tr(self, string, context=''):
if context == '':
context = 'SagaAlgorithmProvider'
return QCoreApplication.translate(context, string)
| gpl-2.0 | -1,666,347,930,441,016,800 | 39.970149 | 129 | 0.543169 | false |
sidhujag/devcoin093 | share/qt/extract_strings_qt.py | 321 | 1873 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| mit | -1,139,222,108,549,046,300 | 23.012821 | 80 | 0.578217 | false |
asimshankar/tensorflow | tensorflow/contrib/cluster_resolver/cluster_resolver_initialization_test.py | 14 | 2135 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests to ensure ClusterResolvers are usable via the old contrib path."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver import SimpleClusterResolver
from tensorflow.contrib.cluster_resolver.python.training import cluster_resolver
from tensorflow.contrib.cluster_resolver.python.training import UnionClusterResolver
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class ClusterResolverInitializationTest(test.TestCase):
def testCreateSimpleClusterResolverFromLib(self):
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
cluster_resolver.SimpleClusterResolver(base_cluster_spec)
def testCreateSimpleClusterResolver(self):
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
SimpleClusterResolver(base_cluster_spec)
def testCreateUnionClusterResolver(self):
base_cluster_spec = server_lib.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
simple_cr = SimpleClusterResolver(base_cluster_spec)
UnionClusterResolver(simple_cr)
if __name__ == "__main__":
test.main()
| apache-2.0 | -513,559,722,014,824,640 | 39.283019 | 84 | 0.709602 | false |
rlefevre1/hpp-rbprm-corba | script/scenarios/demos/siggraph_asia/plan_execute.py | 10 | 2981 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
import time
from hpp.corbaserver.rbprm.tools.cwc_trajectory_helper import step, clean,stats, saveAllData, play_traj
from hpp.gepetto import PathPlayer
def act(i, numOptim = 0, use_window = 0, friction = 0.5, optim_effectors = True, verbose = False, draw = False):
return step(fullBody, configs, i, numOptim, pp, limbsCOMConstraints, 0.4, optim_effectors = optim_effectors, time_scale = 20., useCOMConstraints = True, use_window = use_window,
verbose = verbose, draw = draw)
def play(frame_rate = 1./24.):
play_traj(fullBody,pp,frame_rate)
def saveAll(name):
saveAllData(fullBody, r, name)
def initConfig():
r.client.gui.setVisibility("hrp2_14", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
r(q_init)
def endConfig():
r.client.gui.setVisibility("hrp2_14", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
r(q_goal)
def rootPath():
tp.cl.problem.selectProblem("rbprm_path")
r.client.gui.setVisibility("hrp2_14", "OFF")
tp.r.client.gui.setVisibility("toto", "OFF")
r.client.gui.setVisibility("hyq", "OFF")
r.client.gui.setVisibility("hrp2_trunk_flexible", "ON")
tp.pp(0)
r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
r.client.gui.setVisibility("hyq", "ON")
tp.cl.problem.selectProblem("default")
def genPlan(stepsize=0.1, rob = 2, filt = True):
r.client.gui.setVisibility("hrp2_14", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
global configs
start = time.clock()
configs = fullBody.interpolate(stepsize, 1, rob, filt)
end = time.clock()
print "Contact plan generated in " + str(end-start) + "seconds"
def contactPlan(step = 0.5, rob = 2):
r.client.gui.setVisibility("hyq", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hyq_trunk_large", "OFF")
for i in range(0,len(configs)):
r(configs[i]);
time.sleep(step)
def a():
print "initial configuration"
initConfig()
def b():
print "end configuration"
endConfig()
def c():
print "displaying root path"
rootPath()
def d(step=0.1, filt = True):
print "computing contact plan"
genPlan(step, filt = filt)
return configs
def e(step = 0.5, rob = 2, qs=None):
if(qs != None):
global configs
configs = qs[:]
print "displaying contact plan"
contactPlan(step, rob)
r = None
tp = None
pp = None
fullBody = None
def init_plan_execute(robot, viewer, pathplanner, pplayer):
global r
global tp
global pp
global fullBody
r = viewer
tp = pathplanner
pp = pplayer
fullBody = robot
| lgpl-3.0 | -7,302,330,887,147,135,000 | 27.122642 | 178 | 0.70312 | false |
fermuch/PokemonGo-Bot | pokemongo_bot/metrics.py | 10 | 4952 | import time
from datetime import timedelta
class Metrics(object):
def __init__(self, bot):
self.bot = bot
self.start_time = time.time()
self.dust = {'start': None, 'latest': None}
self.xp = {'start': None, 'latest': None}
self.distance = {'start': None, 'latest': None}
self.encounters = {'start': None, 'latest': None}
self.throws = {'start': None, 'latest': None}
self.captures = {'start': None, 'latest': None}
self.visits = {'start': None, 'latest': None}
self.unique_mons = {'start': None, 'latest': None}
self.evolutions = {'start': None, 'latest': None}
self.releases = 0
self.highest_cp = {'cp': 0, 'desc': ''}
self.most_perfect = {'potential': 0, 'desc': ''}
def runtime(self):
return timedelta(seconds=round(time.time() - self.start_time))
def xp_earned(self):
return self.xp['latest'] - self.xp['start']
def xp_per_hour(self):
return self.xp_earned()/(time.time() - self.start_time)*3600
def distance_travelled(self):
return self.distance['latest'] - self.distance['start']
def num_encounters(self):
return self.encounters['latest'] - self.encounters['start']
def num_throws(self):
return self.throws['latest'] - self.throws['start']
def num_captures(self):
return self.captures['latest'] - self.captures['start']
def num_visits(self):
return self.visits['latest'] - self.visits['start']
def num_new_mons(self):
return self.unique_mons['latest'] - self.unique_mons['start']
def num_evolutions(self):
return self.evolutions['latest'] - self.evolutions['start']
def earned_dust(self):
return self.dust['latest'] - self.dust['start']
def captured_pokemon(self, name, cp, iv_display, potential):
if cp > self.highest_cp['cp']:
self.highest_cp = \
{'cp': cp, 'desc': '{} [CP: {}] [IV: {}] Potential: {} '
.format(name, cp, iv_display, potential)}
if potential > self.most_perfect['potential']:
self.most_perfect = \
{'potential': potential, 'desc': '{} [CP: {}] [IV: {}] Potential: {} '
.format(name, cp, iv_display, potential)}
return
def released_pokemon(self, count=1):
self.releases += count
def capture_stats(self):
request = self.bot.api.create_request()
request.get_inventory()
request.get_player()
response_dict = request.call()
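        # Assumed response layout (sketch, not from the original code): the
        # lookups below imply a nested dict roughly shaped like
        #   {'responses': {
        #       'GET_PLAYER': {'player_data': {'currencies': [...]}},
        #       'GET_INVENTORY': {'inventory_delta': {'inventory_items': [
        #           {'inventory_item_data': {'player_stats': {...}}}]}}}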
try:
self.dust['latest'] = response_dict['responses']['GET_PLAYER']['player_data']['currencies'][1]['amount']
if self.dust['start'] is None: self.dust['start'] = self.dust['latest']
for item in response_dict['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']:
if 'inventory_item_data' in item:
if 'player_stats' in item['inventory_item_data']:
playerdata = item['inventory_item_data']['player_stats']
self.xp['latest'] = playerdata.get('experience', 0)
if self.xp['start'] is None: self.xp['start'] = self.xp['latest']
self.visits['latest'] = playerdata.get('poke_stop_visits', 0)
if self.visits['start'] is None: self.visits['start'] = self.visits['latest']
self.captures['latest'] = playerdata.get('pokemons_captured', 0)
if self.captures['start'] is None: self.captures['start'] = self.captures['latest']
self.distance['latest'] = playerdata.get('km_walked', 0)
if self.distance['start'] is None: self.distance['start'] = self.distance['latest']
self.encounters['latest'] = playerdata.get('pokemons_encountered', 0)
if self.encounters['start'] is None: self.encounters['start'] = self.encounters['latest']
self.throws['latest'] = playerdata.get('pokeballs_thrown', 0)
if self.throws['start'] is None: self.throws['start'] = self.throws['latest']
self.unique_mons['latest'] = playerdata.get('unique_pokedex_entries', 0)
if self.unique_mons['start'] is None: self.unique_mons['start'] = self.unique_mons['latest']
self.visits['latest'] = playerdata.get('poke_stop_visits', 0)
if self.visits['start'] is None: self.visits['start'] = self.visits['latest']
self.evolutions['latest'] = playerdata.get('evolutions', 0)
if self.evolutions['start'] is None: self.evolutions['start'] = self.evolutions['latest']
except KeyError:
# Nothing we can do if there's no player info.
return
| gpl-3.0 | -5,262,382,501,267,341,000 | 42.823009 | 116 | 0.555533 | false |
Liyier/learning_log | env/Lib/site-packages/django/conf/locale/pt_BR/formats.py | 504 | 1434 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit | 9,131,077,456,077,068,000 | 38.805556 | 90 | 0.558269 | false |
chongtianfeiyu/kbengine | kbe/res/scripts/common/Lib/test/test_getpass.py | 97 | 6437 | import getpass
import os
import unittest
from io import BytesIO, StringIO, TextIOWrapper
from unittest import mock
from test import support
try:
import termios
except ImportError:
termios = None
try:
import pwd
except ImportError:
pwd = None
@mock.patch('os.environ')
class GetpassGetuserTest(unittest.TestCase):
def test_username_takes_username_from_env(self, environ):
expected_name = 'some_name'
environ.get.return_value = expected_name
self.assertEqual(expected_name, getpass.getuser())
def test_username_priorities_of_env_values(self, environ):
environ.get.return_value = None
try:
getpass.getuser()
except ImportError: # in case there's no pwd module
pass
self.assertEqual(
environ.get.call_args_list,
[mock.call(x) for x in ('LOGNAME', 'USER', 'LNAME', 'USERNAME')])
def test_username_falls_back_to_pwd(self, environ):
expected_name = 'some_name'
environ.get.return_value = None
if pwd:
with mock.patch('os.getuid') as uid, \
mock.patch('pwd.getpwuid') as getpw:
uid.return_value = 42
getpw.return_value = [expected_name]
self.assertEqual(expected_name,
getpass.getuser())
getpw.assert_called_once_with(42)
else:
self.assertRaises(ImportError, getpass.getuser)
class GetpassRawinputTest(unittest.TestCase):
def test_flushes_stream_after_prompt(self):
# see issue 1703
stream = mock.Mock(spec=StringIO)
input = StringIO('input_string')
getpass._raw_input('some_prompt', stream, input=input)
stream.flush.assert_called_once_with()
def test_uses_stderr_as_default(self):
input = StringIO('input_string')
prompt = 'some_prompt'
with mock.patch('sys.stderr') as stderr:
getpass._raw_input(prompt, input=input)
stderr.write.assert_called_once_with(prompt)
@mock.patch('sys.stdin')
def test_uses_stdin_as_default_input(self, mock_input):
mock_input.readline.return_value = 'input_string'
getpass._raw_input(stream=StringIO())
mock_input.readline.assert_called_once_with()
@mock.patch('sys.stdin')
def test_uses_stdin_as_different_locale(self, mock_input):
stream = TextIOWrapper(BytesIO(), encoding="ascii")
mock_input.readline.return_value = "Hasło: "
getpass._raw_input(prompt="Hasło: ",stream=stream)
mock_input.readline.assert_called_once_with()
def test_raises_on_empty_input(self):
input = StringIO('')
self.assertRaises(EOFError, getpass._raw_input, input=input)
def test_trims_trailing_newline(self):
input = StringIO('test\n')
self.assertEqual('test', getpass._raw_input(input=input))
# Some of these tests are a bit white-box. The functional requirement is that
# the password input be taken directly from the tty, and that it not be echoed
# on the screen, unless we are falling back to stderr/stdin.
# Some of these might run on platforms without termios, but play it safe.
@unittest.skipUnless(termios, 'tests require system with termios')
class UnixGetpassTest(unittest.TestCase):
def test_uses_tty_directly(self):
with mock.patch('os.open') as open, \
mock.patch('io.FileIO') as fileio, \
mock.patch('io.TextIOWrapper') as textio:
# By setting open's return value to None the implementation will
# skip code we don't care about in this test. We can mock this out
# fully if an alternate implementation works differently.
open.return_value = None
getpass.unix_getpass()
open.assert_called_once_with('/dev/tty',
os.O_RDWR | os.O_NOCTTY)
fileio.assert_called_once_with(open.return_value, 'w+')
textio.assert_called_once_with(fileio.return_value)
def test_resets_termios(self):
with mock.patch('os.open') as open, \
mock.patch('io.FileIO'), \
mock.patch('io.TextIOWrapper'), \
mock.patch('termios.tcgetattr') as tcgetattr, \
mock.patch('termios.tcsetattr') as tcsetattr:
open.return_value = 3
fake_attrs = [255, 255, 255, 255, 255]
tcgetattr.return_value = list(fake_attrs)
getpass.unix_getpass()
tcsetattr.assert_called_with(3, mock.ANY, fake_attrs)
def test_falls_back_to_fallback_if_termios_raises(self):
with mock.patch('os.open') as open, \
mock.patch('io.FileIO') as fileio, \
mock.patch('io.TextIOWrapper') as textio, \
mock.patch('termios.tcgetattr'), \
mock.patch('termios.tcsetattr') as tcsetattr, \
mock.patch('getpass.fallback_getpass') as fallback:
open.return_value = 3
fileio.return_value = BytesIO()
tcsetattr.side_effect = termios.error
getpass.unix_getpass()
fallback.assert_called_once_with('Password: ',
textio.return_value)
def test_flushes_stream_after_input(self):
# issue 7208
with mock.patch('os.open') as open, \
mock.patch('io.FileIO'), \
mock.patch('io.TextIOWrapper'), \
mock.patch('termios.tcgetattr'), \
mock.patch('termios.tcsetattr'):
open.return_value = 3
mock_stream = mock.Mock(spec=StringIO)
getpass.unix_getpass(stream=mock_stream)
mock_stream.flush.assert_called_with()
def test_falls_back_to_stdin(self):
with mock.patch('os.open') as os_open, \
mock.patch('sys.stdin', spec=StringIO) as stdin:
os_open.side_effect = IOError
stdin.fileno.side_effect = AttributeError
with support.captured_stderr() as stderr:
with self.assertWarns(getpass.GetPassWarning):
getpass.unix_getpass()
stdin.readline.assert_called_once_with()
self.assertIn('Warning', stderr.getvalue())
self.assertIn('Password:', stderr.getvalue())
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 6,492,889,030,699,983,000 | 38.453988 | 79 | 0.602861 | false |
denik/vwoptimize | vwoptimizelib/third_party/networkx/algorithms/bipartite/basic.py | 32 | 6070 | # -*- coding: utf-8 -*-
"""
==========================
Bipartite Graph Algorithms
==========================
"""
# Copyright (C) 2013-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <[email protected]>',
'Aric Hagberg <[email protected]>'])
__all__ = [ 'is_bipartite',
'is_bipartite_node_set',
'color',
'sets',
'density',
'degrees']
def color(G):
"""Returns a two-coloring of the graph.
Raises an exception if the graph is not bipartite.
Parameters
----------
G : NetworkX graph
Returns
-------
color : dictionary
A dictionary keyed by node with a 1 or 0 as data for each node color.
Raises
------
NetworkXError if the graph is not two-colorable.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> c = bipartite.color(G)
>>> print(c)
{0: 1, 1: 0, 2: 1, 3: 0}
You can use this to set a node attribute indicating the biparite set:
>>> nx.set_node_attributes(G, 'bipartite', c)
>>> print(G.node[0]['bipartite'])
1
>>> print(G.node[1]['bipartite'])
0
"""
if G.is_directed():
import itertools
def neighbors(v):
return itertools.chain.from_iterable([G.predecessors_iter(v),
G.successors_iter(v)])
else:
neighbors=G.neighbors_iter
color = {}
for n in G: # handle disconnected graphs
if n in color or len(G[n])==0: # skip isolates
continue
queue = [n]
color[n] = 1 # nodes seen with color (1 or 0)
while queue:
v = queue.pop()
c = 1 - color[v] # opposite color of node v
for w in neighbors(v):
if w in color:
if color[w] == color[v]:
raise nx.NetworkXError("Graph is not bipartite.")
else:
color[w] = c
queue.append(w)
# color isolates with 0
color.update(dict.fromkeys(nx.isolates(G),0))
return color
def is_bipartite(G):
""" Returns True if graph G is bipartite, False if not.
Parameters
----------
G : NetworkX graph
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> print(bipartite.is_bipartite(G))
True
See Also
--------
color, is_bipartite_node_set
"""
try:
color(G)
return True
except nx.NetworkXError:
return False
def is_bipartite_node_set(G,nodes):
"""Returns True if nodes and G/nodes are a bipartition of G.
Parameters
----------
G : NetworkX graph
nodes: list or container
Check if nodes are a one of a bipartite set.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> X = set([1,3])
>>> bipartite.is_bipartite_node_set(G,X)
True
Notes
-----
For connected graphs the bipartite sets are unique. This function handles
disconnected graphs.
"""
S=set(nodes)
for CC in nx.connected_component_subgraphs(G):
X,Y=sets(CC)
if not ( (X.issubset(S) and Y.isdisjoint(S)) or
(Y.issubset(S) and X.isdisjoint(S)) ):
return False
return True
def sets(G):
"""Returns bipartite node sets of graph G.
Raises an exception if the graph is not bipartite.
Parameters
----------
G : NetworkX graph
Returns
-------
(X,Y) : two-tuple of sets
One set of nodes for each part of the bipartite graph.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> X, Y = bipartite.sets(G)
>>> list(X)
[0, 2]
>>> list(Y)
[1, 3]
See Also
--------
color
"""
c = color(G)
X = set(n for n in c if c[n]) # c[n] == 1
Y = set(n for n in c if not c[n]) # c[n] == 0
return (X, Y)
def density(B, nodes):
"""Return density of bipartite graph B.
Parameters
----------
G : NetworkX graph
nodes: list or container
Nodes in one set of the bipartite graph.
Returns
-------
d : float
The bipartite density
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.complete_bipartite_graph(3,2)
>>> X=set([0,1,2])
>>> bipartite.density(G,X)
1.0
>>> Y=set([3,4])
>>> bipartite.density(G,Y)
1.0
See Also
--------
color
"""
n=len(B)
m=nx.number_of_edges(B)
nb=len(nodes)
nt=n-nb
if m==0: # includes cases n==0 and n==1
d=0.0
else:
if B.is_directed():
d=m/(2.0*float(nb*nt))
else:
d= m/float(nb*nt)
return d
def degrees(B, nodes, weight=None):
"""Return the degrees of the two node sets in the bipartite graph B.
Parameters
----------
G : NetworkX graph
nodes: list or container
Nodes in one set of the bipartite graph.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used as a weight.
If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
(degX,degY) : tuple of dictionaries
The degrees of the two bipartite sets as dictionaries keyed by node.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.complete_bipartite_graph(3,2)
>>> Y=set([3,4])
>>> degX,degY=bipartite.degrees(G,Y)
>>> degX
{0: 2, 1: 2, 2: 2}
See Also
--------
color, density
"""
bottom=set(nodes)
top=set(B)-bottom
return (B.degree(top,weight),B.degree(bottom,weight))
| mit | 3,557,615,093,211,105,300 | 22.897638 | 78 | 0.536573 | false |
Matty-Downing2169/opencamlib | scripts/voronoi/voronoi_8_scale-test.py | 7 | 6627 | import ocl
import camvtk
import time
import vtk
import datetime
import math
import random
import numpy as np
import gc
def drawVertex(myscreen, p, vertexColor, rad=1):
myscreen.addActor( camvtk.Sphere( center=(p.x,p.y,p.z), radius=rad, color=vertexColor ) )
def drawEdge(myscreen, e, edgeColor=camvtk.yellow):
p1 = e[0]
p2 = e[1]
myscreen.addActor( camvtk.Line( p1=( p1.x,p1.y,p1.z), p2=(p2.x,p2.y,p2.z), color=edgeColor ) )
def drawFarCircle(myscreen, r, circleColor):
myscreen.addActor( camvtk.Circle( center=(0,0,0), radius=r, color=circleColor ) )
def drawDiagram( myscreen, vd ):
drawFarCircle(myscreen, vd.getFarRadius(), camvtk.pink)
for v in vd.getGenerators():
drawVertex(myscreen, v, camvtk.green, 2)
for v in vd.getVoronoiVertices():
drawVertex(myscreen, v, camvtk.red, 1)
for v in vd.getFarVoronoiVertices():
drawVertex(myscreen, v, camvtk.pink, 10)
vde = vd.getVoronoiEdges()
print " got ",len(vde)," Voronoi edges"
for e in vde:
drawEdge(myscreen,e, camvtk.cyan)
class VD:
def __init__(self, myscreen, vd, scale=1):
self.myscreen = myscreen
self.gen_pts=[ocl.Point(0,0,0)]
self.generators = camvtk.PointCloud(pointlist=self.gen_pts)
self.verts=[]
self.far=[]
self.edges =[]
self.generatorColor = camvtk.green
self.vertexColor = camvtk.red
self.edgeColor = camvtk.cyan
self.vdtext = camvtk.Text()
self.vdtext.SetPos( (50, myscreen.height-50) )
self.Ngen = 0
self.vdtext_text = ""
self.scale=scale
self.setVDText(vd)
myscreen.addActor(self.vdtext)
def setVDText(self, vd):
self.Ngen = len( vd.getGenerators() )-3
self.vdtext_text = "VD with " + str(self.Ngen) + " generators. SCALE= " + str(vd.getFarRadius())
self.vdtext.SetText( self.vdtext_text )
self.vdtext.SetSize(32)
def setGenerators(self, vd):
if len(self.gen_pts)>0:
self.myscreen.removeActor( self.generators )
#self.generators=[]
self.gen_pts = []
for p in vd.getGenerators():
self.gen_pts.append(self.scale*p)
self.generators= camvtk.PointCloud(pointlist=self.gen_pts)
self.generators.SetPoints()
self.myscreen.addActor(self.generators)
self.setVDText(vd)
self.myscreen.render()
def setFar(self, vd):
for p in vd.getFarVoronoiVertices():
p=self.scale*p
self.myscreen.addActor( camvtk.Sphere( center=(p.x,p.y,p.z), radius=4, color=camvtk.pink ) )
self.myscreen.render()
def setVertices(self, vd):
for p in self.verts:
self.myscreen.removeActor(p)
self.verts = []
for p in vd.getVoronoiVertices():
p=self.scale*p
actor = camvtk.Sphere( center=(p.x,p.y,p.z), radius=0.000005, color=self.vertexColor )
self.verts.append(actor)
self.myscreen.addActor( actor )
self.myscreen.render()
def setEdgesPolydata(self, vd):
self.edges = []
self.edges = vd.getEdgesGenerators()
self.epts = vtk.vtkPoints()
nid = 0
lines=vtk.vtkCellArray()
for e in self.edges:
p1 = self.scale*e[0]
p2 = self.scale*e[1]
self.epts.InsertNextPoint( p1.x, p1.y, p1.z)
self.epts.InsertNextPoint( p2.x, p2.y, p2.z)
line = vtk.vtkLine()
line.GetPointIds().SetId(0,nid)
line.GetPointIds().SetId(1,nid+1)
nid = nid+2
lines.InsertNextCell(line)
linePolyData = vtk.vtkPolyData()
linePolyData.SetPoints(self.epts)
linePolyData.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(linePolyData)
self.edge_actor = vtk.vtkActor()
self.edge_actor.SetMapper(mapper)
self.edge_actor.GetProperty().SetColor( camvtk.cyan )
self.myscreen.addActor( self.edge_actor )
self.myscreen.render()
def setEdges(self, vd):
for e in self.edges:
myscreen.removeActor(e)
self.edges = []
for e in vd.getEdgesGenerators():
p1 = self.scale*e[0]
p2 = self.scale*e[1]
actor = camvtk.Line( p1=( p1.x,p1.y,p1.z), p2=(p2.x,p2.y,p2.z), color=self.edgeColor )
self.myscreen.addActor(actor)
self.edges.append(actor)
self.myscreen.render()
def setAll(self, vd):
self.setGenerators(vd)
#self.setFar(vd)
#self.setVertices(vd)
self.setEdges(vd)
def addVertexSlow(myscreen, vd, vod, p):
pass
def drawDiag(far, framenr):
myscreen = camvtk.VTKScreen()
myscreen.camera.SetFocalPoint(0, 0, 0)
camvtk.drawOCLtext(myscreen)
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
scale=10000
#far = 0.00001
vd = ocl.VoronoiDiagram(far,1200)
camPos = 0.4* (far/0.00001)
myscreen.camera.SetPosition(camPos/10000, 0, camPos)
myscreen.camera.SetClippingRange(-2*camPos,2*camPos)
random.seed(42)
vod = VD(myscreen,vd,scale)
drawFarCircle(myscreen, scale*vd.getFarRadius(), camvtk.orange)
Nmax = 300
plist=[]
for n in range(Nmax):
x=-far/2+far*random.random()
y=-far/2+far*random.random()
plist.append( ocl.Point(x,y) )
n=1
#ren = [300]
for p in plist:
print "PYTHON: adding generator: ",n," at ",p
#if n in ren:
vd.addVertexSite( p )
n=n+1
vod.setAll(vd)
myscreen.render()
w2if.Modified()
lwr.SetFileName("frames/vd_v_"+ ('%05d' % framenr)+".png")
lwr.Write()
print "PYTHON All DONE."
myscreen.render()
#myscreen.iren.Start()
if __name__ == "__main__":
print ocl.revision()
maxf = 0.00001
minf = 0.00000001
lmaxf = math.log(maxf)
lminf = math.log(minf)
Nframes = 5
lrange = np.arange(lmaxf,lminf, -(lmaxf-lminf)/Nframes)
print lrange
fars = []
for l in lrange:
f = math.exp(l)
fars.append(f)
print fars
#exit()
#farvals = [0.1 , 0.01]
n=1
for f in fars:
print "****************"
print "PYTHON diagram with f= ",f
print "****************"
drawDiag(f,n)
n=n+1
gc.collect()
| gpl-3.0 | -4,450,914,438,304,293,400 | 29.399083 | 104 | 0.575223 | false |
paulhayes/fofix | src/Svg.py | 2 | 6963 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 myfingershurt #
# 2008 evilynux <[email protected]> #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from __future__ import with_statement
from OpenGL.GL import *
import numpy as np
import cmgl
import Log
from Texture import Texture
from PIL import Image
from constants import *
#stump: the last few stubs of DummyAmanith.py are inlined here since this
# is the only place in the whole program that uses it now that we've pruned
# the dead SVG code.
class SvgContext(object):
def __init__(self, geometry):
self.geometry = geometry
self.setGeometry(geometry)
self.setProjection(geometry)
glMatrixMode(GL_MODELVIEW)
def setGeometry(self, geometry = None):
glViewport(geometry[0], geometry[1], geometry[2], geometry[3])
glScalef(geometry[2] / 640.0, geometry[3] / 480.0, 1.0)
def setProjection(self, geometry = None):
geometry = geometry or self.geometry
with cmgl.MatrixMode(GL_PROJECTION):
glLoadIdentity()
glOrtho(geometry[0], geometry[0] + geometry[2], geometry[1], geometry[1] + geometry[3], -100, 100)
self.geometry = geometry
def clear(self, r = 0, g = 0, b = 0, a = 0):
glDepthMask(1)
glEnable(GL_COLOR_MATERIAL)
glClearColor(r, g, b, a)
glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
class ImgDrawing(object):
def __init__(self, context, ImgData):
self.ImgData = None
self.texture = None
self.context = context
self.cache = None
self.filename = ImgData
# Detect the type of data passed in
if type(ImgData) == file:
self.ImgData = ImgData.read()
elif type(ImgData) == str:
self.texture = Texture(ImgData)
elif isinstance(ImgData, Image.Image): #stump: let a PIL image be passed in
self.texture = Texture()
self.texture.loadImage(ImgData)
# Make sure we have a valid texture
if not self.texture:
if type(ImgData) == str:
e = "Unable to load texture for %s." % ImgData
else:
e = "Unable to load texture for SVG file."
Log.error(e)
raise RuntimeError(e)
self.pixelSize = self.texture.pixelSize #the size of the image in pixels (from texture)
self.position = [0.0,0.0] #position of the image in the viewport
self.scale = [1.0,1.0] #percentage scaling
self.angle = 0 #angle of rotation (degrees)
self.color = (1.0,1.0,1.0,1.0) #glColor rgba
self.rect = (0,1,0,1) #texture mapping coordinates
self.shift = -.5 #horizontal alignment
self.vshift = -.5 #vertical alignment
self.path = self.texture.name #path of the image file
self.createArrays()
def createArrays(self):
self.vtxArray = np.zeros((4,2), dtype=np.float32)
self.texArray = np.zeros((4,2), dtype=np.float32)
self.createVtx()
self.createTex()
def createVtx(self):
vA = self.vtxArray #short hand variable casting
#topLeft, topRight, bottomRight, bottomLeft
vA[0,0] = 0.0; vA[0,1] = 1.0
vA[1,0] = 1.0; vA[1,1] = 1.0
vA[2,0] = 1.0; vA[2,1] = 0.0
vA[3,0] = 0.0; vA[3,1] = 0.0
def createTex(self):
tA = self.texArray
rect = self.rect
#topLeft, topRight, bottomRight, bottomLeft
tA[0,0] = rect[0]; tA[0,1] = rect[3]
tA[1,0] = rect[1]; tA[1,1] = rect[3]
tA[2,0] = rect[1]; tA[2,1] = rect[2]
tA[3,0] = rect[0]; tA[3,1] = rect[2]
def width1(self):
width = self.pixelSize[0]
if width:
return width
else:
return 0
#myfingershurt:
def height1(self):
height = self.pixelSize[1]
if height:
return height
else:
return 0
def widthf(self, pixelw):
width = self.pixelSize[0]
if width:
wfactor = pixelw/width
return wfactor
else:
return 0
def setPosition(self, x, y):
self.position = [x,y]
def setScale(self, width, height):
self.scale = [width, height]
def setAngle(self, angle):
self.angle = angle
def setRect(self, rect):
if not rect == self.rect:
self.rect = rect
self.createTex()
def setAlignment(self, alignment):
if alignment == LEFT: #left
self.shift = 0
elif alignment == CENTER:#center
self.shift = -.5
elif alignment == RIGHT:#right
self.shift = -1.0
def setVAlignment(self, alignment):
if alignment == 0: #bottom
self.vshift = 0
elif alignment == 1:#center
self.vshift = -.5
elif alignment == 2:#top
self.vshift = -1.0
def setColor(self, color):
if len(color) == 3:
color = (color[0], color[1], color[2], 1.0)
self.color = color
def draw(self):
with cmgl.PushedSpecificMatrix(GL_TEXTURE):
with cmgl.PushedSpecificMatrix(GL_PROJECTION):
with cmgl.MatrixMode(GL_PROJECTION):
self.context.setProjection()
with cmgl.PushedMatrix():
glLoadIdentity()
glTranslate(self.position[0], self.position[1], 0.0)
glRotatef(-self.angle, 0, 0, 1)
glScalef(self.scale[0], self.scale[1], 1.0)
glScalef(self.pixelSize[0], self.pixelSize[1], 1)
glTranslatef(self.shift, self.vshift, 0)
glColor4f(*self.color)
glEnable(GL_TEXTURE_2D)
self.texture.bind()
cmgl.drawArrays(GL_QUADS, vertices=self.vtxArray, texcoords=self.texArray)
glDisable(GL_TEXTURE_2D)
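# Hypothetical usage sketch (not part of the original module; the file name and
# viewport size below are assumptions). A caller that already owns a 640x480 GL
# viewport might drive these classes roughly like this:
#
#   context = SvgContext((0, 0, 640, 480))
#   logo = ImgDrawing(context, "logo.png")   # path, file object or PIL image
#   logo.setPosition(320, 240)
#   logo.setScale(0.5, 0.5)
#   logo.setAlignment(CENTER)
#   logo.draw()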
| gpl-2.0 | -1,157,506,122,447,340,300 | 32.800971 | 104 | 0.561396 | false |
badlogicmanpreet/nupic | examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py | 30 | 15834 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
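# Illustrative example (hypothetical values): with an aggregation period of
# hours=1 and a predictAheadTime of hours=24, aggregationDivide() returns 24.0,
# so the classifier 'steps' parameter becomes '24' -- i.e. predict 24 aggregated
# records ahead.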
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
# 'setup' : [claModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 | 2,957,362,483,745,656,000 | 36.880383 | 92 | 0.623721 | false |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/unit/vpc/test_routetable.py | 48 | 20511 | from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, RouteTable
class TestDescribeRouteTables(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeRouteTablesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>
<routeTableSet>
<item>
<routeTableId>rtb-13ad487a</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-12ad487b</routeTableAssociationId>
<routeTableId>rtb-13ad487a</routeTableId>
<main>true</main>
</item>
</associationSet>
<tagSet/>
</item>
<item>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
<item>
<destinationCidrBlock>0.0.0.0/0</destinationCidrBlock>
<gatewayId>igw-eaad4883</gatewayId>
<state>active</state>
<origin>CreateRoute</origin>
</item>
<item>
<destinationCidrBlock>10.0.0.0/21</destinationCidrBlock>
<networkInterfaceId>eni-884ec1d1</networkInterfaceId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
<item>
<destinationCidrBlock>11.0.0.0/22</destinationCidrBlock>
<vpcPeeringConnectionId>pcx-efc52b86</vpcPeeringConnectionId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-faad4893</routeTableAssociationId>
<routeTableId>rtb-f9ad4890</routeTableId>
<subnetId>subnet-15ad487c</subnetId>
</item>
</associationSet>
<tagSet/>
</item>
</routeTableSet>
</DescribeRouteTablesResponse>
"""
def test_get_all_route_tables(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_route_tables(
['rtb-13ad487a', 'rtb-f9ad4890'], filters=[('route.state', 'active')])
self.assert_request_parameters({
'Action': 'DescribeRouteTables',
'RouteTableId.1': 'rtb-13ad487a',
'RouteTableId.2': 'rtb-f9ad4890',
'Filter.1.Name': 'route.state',
'Filter.1.Value.1': 'active'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 2)
self.assertIsInstance(api_response[0], RouteTable)
self.assertEquals(api_response[0].id, 'rtb-13ad487a')
self.assertEquals(len(api_response[0].routes), 1)
self.assertEquals(api_response[0].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[0].routes[0].gateway_id, 'local')
self.assertEquals(api_response[0].routes[0].state, 'active')
self.assertEquals(api_response[0].routes[0].origin, 'CreateRouteTable')
self.assertEquals(len(api_response[0].associations), 1)
self.assertEquals(api_response[0].associations[0].id, 'rtbassoc-12ad487b')
self.assertEquals(api_response[0].associations[0].route_table_id, 'rtb-13ad487a')
self.assertIsNone(api_response[0].associations[0].subnet_id)
self.assertEquals(api_response[0].associations[0].main, True)
self.assertEquals(api_response[1].id, 'rtb-f9ad4890')
self.assertEquals(len(api_response[1].routes), 4)
self.assertEquals(api_response[1].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[1].routes[0].gateway_id, 'local')
self.assertEquals(api_response[1].routes[0].state, 'active')
self.assertEquals(api_response[1].routes[0].origin, 'CreateRouteTable')
self.assertEquals(api_response[1].routes[1].destination_cidr_block, '0.0.0.0/0')
self.assertEquals(api_response[1].routes[1].gateway_id, 'igw-eaad4883')
self.assertEquals(api_response[1].routes[1].state, 'active')
self.assertEquals(api_response[1].routes[1].origin, 'CreateRoute')
self.assertEquals(api_response[1].routes[2].destination_cidr_block, '10.0.0.0/21')
self.assertEquals(api_response[1].routes[2].interface_id, 'eni-884ec1d1')
self.assertEquals(api_response[1].routes[2].state, 'blackhole')
self.assertEquals(api_response[1].routes[2].origin, 'CreateRoute')
self.assertEquals(api_response[1].routes[3].destination_cidr_block, '11.0.0.0/22')
self.assertEquals(api_response[1].routes[3].vpc_peering_connection_id, 'pcx-efc52b86')
self.assertEquals(api_response[1].routes[3].state, 'blackhole')
self.assertEquals(api_response[1].routes[3].origin, 'CreateRoute')
self.assertEquals(len(api_response[1].associations), 1)
self.assertEquals(api_response[1].associations[0].id, 'rtbassoc-faad4893')
self.assertEquals(api_response[1].associations[0].route_table_id, 'rtb-f9ad4890')
self.assertEquals(api_response[1].associations[0].subnet_id, 'subnet-15ad487c')
self.assertEquals(api_response[1].associations[0].main, False)
class TestAssociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<AssociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<associationId>rtbassoc-f8ad4891</associationId>
</AssociateRouteTableResponse>
"""
def test_associate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.associate_route_table(
'rtb-e4ad488d', 'subnet-15ad487c')
self.assert_request_parameters({
'Action': 'AssociateRouteTable',
'RouteTableId': 'rtb-e4ad488d',
'SubnetId': 'subnet-15ad487c'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-f8ad4891')
class TestDisassociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DisassociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateRouteTableResponse>
"""
def test_disassociate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.disassociate_route_table('rtbassoc-fdad4894')
self.assert_request_parameters({
'Action': 'DisassociateRouteTable',
'AssociationId': 'rtbassoc-fdad4894'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestCreateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<routeTable>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
</item>
</routeSet>
<associationSet/>
<tagSet/>
</routeTable>
</CreateRouteTableResponse>
"""
def test_create_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route_table('vpc-11ad4878')
self.assert_request_parameters({
'Action': 'CreateRouteTable',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, RouteTable)
self.assertEquals(api_response.id, 'rtb-f9ad4890')
self.assertEquals(len(api_response.routes), 1)
self.assertEquals(api_response.routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response.routes[0].gateway_id, 'local')
self.assertEquals(api_response.routes[0].state, 'active')
class TestDeleteRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route_table('rtb-e4ad488d')
self.assert_request_parameters({
'Action': 'DeleteRouteTable',
'RouteTableId': 'rtb-e4ad488d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRouteTableAssociation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<ReplaceRouteTableAssociationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<newAssociationId>rtbassoc-faad4893</newAssociationId>
</ReplaceRouteTableAssociationResponse>
"""
def test_replace_route_table_assocation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_assocation(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_table_association_with_assoc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_association_with_assoc(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-faad4893')
class TestCreateRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_create_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_replace_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestDeleteRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route('rtb-e4ad488d', '172.16.1.0/24')
self.assert_request_parameters({
'Action': 'DeleteRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '172.16.1.0/24'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,286,190,253,022,579,500 | 44.988789 | 99 | 0.570036 | false |
esc/pybuilder | src/unittest/python/plugins/python/sonarqube_plugin_tests.py | 1 | 3670 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from test_utils import Mock, patch
from os.path import normcase as nc
from pybuilder.core import Project
from pybuilder.errors import BuildFailedException
from pybuilder.plugins.python.sonarqube_plugin import (SonarCommandBuilder,
build_sonar_runner,
run_sonar_analysis)
class RunSonarAnalysisTest(TestCase):
def setUp(self):
self.project = Project("any-project")
self.project.version = "0.0.1"
self.project.set_property("sonarqube_project_key", "project_key")
self.project.set_property("sonarqube_project_name", "project_name")
self.project.set_property("dir_source_main_python", "src/main/python")
self.project.set_property("dir_target", "target")
self.project.set_property("dir_reports", "target/reports")
def test_should_build_sonar_runner_for_project(self):
self.assertEqual(
build_sonar_runner(self.project).as_string,
"sonar-runner -Dsonar.projectKey=project_key "
"-Dsonar.projectName=project_name "
"-Dsonar.projectVersion=0.0.1 "
"-Dsonar.sources=src/main/python "
"-Dsonar.python.coverage.reportPath=%s" % nc("target/reports/coverage*.xml"))
@patch("pybuilder.plugins.python.sonarqube_plugin.SonarCommandBuilder.run")
def test_should_break_build_when_sonar_runner_fails(self, run_sonar_command):
run_sonar_command.return_value = Mock(exit_code=1)
self.assertRaises(BuildFailedException, run_sonar_analysis, self.project, Mock())
@patch("pybuilder.plugins.python.sonarqube_plugin.SonarCommandBuilder.run")
def test_should_not_break_build_when_sonar_runner_succeeds(self, run_sonar_command):
run_sonar_command.return_value = Mock(exit_code=0)
run_sonar_analysis(self.project, Mock())
class SonarCommandBuilderTests(TestCase):
def setUp(self):
self.project = Project("any-project")
self.project.set_property("any-property-name", "any-property-value")
self.sonar_builder = SonarCommandBuilder("sonar", self.project)
def test_should_set_sonar_key_to_specific_value(self):
self.sonar_builder.set_sonar_key("anySonarKey").to("anyValue")
self.assertEqual(
self.sonar_builder.as_string,
"sonar -DanySonarKey=anyValue")
def test_should_set_sonar_key_to_two_specific_values(self):
self.sonar_builder.set_sonar_key("anySonarKey").to("anyValue").set_sonar_key("other").to("otherValue")
self.assertEqual(
self.sonar_builder.as_string,
"sonar -DanySonarKey=anyValue -Dother=otherValue")
def test_should_set_sonar_key_to_property_value(self):
self.sonar_builder.set_sonar_key("anySonarKey").to_property_value("any-property-name")
self.assertEqual(self.sonar_builder.as_string, "sonar -DanySonarKey=any-property-value")
| apache-2.0 | 4,561,225,584,113,375,700 | 40.704545 | 110 | 0.678474 | false |
hydroshare/hydroshare | hs_core/views/serializers.py | 1 | 15076 | import json
from collections import namedtuple
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group, User
from rest_framework import serializers
from hs_core.hydroshare import utils
from hs_core import hydroshare
from .utils import validate_json, validate_user, validate_group
from hs_access_control.models import PrivilegeCodes
from drf_yasg.utils import swagger_serializer_method
RESOURCE_TYPES = [rtype.__name__ for rtype in utils.get_resource_types()]
CONTENT_TYPES = [ctype.__name__ for ctype in utils.get_content_types()]
class StringListField(serializers.ListField):
child = serializers.CharField()
class ResourceUpdateRequestValidator(serializers.Serializer):
title = serializers.CharField(required=False)
metadata = serializers.CharField(validators=[validate_json], required=False)
extra_metadata = serializers.CharField(validators=[validate_json], required=False)
edit_users = serializers.CharField(required=False)
edit_groups = serializers.CharField(required=False)
view_users = serializers.CharField(required=False)
view_groups = serializers.CharField(required=False)
keywords = StringListField(required=False)
abstract = serializers.CharField(required=False)
def validate_edit_users(self, value):
return self._validate_users(value)
def validate_view_users(self, value):
return self._validate_users(value)
def validate_edit_groups(self, value):
return self._validate_groups(value)
def validate_view_groups(self, value):
return self._validate_groups(value)
def _validate_users(self, value):
values = value.split(',')
for value in values:
if not User.objects.filter(username=value).exists():
raise serializers.ValidationError("%s in not a valid user name." % value)
return values
def _validate_groups(self, value):
values = value.split(',')
for value in values:
if not Group.objects.filter(name=value).exists():
raise serializers.ValidationError("%s in not a valid group name." % value)
return values
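# Illustrative note (user names are hypothetical): 'edit_users' and 'view_users'
# are submitted as comma-separated usernames, e.g. "alice,bob", and validation
# returns the list ['alice', 'bob'] once every name passes the existence check.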
class ResourceCreateRequestValidator(ResourceUpdateRequestValidator):
resource_type = serializers.ChoiceField(
choices=list(zip(
[x.__name__ for x in hydroshare.get_resource_types()],
[x.__name__ for x in hydroshare.get_resource_types()]
)), default='CompositeResource')
class ResourceTypesSerializer(serializers.Serializer):
resource_type = serializers.CharField(max_length=100, required=True,
validators=[lambda x: x in RESOURCE_TYPES],
help_text='list of resource types')
class ContentTypesSerializer(serializers.Serializer):
content_type = serializers.CharField(max_length=100, required=True,
validators=[lambda x: x in CONTENT_TYPES],
help_text='list of content types')
class TaskStatusSerializer(serializers.Serializer):
bag_status = serializers.CharField(help_text='Status of task, i.e. "Not Ready" ')
task_id = serializers.CharField(help_text="The task id to be used to check task status")
class ResourceListRequestValidator(serializers.Serializer):
creator = serializers.CharField(min_length=1, required=False,
help_text='The first author (name or email)')
author = serializers.CharField(required=False,
help_text='Comma separated list of authors (name or email)')
group = serializers.CharField(min_length=1, required=False, validators=[validate_group],
help_text='A group name (requires edit_permission=True)')
user = serializers.CharField(min_length=1, required=False, validators=[validate_user],
help_text='Viewable by user (name or email)')
owner = serializers.CharField(min_length=1, required=False, validators=[validate_user],
help_text='Owned by user (name or email)')
from_date = serializers.DateField(required=False, default=None,
help_text='to get a list of resources created on or after '
'this date')
to_date = serializers.DateField(required=False, default=None,
help_text='to get a list of resources created on or before '
'this date')
subject = serializers.CharField(required=False,
help_text='Comma separated list of subjects')
full_text_search = serializers.CharField(required=False,
help_text='get a list of resources with this text')
edit_permission = serializers.BooleanField(required=False, default=False,
help_text='filter by edit permissions of '
'user/group/owner')
published = serializers.BooleanField(required=False, default=False,
help_text='filter by published resources')
type = serializers.MultipleChoiceField(choices=RESOURCE_TYPES, required=False, default=None,
help_text='to get a list of resources of the specified '
'resource types')
coverage_type = serializers.ChoiceField(choices=['box', 'point'], required=False,
help_text='to get a list of resources that fall within '
'the specified spatial coverage boundary')
north = serializers.CharField(required=False,
help_text='north coordinate of spatial coverage. This parameter '
'is required if *coverage_type* has been specified')
south = serializers.CharField(required=False,
help_text='south coordinate of spatial coverage. This parameter '
'is required if *coverage_type* has been specified '
'with a value of box')
east = serializers.CharField(required=False,
help_text='east coordinate of spatial coverage. This parameter '
'is required if *coverage_type* has been specified')
west = serializers.CharField(required=False,
help_text='west coordinate of spatial coverage. This parameter '
'is required if *coverage_type* has been specified with '
'a value of box')
include_obsolete = serializers.BooleanField(required=False, default=False,
help_text='Include replaced resources')
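# Illustrative query (parameter values are hypothetical): a spatial filter is
# expressed as ?coverage_type=box&north=42.1&south=41.9&east=-111.5&west=-111.9;
# for coverage_type=point only the north and east coordinates are needed.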
class ResourceListItemSerializer(serializers.Serializer):
resource_type = serializers.CharField(max_length=100)
resource_title = serializers.CharField(max_length=200)
resource_id = serializers.CharField(max_length=100)
abstract = serializers.CharField()
authors = serializers.ListField()
creator = serializers.CharField(max_length=100)
doi = serializers.CharField(max_length=200)
date_created = serializers.DateTimeField(format='%m-%d-%Y')
date_last_updated = serializers.DateTimeField(format='%m-%d-%Y')
public = serializers.BooleanField()
discoverable = serializers.BooleanField()
shareable = serializers.BooleanField()
coverages = serializers.JSONField(required=False)
immutable = serializers.BooleanField()
published = serializers.BooleanField()
bag_url = serializers.URLField()
science_metadata_url = serializers.URLField()
resource_map_url = serializers.URLField()
resource_url = serializers.URLField()
def to_representation(self, instance):
# URLs in metadata should be fully qualified.
# ALWAYS qualify them with www.hydroshare.org, rather than the local server name.
site_url = hydroshare.utils.current_site_url()
bag_url = site_url + instance.bag_url
science_metadata_url = site_url + reverse('get_update_science_metadata', args=[instance.short_id])
resource_map_url = site_url + reverse('get_resource_map', args=[instance.short_id])
resource_url = site_url + instance.get_absolute_url()
coverages = [{"type": v['type'], "value": json.loads(v['_value'])}
for v in list(instance.metadata.coverages.values())]
authors = []
for c in instance.metadata.creators.all():
authors.append(c.name)
doi = None
if instance.raccess.published:
doi = "10.4211/hs.{}".format(instance.short_id)
description = ''
if instance.metadata.description:
description = instance.metadata.description.abstract
return {'resource_type': instance.resource_type,
'resource_id': instance.short_id,
'resource_title': instance.metadata.title.value,
'abstract': description,
'authors': authors,
'creator': instance.first_creator.name,
'doi': doi,
'public': instance.raccess.public,
'discoverable': instance.raccess.discoverable,
'shareable': instance.raccess.shareable,
'immutable': instance.raccess.immutable,
'published': instance.raccess.published,
'date_created': instance.created,
'date_last_updated': instance.last_updated,
'bag_url': bag_url,
'coverages': coverages,
'science_metadata_url': science_metadata_url,
'resource_map_url': resource_map_url,
'resource_url': resource_url,
'content_types': instance.aggregation_types}
class ResourceCreatedSerializer(serializers.Serializer):
resource_type = serializers.CharField(max_length=100)
resource_id = serializers.CharField(max_length=100)
message = serializers.CharField()
class ResourceFileSerializer(serializers.Serializer):
file_name = serializers.CharField(max_length=200, help_text='The filename, including the path')
url = serializers.URLField(help_text='The url to download the file')
size = serializers.IntegerField(help_text='The size of the file')
content_type = serializers.CharField(max_length=255, help_text='The content type of the file')
logical_file_type = serializers.CharField(max_length=255)
modified_time = serializers.DateTimeField(format="%Y-%m-%dT%H:%M:%S")
checksum = serializers.CharField(max_length=100)
class GroupPrivilegeSerializer(serializers.Serializer):
status = serializers.CharField(help_text='The status of the request')
name = serializers.CharField(help_text="The name of the shared group")
privileg_granted = serializers.ChoiceField(help_text="The privilege to grant",
choices=PrivilegeCodes.CHOICES,
default=PrivilegeCodes.NONE)
group_pic = serializers.CharField(help_text="Url of the group picture")
current_user_privilege = serializers.ChoiceField(help_text="Logged in user's permissions",
choices=PrivilegeCodes.CHOICES,
default=PrivilegeCodes.NONE)
error_msg = serializers.CharField(help_text="Description of error")
class UserPrivilegeSerializer(serializers.Serializer):
status = serializers.CharField(help_text='The status of the request')
username = serializers.CharField(help_text="The username name of the shared user")
name = serializers.CharField(help_text="The full name name of the shared user")
privileg_granted = serializers.ChoiceField(help_text="The privilege to grant",
choices=PrivilegeCodes.CHOICES,
default=PrivilegeCodes.NONE)
current_user_privilege = serializers.ChoiceField(help_text="Logged in user's permissions",
choices=PrivilegeCodes.CHOICES,
default=PrivilegeCodes.NONE)
profile_pic = serializers.CharField(help_text="Url of the user's profile picture")
is_current_user = serializers.BooleanField(help_text="Indicates whether the currently logged "
"in user made ther permission request")
error_msg = serializers.CharField(help_text="Description of error")
class ResourceType(object):
def __init__(self, resource_type):
self.resource_type = resource_type
class ContentType(object):
def __init__(self, content_type):
self.content_type = content_type
ResourceListItem = namedtuple('ResourceListItem',
['resource_type',
'resource_id',
'resource_title',
'abstract',
'authors',
'creator',
'doi',
'public',
'discoverable',
'shareable',
'immutable',
'published',
'date_created',
'date_last_updated',
'bag_url',
'coverages',
'science_metadata_url',
'resource_map_url',
'resource_url',
'content_types'])
ResourceFileItem = namedtuple('ResourceFileItem',
['url',
'file_name',
'size',
'content_type',
'logical_file_type',
'modified_time',
'checksum'])
class UserAuthenticateRequestValidator(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
class AccessRulesRequestValidator(serializers.Serializer):
public = serializers.BooleanField(default=False)
class ResourceFileValidator(serializers.Serializer):
file = serializers.FileField()
@swagger_serializer_method(serializer_or_field=file)
def get_file(self, obj):
return ResourceFileValidator().data
| bsd-3-clause | 6,463,104,856,968,565,000 | 49.760943 | 106 | 0.591603 | false |
jhawkesworth/ansible | lib/ansible/modules/network/netvisor/pn_role.py | 35 | 6334 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_role
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to create/delete/modify role
description:
- This module can be used to create, delete and modify user roles.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: false
type: str
state:
description:
- State the action to perform. Use C(present) to create role and
C(absent) to delete role and C(update) to modify role.
required: true
type: str
choices: ['present', 'absent', 'update']
pn_scope:
description:
- local or fabric.
required: false
type: str
choices: ['local', 'fabric']
pn_access:
description:
- type of access.
required: false
type: str
choices: ['read-only', 'read-write']
pn_shell:
description:
- allow shell command.
required: false
type: bool
pn_sudo:
description:
- allow sudo from shell.
required: false
type: bool
pn_running_config:
description:
- display running configuration of switch.
required: false
type: bool
pn_name:
description:
- role name.
required: true
type: str
pn_delete_from_users:
description:
- delete from users.
required: false
type: bool
"""
EXAMPLES = """
- name: Role create
pn_role:
pn_cliswitch: 'sw01'
state: 'present'
pn_name: 'foo'
pn_scope: 'local'
pn_access: 'read-only'
- name: Role delete
pn_role:
pn_cliswitch: 'sw01'
state: 'absent'
pn_name: 'foo'
- name: Role modify
pn_role:
pn_cliswitch: 'sw01'
state: 'update'
pn_name: 'foo'
pn_access: 'read-write'
pn_sudo: true
pn_shell: true
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the role command.
returned: always
type: list
stderr:
description: set of error responses from the role command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the role-show command.
If a role with given name exists, return True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
role_name = module.params['pn_name']
cli += ' role-show format name no-show-headers'
out = run_commands(module, cli)[1]
if out:
out = out.split()
return True if role_name in out else False
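# Illustrative example (switch output is hypothetical): if 'role-show format name
# no-show-headers' prints "network-admin read-only-admin foo", then check_cli()
# returns True for pn_name='foo', and a later role-create is skipped as a no-op.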
def main():
""" This section is for arguments parsing """
state_map = dict(
present='role-create',
absent='role-delete',
update='role-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_scope=dict(required=False, type='str',
choices=['local', 'fabric']),
pn_access=dict(required=False, type='str',
choices=['read-only', 'read-write']),
pn_shell=dict(required=False, type='bool'),
pn_sudo=dict(required=False, type='bool'),
pn_running_config=dict(required=False, type='bool'),
pn_name=dict(required=False, type='str'),
pn_delete_from_users=dict(required=False, type='bool'),
),
required_if=(
["state", "present", ["pn_name", "pn_scope"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]],
),
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
scope = module.params['pn_scope']
access = module.params['pn_access']
shell = module.params['pn_shell']
sudo = module.params['pn_sudo']
running_config = module.params['pn_running_config']
name = module.params['pn_name']
delete_from_users = module.params['pn_delete_from_users']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
ROLE_EXISTS = check_cli(module, cli)
cli += ' %s name %s ' % (command, name)
# the original identity check "shell is (False or '')" can never match a boolean
# module parameter, so test for a falsy shell value directly
if not shell and sudo:
module.fail_json(
failed=True,
msg='sudo access requires shell access'
)
if command == 'role-modify':
if ROLE_EXISTS is False:
module.fail_json(
failed=True,
msg='Role with name %s does not exist' % name
)
if command == 'role-delete':
if ROLE_EXISTS is False:
module.exit_json(
skipped=True,
msg='Role with name %s does not exist' % name
)
if command == 'role-create':
if ROLE_EXISTS is True:
module.exit_json(
skipped=True,
msg='Role with name %s already exists' % name
)
if scope:
cli += ' scope ' + scope
if command != 'role-delete':
if access:
cli += ' access ' + access
cli += booleanArgs(shell, 'shell', 'no-shell')
cli += booleanArgs(sudo, 'sudo', 'no-sudo')
cli += booleanArgs(running_config, 'running-config', 'no-running-config')
if command == 'role-modify':
if delete_from_users:
cli += ' delete-from-users ' + delete_from_users
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| gpl-3.0 | -16,602,217,102,090,656 | 25.613445 | 92 | 0.593937 | false |
surfnzdotcom/cvsnt-fork | libxml/python/tests/compareNodes.py | 87 | 1507 | #!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
#
# Testing XML Node comparison and Node hash-value
#
doc = libxml2.parseDoc("""<root><foo/></root>""")
root = doc.getRootElement()
# Create two different objects which point to foo
foonode1 = root.children
foonode2 = root.children
# Now check that [in]equality tests work ok
if not ( foonode1 == foonode2 ):
print "Error comparing nodes with ==, nodes should be equal but are unequal"
sys.exit(1)
if ( foonode1 == root ):
print "Error comparing nodes with ==, nodes should not be equal but are equal"
sys.exit(1)
if not ( foonode1 != root ):
print "Error comparing nodes with !=, nodes should not be equal but are equal"
sys.exit(1)
if ( foonode1 != foonode2 ):
print "Error comparing nodes with !=, nodes should be equal but are unequal"
sys.exit(1)
# Next check that the hash function for the objects also works ok
if not (hash(foonode1) == hash(foonode2)):
print "Error hash values for two equal nodes are different"
sys.exit(1)
if not (hash(foonode1) != hash(root)):
print "Error hash values for two unequal nodes are not different"
sys.exit(1)
if hash(foonode1) == hash(root):
print "Error hash values for two unequal nodes are equal"
sys.exit(1)
# Basic tests successful
doc.freeDoc()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| gpl-2.0 | -765,601,378,357,059,500 | 29.14 | 82 | 0.704048 | false |
SivilTaram/edx-platform | common/djangoapps/third_party_auth/models.py | 7 | 19981 | # -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(including Shibboleth support)
"""
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import json
import logging
from social.backends.base import BaseAuth
from social.backends.oauth import OAuthAuth
from social.backends.saml import SAMLAuth, SAMLIdentityProvider
from social.exceptions import SocialAuthBaseException
from social.utils import module_member
log = logging.getLogger(__name__)
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
""" Load the list of python-social-auth backend classes from Django settings """
for class_path in settings.AUTHENTICATION_BACKENDS:
auth_class = module_member(class_path)
if issubclass(auth_class, base_class):
yield auth_class
_PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
_PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(OAuthAuth)]
_PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
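# Example of the resulting lookup tables (entries are illustrative and depend
# entirely on the AUTHENTICATION_BACKENDS Django setting):
#   _PSA_BACKENDS        -> {'google-oauth2': GoogleOAuth2, 'tpa-saml': SAMLAuthBackend, ...}
#   _PSA_OAUTH2_BACKENDS -> ['google-oauth2', 'facebook', ...]
#   _PSA_SAML_BACKENDS   -> ['tpa-saml']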
def clean_json(value, of_type):
""" Simple helper method to parse and clean JSON """
if not value.strip():
return json.dumps(of_type())
try:
value_python = json.loads(value)
except ValueError as err:
raise ValidationError("Invalid JSON: {}".format(err.message))
if not isinstance(value_python, of_type):
raise ValidationError("Expected a JSON {}".format(of_type))
return json.dumps(value_python, indent=4)
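# A couple of illustrative inputs: clean_json('', dict) returns '{}',
# clean_json('{"a": 1}', dict) returns the same object re-serialised with
# 4-space indentation, and clean_json('[1, 2]', dict) raises ValidationError
# because a JSON object (dict) was expected.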
class AuthNotConfigured(SocialAuthBaseException):
""" Exception when SAMLProviderData or other required info is missing """
def __init__(self, provider_name):
super(AuthNotConfigured, self).__init__()
self.provider_name = provider_name
def __str__(self):
return _('Authentication with {} is currently unavailable.').format( # pylint: disable=no-member
self.provider_name
)
class ProviderConfig(ConfigurationModel):
"""
Abstract Base Class for configuring a third_party_auth provider
"""
icon_class = models.CharField(
max_length=50, default='fa-sign-in',
help_text=(
'The Font Awesome (or custom) icon class to use on the login button for this provider. '
'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
),
)
name = models.CharField(max_length=50, blank=False, help_text="Name of this provider (shown to users)")
secondary = models.BooleanField(
default=False,
help_text=_(
'Secondary providers are displayed less prominently, '
'in a separate list of "Institution" login providers.'
),
)
skip_registration_form = models.BooleanField(
default=False,
help_text=_(
"If this option is enabled, users will not be asked to confirm their details "
"(name, email, etc.) during the registration process. Only select this option "
"for trusted providers that are known to provide accurate user information."
),
)
skip_email_verification = models.BooleanField(
default=False,
help_text=_(
"If this option is selected, users will not be required to confirm their "
"email, and their account will be activated immediately upon registration."
),
)
prefix = None # used for provider_id. Set to a string value in subclass
backend_name = None # Set to a field or fixed value in subclass
# "enabled" field is inherited from ConfigurationModel
class Meta(object): # pylint: disable=missing-docstring
abstract = True
@property
def provider_id(self):
""" Unique string key identifying this provider. Must be URL and css class friendly. """
assert self.prefix is not None
return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))
@property
def backend_class(self):
""" Get the python-social-auth backend class used for this provider """
return _PSA_BACKENDS[self.backend_name]
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
return self.backend_name == social_auth.provider
@classmethod
def get_register_form_data(cls, pipeline_kwargs):
"""Gets dict of data to display on the register form.
common.djangoapps.student.views.register_user uses this to populate the
new account creation form with values supplied by the user's chosen
provider, preventing duplicate data entry.
Args:
pipeline_kwargs: dict of string -> object. Keyword arguments
accumulated by the pipeline thus far.
Returns:
Dict of string -> string. Keys are names of form fields; values are
values for that field. Where there is no value, the empty string
must be used.
"""
# Details about the user sent back from the provider.
details = pipeline_kwargs.get('details')
# Get the username separately to take advantage of the de-duping logic
# built into the pipeline. The provider cannot de-dupe because it can't
# check the state of taken usernames in our system. Note that there is
# technically a data race between the creation of this value and the
# creation of the user object, so it is still possible for users to get
# an error on submit.
suggested_username = pipeline_kwargs.get('username')
return {
'email': details.get('email', ''),
'name': details.get('fullname', ''),
'username': suggested_username,
}
def get_authentication_backend(self):
"""Gets associated Django settings.AUTHENTICATION_BACKEND string."""
return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)
class OAuth2ProviderConfig(ProviderConfig):
"""
Configuration Entry for an OAuth2 based provider.
Also works for OAuth1 providers.
"""
prefix = 'oa2'
KEY_FIELDS = ('backend_name', ) # Backend name is unique
backend_name = models.CharField(
max_length=50, choices=[(name, name) for name in _PSA_OAUTH2_BACKENDS], blank=False, db_index=True,
help_text=(
"Which python-social-auth OAuth2 provider backend to use. "
"The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
# To be precise, it's set by AUTHENTICATION_BACKENDS - which aws.py sets from THIRD_PARTY_AUTH_BACKENDS
)
)
key = models.TextField(blank=True, verbose_name="Client ID")
secret = models.TextField(
blank=True,
verbose_name="Client Secret",
help_text=(
'For increased security, you can avoid storing this in your database by leaving '
' this field blank and setting '
'SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} '
'in your instance\'s Django settings (or lms.auth.json)'
)
)
other_settings = models.TextField(blank=True, help_text="Optional JSON object with advanced settings, if any.")
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "Provider Configuration (OAuth)"
verbose_name_plural = verbose_name
def clean(self):
""" Standardize and validate fields """
super(OAuth2ProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if name == "KEY":
return self.key
if name == "SECRET":
if self.secret:
return self.secret
# To allow instances to avoid storing secrets in the DB, the secret can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_OAUTH_SECRETS', {}).get(self.backend_name, '')
if self.other_settings:
other_settings = json.loads(self.other_settings)
assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
return other_settings[name]
raise KeyError
class SAMLProviderConfig(ProviderConfig):
"""
Configuration Entry for a SAML/Shibboleth provider.
"""
prefix = 'saml'
KEY_FIELDS = ('idp_slug', )
backend_name = models.CharField(
max_length=50, default='tpa-saml', choices=[(name, name) for name in _PSA_SAML_BACKENDS], blank=False,
help_text="Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
idp_slug = models.SlugField(
max_length=30, db_index=True,
help_text=(
'A short string uniquely identifying this provider. '
'Cannot contain spaces and should be a usable as a CSS class. Examples: "ubc", "mit-staging"'
))
entity_id = models.CharField(
max_length=255, verbose_name="Entity ID", help_text="Example: https://idp.testshib.org/idp/shibboleth")
metadata_source = models.CharField(
max_length=255,
help_text=(
"URL to this provider's XML metadata. Should be an HTTPS URL. "
"Example: https://www.testshib.org/metadata/testshib-providers.xml"
))
attr_user_permanent_id = models.CharField(
max_length=128, blank=True, verbose_name="User ID Attribute",
help_text="URN of the SAML attribute that we can use as a unique, persistent user ID. Leave blank for default.")
attr_full_name = models.CharField(
max_length=128, blank=True, verbose_name="Full Name Attribute",
help_text="URN of SAML attribute containing the user's full name. Leave blank for default.")
attr_first_name = models.CharField(
max_length=128, blank=True, verbose_name="First Name Attribute",
help_text="URN of SAML attribute containing the user's first name. Leave blank for default.")
attr_last_name = models.CharField(
max_length=128, blank=True, verbose_name="Last Name Attribute",
help_text="URN of SAML attribute containing the user's last name. Leave blank for default.")
attr_username = models.CharField(
max_length=128, blank=True, verbose_name="Username Hint Attribute",
help_text="URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
attr_email = models.CharField(
max_length=128, blank=True, verbose_name="Email Attribute",
help_text="URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
other_settings = models.TextField(
verbose_name="Advanced settings", blank=True,
help_text=(
            'For advanced use cases, enter a JSON object with additional configuration. '
'The tpa-saml backend supports only {"requiredEntitlements": ["urn:..."]} '
'which can be used to require the presence of a specific eduPersonEntitlement.'
))
def clean(self):
""" Standardize and validate fields """
super(SAMLProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "Provider Configuration (SAML IdP)"
verbose_name_plural = "Provider Configuration (SAML IdPs)"
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {'idp': self.idp_slug}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend'] and self.idp_slug == pipeline['kwargs']['response']['idp_name']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
prefix = self.idp_slug + ":"
return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
def get_config(self):
"""
Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.
Essentially this just returns the values of this object and its
associated 'SAMLProviderData' entry.
"""
if self.other_settings:
conf = json.loads(self.other_settings)
else:
conf = {}
attrs = (
'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
'attr_last_name', 'attr_username', 'attr_email', 'entity_id')
for field in attrs:
val = getattr(self, field)
if val:
conf[field] = val
# Now get the data fetched automatically from the metadata.xml:
data = SAMLProviderData.current(self.entity_id)
if not data or not data.is_valid():
log.error("No SAMLProviderData found for %s. Run 'manage.py saml pull' to fix or debug.", self.entity_id)
raise AuthNotConfigured(provider_name=self.name)
conf['x509cert'] = data.public_key
conf['url'] = data.sso_url
return SAMLIdentityProvider(self.idp_slug, **conf)
class SAMLConfiguration(ConfigurationModel):
"""
General configuration required for this edX instance to act as a SAML
Service Provider and allow users to authenticate via third party SAML
Identity Providers (IdPs)
"""
private_key = models.TextField(
help_text=(
'To generate a key pair as two files, run '
'"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
'Paste the contents of saml.key here. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
public_key = models.TextField(
help_text=(
'Public key certificate. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name="Entity ID")
org_info_str = models.TextField(
verbose_name="Organization Info",
default='{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
help_text="JSON dictionary of 'url', 'displayname', and 'name' for each language",
)
other_config_str = models.TextField(
default='{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
help_text=(
"JSON object defining advanced settings that are passed on to python-saml. "
"Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
),
)
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "SAML Configuration"
verbose_name_plural = verbose_name
def clean(self):
""" Standardize and validate fields """
super(SAMLConfiguration, self).clean()
self.org_info_str = clean_json(self.org_info_str, dict)
self.other_config_str = clean_json(self.other_config_str, dict)
self.private_key = (
self.private_key
.replace("-----BEGIN RSA PRIVATE KEY-----", "")
.replace("-----BEGIN PRIVATE KEY-----", "")
.replace("-----END RSA PRIVATE KEY-----", "")
.replace("-----END PRIVATE KEY-----", "")
.strip()
)
self.public_key = (
self.public_key
.replace("-----BEGIN CERTIFICATE-----", "")
.replace("-----END CERTIFICATE-----", "")
.strip()
)
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if name == "ORG_INFO":
return json.loads(self.org_info_str)
if name == "SP_ENTITY_ID":
return self.entity_id
if name == "SP_PUBLIC_CERT":
if self.public_key:
return self.public_key
# To allow instances to avoid storing keys in the DB, the key pair can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
if name == "SP_PRIVATE_KEY":
if self.private_key:
return self.private_key
# To allow instances to avoid storing keys in the DB, the private key can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
other_config = json.loads(self.other_config_str)
if name in ("TECHNICAL_CONTACT", "SUPPORT_CONTACT"):
contact = {
"givenName": "{} Support".format(settings.PLATFORM_NAME),
"emailAddress": settings.TECH_SUPPORT_EMAIL
}
contact.update(other_config.get(name, {}))
return contact
return other_config[name] # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
class SAMLProviderData(models.Model):
"""
Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'
This data is only required during the actual authentication process.
"""
cache_timeout = 600
fetched_at = models.DateTimeField(db_index=True, null=False)
expires_at = models.DateTimeField(db_index=True, null=True)
entity_id = models.CharField(max_length=255, db_index=True) # This is the key for lookups in this table
sso_url = models.URLField(verbose_name="SSO URL")
public_key = models.TextField()
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "SAML Provider Data"
verbose_name_plural = verbose_name
ordering = ('-fetched_at', )
def is_valid(self):
""" Is this data valid? """
if self.expires_at and timezone.now() > self.expires_at:
return False
return bool(self.entity_id and self.sso_url and self.public_key)
is_valid.boolean = True
@classmethod
def cache_key_name(cls, entity_id):
""" Return the name of the key to use to cache the current data """
return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)
@classmethod
def current(cls, entity_id):
"""
Return the active data entry, if any, otherwise None
"""
cached = cache.get(cls.cache_key_name(entity_id))
if cached is not None:
return cached
try:
current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
except IndexError:
current = None
cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
return current
| agpl-3.0 | -6,730,590,762,113,479,000 | 42.817982 | 120 | 0.639858 | false |
ericmjl/bokeh | sphinx/source/docs/user_guide/examples/interaction_callbacks_for_range_update.py | 1 | 1632 | import numpy as np
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, CustomJS, Rect
from bokeh.plotting import figure, output_file, show
output_file('range_update_callback.html')
N = 4000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
"#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
]
source = ColumnDataSource({'x': [], 'y': [], 'width': [], 'height': []})
jscode = """
const data = source.data
const start = cb_obj.start
const end = cb_obj.end
data[%r] = [start + (end - start) / 2]
data[%r] = [end - start]
source.change.emit()
"""
p1 = figure(title='Pan and Zoom Here', x_range=(0, 100), y_range=(0, 100),
tools='box_zoom,wheel_zoom,pan,reset', plot_width=400, plot_height=400)
p1.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
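# jscode is a template: substituting ('x', 'width') or ('y', 'height') below
# decides which columns of `source` each range callback rewrites, so the
# rectangle glyph on the second plot keeps tracking the first plot's view.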
xcb = CustomJS(args=dict(source=source), code=jscode % ('x', 'width'))
ycb = CustomJS(args=dict(source=source), code=jscode % ('y', 'height'))
p1.x_range.js_on_change('start', xcb)
p1.x_range.js_on_change('end', xcb)
p1.y_range.js_on_change('start', ycb)
p1.y_range.js_on_change('end', ycb)
p2 = figure(title='See Zoom Window Here', x_range=(0, 100), y_range=(0, 100),
tools='', plot_width=400, plot_height=400)
p2.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
rect = Rect(x='x', y='y', width='width', height='height', fill_alpha=0.1,
line_color='black', fill_color='black')
p2.add_glyph(source, rect)
layout = row(p1, p2)
show(layout)
| bsd-3-clause | 269,026,434,572,336,500 | 31 | 83 | 0.639706 | false |
xujun10110/golismero | thirdparty_libs/django/utils/html.py | 78 | 11180 | """HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
import string
try:
from urllib.parse import quote, unquote, urlsplit, urlunsplit
except ImportError: # Python 2
from urllib import quote, unquote
from urlparse import urlsplit, urlunsplit
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_text, force_str
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.text import normalize_newlines
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('<', '>')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\u2022', '•', '•', '•']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
strip_tags_re = re.compile(r'<[^>]*?>', re.IGNORECASE)
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
"""
return mark_safe(force_text(text).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '''))
escape = allow_lazy(escape, six.text_type)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if isinstance(text, SafeData):
return text
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in
six.iteritems(kwargs)])
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
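# e.g. format_html('<b>{0}</b>', user_input) escapes user_input before it is
# interpolated and marks the final string safe, whereas plain str.format would
# leave any markup inside user_input unescaped.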
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
return strip_tags_re.sub('', force_text(value))
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def fix_ampersands(value):
"""Returns the given HTML with all unencoded ampersands encoded correctly."""
return unencoded_ampersands_re.sub('&', force_text(value))
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
url = unquote(force_str(url))
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
match = None
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % middle)
elif not ':' in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
def clean_html(text):
"""
Clean the given HTML. Specifically, do the following:
* Convert <b> and <i> to <strong> and <em>.
* Encode all ampersands correctly.
* Remove all "target" attributes from <a> tags.
* Remove extraneous HTML, such as presentational tags that open and
immediately close and <br clear="all">.
* Convert hard-coded bullets into HTML unordered lists.
* Remove stuff like "<p> </p>", but only if it's at the
bottom of the text.
"""
from django.utils.text import normalize_newlines
text = normalize_newlines(force_text(text))
text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
text = fix_ampersands(text)
# Remove all target="" attributes from <a> tags.
text = link_target_attribute_re.sub('\\1', text)
# Trim stupid HTML such as <br clear="all">.
text = html_gunk_re.sub('', text)
# Convert hard-coded bullets into HTML unordered lists.
def replace_p_tags(match):
s = match.group().replace('</p>', '</li>')
for d in DOTS:
s = s.replace('<p>%s' % d, '<li>')
return '<ul>\n%s\n</ul>' % s
text = hard_coded_bullets_re.sub(replace_p_tags, text)
# Remove stuff like "<p> </p>", but only if it's at the bottom
# of the text.
text = trailing_empty_content_re.sub('', text)
return text
clean_html = allow_lazy(clean_html, six.text_type)
| gpl-2.0 | 7,069,716,072,698,103,000 | 39.952381 | 147 | 0.591592 | false |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/jinja2/testsuite/ext.py | 402 | 18086 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.ext
~~~~~~~~~~~~~~~~~~~~
Tests for the extensions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, DictLoader, contextfunction, nodes
from jinja2.exceptions import TemplateAssertionError
from jinja2.ext import Extension
from jinja2.lexer import Token, count_newlines
from jinja2._compat import next, BytesIO, itervalues, text_type
importable_object = 23
_gettext_re = re.compile(r'_\((.*?)\)(?s)')
i18n_templates = {
'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
'{% block body %}{% endblock %}',
'child.html': '{% extends "master.html" %}{% block body %}'
'{% trans %}watch out{% endtrans %}{% endblock %}',
'plural.html': '{% trans user_count %}One user online{% pluralize %}'
'{{ user_count }} users online{% endtrans %}',
'plural2.html': '{% trans user_count=get_user_count() %}{{ user_count }}s'
'{% pluralize %}{{ user_count }}p{% endtrans %}',
'stringformat.html': '{{ _("User: %(num)s")|format(num=user_count) }}'
}
newstyle_i18n_templates = {
'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
'{% block body %}{% endblock %}',
'child.html': '{% extends "master.html" %}{% block body %}'
'{% trans %}watch out{% endtrans %}{% endblock %}',
'plural.html': '{% trans user_count %}One user online{% pluralize %}'
'{{ user_count }} users online{% endtrans %}',
'stringformat.html': '{{ _("User: %(num)s", num=user_count) }}',
'ngettext.html': '{{ ngettext("%(num)s apple", "%(num)s apples", apples) }}',
'ngettext_long.html': '{% trans num=apples %}{{ num }} apple{% pluralize %}'
'{{ num }} apples{% endtrans %}',
'transvars1.html': '{% trans %}User: {{ num }}{% endtrans %}',
'transvars2.html': '{% trans num=count %}User: {{ num }}{% endtrans %}',
'transvars3.html': '{% trans count=num %}User: {{ count }}{% endtrans %}',
'novars.html': '{% trans %}%(hello)s{% endtrans %}',
'vars.html': '{% trans %}{{ foo }}%(foo)s{% endtrans %}',
'explicitvars.html': '{% trans foo="42" %}%(foo)s{% endtrans %}'
}
languages = {
'de': {
'missing': u'fehlend',
'watch out': u'pass auf',
'One user online': u'Ein Benutzer online',
'%(user_count)s users online': u'%(user_count)s Benutzer online',
'User: %(num)s': u'Benutzer: %(num)s',
'User: %(count)s': u'Benutzer: %(count)s',
'%(num)s apple': u'%(num)s Apfel',
'%(num)s apples': u'%(num)s Äpfel'
}
}
@contextfunction
def gettext(context, string):
language = context.get('LANGUAGE', 'en')
return languages.get(language, {}).get(string, string)
@contextfunction
def ngettext(context, s, p, n):
language = context.get('LANGUAGE', 'en')
if n != 1:
return languages.get(language, {}).get(p, p)
return languages.get(language, {}).get(s, s)
i18n_env = Environment(
loader=DictLoader(i18n_templates),
extensions=['jinja2.ext.i18n']
)
i18n_env.globals.update({
'_': gettext,
'gettext': gettext,
'ngettext': ngettext
})
newstyle_i18n_env = Environment(
loader=DictLoader(newstyle_i18n_templates),
extensions=['jinja2.ext.i18n']
)
newstyle_i18n_env.install_gettext_callables(gettext, ngettext, newstyle=True)
class TestExtension(Extension):
tags = set(['test'])
ext_attr = 42
def parse(self, parser):
return nodes.Output([self.call_method('_dump', [
nodes.EnvironmentAttribute('sandboxed'),
self.attr('ext_attr'),
nodes.ImportedName(__name__ + '.importable_object'),
nodes.ContextReference()
])]).set_lineno(next(parser.stream).lineno)
def _dump(self, sandboxed, ext_attr, imported_object, context):
return '%s|%s|%s|%s' % (
sandboxed,
ext_attr,
imported_object,
context.blocks
)
class PreprocessorExtension(Extension):
def preprocess(self, source, name, filename=None):
return source.replace('[[TEST]]', '({{ foo }})')
class StreamFilterExtension(Extension):
def filter_stream(self, stream):
for token in stream:
if token.type == 'data':
for t in self.interpolate(token):
yield t
else:
yield token
def interpolate(self, token):
pos = 0
end = len(token.value)
lineno = token.lineno
while 1:
match = _gettext_re.search(token.value, pos)
if match is None:
break
value = token.value[pos:match.start()]
if value:
yield Token(lineno, 'data', value)
lineno += count_newlines(token.value)
yield Token(lineno, 'variable_begin', None)
yield Token(lineno, 'name', 'gettext')
yield Token(lineno, 'lparen', None)
yield Token(lineno, 'string', match.group(1))
yield Token(lineno, 'rparen', None)
yield Token(lineno, 'variable_end', None)
pos = match.end()
if pos < end:
yield Token(lineno, 'data', token.value[pos:])
class ExtensionsTestCase(JinjaTestCase):
def test_extend_late(self):
env = Environment()
env.add_extension('jinja2.ext.autoescape')
t = env.from_string('{% autoescape true %}{{ "<test>" }}{% endautoescape %}')
assert t.render() == '<test>'
def test_loop_controls(self):
env = Environment(extensions=['jinja2.ext.loopcontrols'])
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item % 2 == 0 %}{% continue %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '13'
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item > 2 %}{% break %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '12'
def test_do(self):
env = Environment(extensions=['jinja2.ext.do'])
tmpl = env.from_string('''
{%- set items = [] %}
{%- for char in "foo" %}
{%- do items.append(loop.index0 ~ char) %}
{%- endfor %}{{ items|join(', ') }}''')
assert tmpl.render() == '0f, 1o, 2o'
def test_with(self):
env = Environment(extensions=['jinja2.ext.with_'])
tmpl = env.from_string('''\
{% with a=42, b=23 -%}
{{ a }} = {{ b }}
{% endwith -%}
{{ a }} = {{ b }}\
''')
assert [x.strip() for x in tmpl.render(a=1, b=2).splitlines()] \
== ['42 = 23', '1 = 2']
def test_extension_nodes(self):
env = Environment(extensions=[TestExtension])
tmpl = env.from_string('{% test %}')
assert tmpl.render() == 'False|42|23|{}'
def test_identifier(self):
assert TestExtension.identifier == __name__ + '.TestExtension'
def test_rebinding(self):
original = Environment(extensions=[TestExtension])
overlay = original.overlay()
for env in original, overlay:
for ext in itervalues(env.extensions):
assert ext.environment is env
def test_preprocessor_extension(self):
env = Environment(extensions=[PreprocessorExtension])
tmpl = env.from_string('{[[TEST]]}')
assert tmpl.render(foo=42) == '{(42)}'
def test_streamfilter_extension(self):
env = Environment(extensions=[StreamFilterExtension])
env.globals['gettext'] = lambda x: x.upper()
tmpl = env.from_string('Foo _(bar) Baz')
out = tmpl.render()
assert out == 'Foo BAR Baz'
def test_extension_ordering(self):
class T1(Extension):
priority = 1
class T2(Extension):
priority = 2
env = Environment(extensions=[T1, T2])
ext = list(env.iter_extensions())
assert ext[0].__class__ is T1
assert ext[1].__class__ is T2
class InternationalizationTestCase(JinjaTestCase):
def test_trans(self):
tmpl = i18n_env.get_template('child.html')
assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
def test_trans_plural(self):
tmpl = i18n_env.get_template('plural.html')
assert tmpl.render(LANGUAGE='de', user_count=1) == 'Ein Benutzer online'
assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
def test_trans_plural_with_functions(self):
tmpl = i18n_env.get_template('plural2.html')
def get_user_count():
get_user_count.called += 1
return 1
get_user_count.called = 0
assert tmpl.render(LANGUAGE='de', get_user_count=get_user_count) == '1s'
assert get_user_count.called == 1
def test_complex_plural(self):
tmpl = i18n_env.from_string('{% trans foo=42, count=2 %}{{ count }} item{% '
'pluralize count %}{{ count }} items{% endtrans %}')
assert tmpl.render() == '2 items'
self.assert_raises(TemplateAssertionError, i18n_env.from_string,
'{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
def test_trans_stringformatting(self):
tmpl = i18n_env.get_template('stringformat.html')
assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
def test_extract(self):
from jinja2.ext import babel_extract
source = BytesIO('''
{{ gettext('Hello World') }}
{% trans %}Hello World{% endtrans %}
{% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
'''.encode('ascii')) # make python 3 happy
assert list(babel_extract(source, ('gettext', 'ngettext', '_'), [], {})) == [
(2, 'gettext', u'Hello World', []),
(3, 'gettext', u'Hello World', []),
(4, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
]
def test_comment_extract(self):
from jinja2.ext import babel_extract
source = BytesIO('''
{# trans first #}
{{ gettext('Hello World') }}
{% trans %}Hello World{% endtrans %}{# trans second #}
{#: third #}
{% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
'''.encode('utf-8')) # make python 3 happy
assert list(babel_extract(source, ('gettext', 'ngettext', '_'), ['trans', ':'], {})) == [
(3, 'gettext', u'Hello World', ['first']),
(4, 'gettext', u'Hello World', ['second']),
(6, 'ngettext', (u'%(users)s user', u'%(users)s users', None), ['third'])
]
class NewstyleInternationalizationTestCase(JinjaTestCase):
def test_trans(self):
tmpl = newstyle_i18n_env.get_template('child.html')
assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
def test_trans_plural(self):
tmpl = newstyle_i18n_env.get_template('plural.html')
assert tmpl.render(LANGUAGE='de', user_count=1) == 'Ein Benutzer online'
assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
def test_complex_plural(self):
tmpl = newstyle_i18n_env.from_string('{% trans foo=42, count=2 %}{{ count }} item{% '
'pluralize count %}{{ count }} items{% endtrans %}')
assert tmpl.render() == '2 items'
self.assert_raises(TemplateAssertionError, i18n_env.from_string,
'{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
def test_trans_stringformatting(self):
tmpl = newstyle_i18n_env.get_template('stringformat.html')
assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
def test_newstyle_plural(self):
tmpl = newstyle_i18n_env.get_template('ngettext.html')
assert tmpl.render(LANGUAGE='de', apples=1) == '1 Apfel'
assert tmpl.render(LANGUAGE='de', apples=5) == u'5 Äpfel'
def test_autoescape_support(self):
env = Environment(extensions=['jinja2.ext.autoescape',
'jinja2.ext.i18n'])
env.install_gettext_callables(lambda x: u'<strong>Wert: %(name)s</strong>',
lambda s, p, n: s, newstyle=True)
t = env.from_string('{% autoescape ae %}{{ gettext("foo", name='
'"<test>") }}{% endautoescape %}')
assert t.render(ae=True) == '<strong>Wert: <test></strong>'
assert t.render(ae=False) == '<strong>Wert: <test></strong>'
def test_num_used_twice(self):
tmpl = newstyle_i18n_env.get_template('ngettext_long.html')
assert tmpl.render(apples=5, LANGUAGE='de') == u'5 Äpfel'
def test_num_called_num(self):
source = newstyle_i18n_env.compile('''
{% trans num=3 %}{{ num }} apple{% pluralize
%}{{ num }} apples{% endtrans %}
''', raw=True)
# quite hacky, but the only way to properly test that. The idea is
# that the generated code does not pass num twice (although that
# would work) for better performance. This only works on the
# newstyle gettext of course
assert re.search(r"l_ngettext, u?'\%\(num\)s apple', u?'\%\(num\)s "
r"apples', 3", source) is not None
def test_trans_vars(self):
t1 = newstyle_i18n_env.get_template('transvars1.html')
t2 = newstyle_i18n_env.get_template('transvars2.html')
t3 = newstyle_i18n_env.get_template('transvars3.html')
assert t1.render(num=1, LANGUAGE='de') == 'Benutzer: 1'
assert t2.render(count=23, LANGUAGE='de') == 'Benutzer: 23'
assert t3.render(num=42, LANGUAGE='de') == 'Benutzer: 42'
def test_novars_vars_escaping(self):
t = newstyle_i18n_env.get_template('novars.html')
assert t.render() == '%(hello)s'
t = newstyle_i18n_env.get_template('vars.html')
assert t.render(foo='42') == '42%(foo)s'
t = newstyle_i18n_env.get_template('explicitvars.html')
assert t.render() == '%(foo)s'
class AutoEscapeTestCase(JinjaTestCase):
def test_scoped_setting(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape false %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=False)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape true %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
def test_nonvolatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{{ {"foo": "<test>"}|xmlattr|escape }}')
assert tmpl.render() == ' foo="<test>"'
tmpl = env.from_string('{% autoescape false %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render() == ' foo="&lt;test&gt;"'
def test_volatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{% autoescape foo %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render(foo=False) == ' foo="&lt;test&gt;"'
assert tmpl.render(foo=True) == ' foo="<test>"'
def test_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmpl = env.from_string('{% autoescape true %}{% set x = "<x>" %}{{ x }}'
'{% endautoescape %}{{ x }}{{ "<y>" }}')
assert tmpl.render(x=1) == '<x>1<y>'
def test_volatile_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmplsource = '''
{% autoescape val %}
{% macro foo(x) %}
[{{ x }}]
{% endmacro %}
{{ foo().__class__.__name__ }}
{% endautoescape %}
{{ '<testing>' }}
'''
tmpl = env.from_string(tmplsource)
assert tmpl.render(val=True).split()[0] == 'Markup'
assert tmpl.render(val=False).split()[0] == text_type.__name__
# looking at the source we should see <testing> there in raw
# (and then escaped as well)
env = Environment(extensions=['jinja2.ext.autoescape'])
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExtensionsTestCase))
suite.addTest(unittest.makeSuite(InternationalizationTestCase))
suite.addTest(unittest.makeSuite(NewstyleInternationalizationTestCase))
suite.addTest(unittest.makeSuite(AutoEscapeTestCase))
return suite
| apache-2.0 | -6,856,594,468,536,722,000 | 38.396514 | 97 | 0.544655 | false |
matthewelse/british-informatics-olympiad | 2011/q3-upsidedown.py | 1 | 1050 | # A solution to the British Informatics Olympiad 2011 Question 3
# Scores 24/24
from __future__ import print_function
try:
input = raw_input
except:
pass
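# The generated numbers pair digits that sum to 10: the i-th digit from the
# left fixes the i-th digit from the right (1<->9, 2<->8, ...), with a 5 in the
# middle for odd lengths. Each of the first n//2 digits can be 1-9
# independently, so there are 9**(n//2) such numbers with n digits; that is
# what number_with_n_digits() counts, and nth_with_n_digits() walks them in
# increasing order by treating n-1 as a base-9 index over those digit choices.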
def number_with_n_digits(n):
return 9**(n//2)
def nth_with_n_digits(number_of_digits, n):
if number_of_digits == 0:
return ""
if number_of_digits % 2 == 1:
even = nth_with_n_digits(number_of_digits - 1, n)
middle = len(even)//2
return even[:middle] + "5" + even[middle:]
# 9**(n / 2)
output = ""
# it needs to start at 0
n -= 1
for i in range(number_of_digits // 2):
n, inner = divmod(n, 9)
inner += 1
#n += 1
output = str(inner) + output + str(10-inner)
return output
nth = int(input())
#nth = 11
sum_so_far = 0
number_of_digits = 0
while sum_so_far < nth:
number_of_digits += 1
sum_so_far += number_with_n_digits(number_of_digits)
#print("The number has %i digits!" % number_of_digits)
#print(nth - sum_so_far + number_with_n_digits(number_of_digits))
print(nth_with_n_digits(number_of_digits, nth - sum_so_far + number_with_n_digits(number_of_digits)))
| mit | -6,917,340,826,105,736,000 | 22.333333 | 101 | 0.651429 | false |
myjang0507/slte | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but to
# provide end users/developers a flexible way to analyze events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to a RAM-based FS to speed up
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
# Each number will have at least one '#'
    snum = '#' * int(math.log(num, 2) + 1)
return snum
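# e.g. counts of 1, 2-3, 4-7 and 8-15 map to '#', '##', '###' and '####', so
# every extra '#' roughly corresponds to a doubling of the event count.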
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 | -593,690,749,052,352,500 | 38.116402 | 117 | 0.565805 | false |
remik/django-page-cms | pages/testproj/test_settings.py | 1 | 5420 | # -*- coding: utf-8 -*-
# Django test settings for cms project.
import os
PROJECT_DIR = os.path.dirname(__file__)
TEST_PROJ = 'pages.testproj'
DEBUG = True
USE_TZ = True
ADMINS = (
# ('Your Name', '[email protected]'),
)
CACHE_BACKEND = 'locmem:///'
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db'
}
}
# We still want to be able to test with 1.1.X
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'test.db'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
MEDIA_ROOT = STATIC_ROOT = os.path.join(PROJECT_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_DIR, 'media', 'static')
STATIC_URL = MEDIA_URL + 'static/'
# Absolute path to the directory that holds pages media.
# PAGES_MEDIA_ROOT = os.path.join(STATIC_ROOT, 'pages', 'media', 'pages')
# Absolute path to the directory that holds media.
ADMIN_MEDIA_ROOT = os.path.join(STATIC_ROOT, 'admin_media')
ADMIN_MEDIA_PREFIX = '/admin_media/'
FIXTURE_DIRS = [os.path.join(PROJECT_DIR, 'fixtures')]
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*xq7m@)*f2awoj!spa0(jibsrz9%c0d=e(g)v*!17y(vx0ue_3'
_TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.media",
"pages.context_processors.media",
)
INTERNAL_IPS = ('127.0.0.1',)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
)
ROOT_URLCONF = TEST_PROJ + '.urls'
_TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
)
CACHE_BACKEND = "locmem:///?timeout=300&max_entries=6000"
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'rest_framework',
'mptt',
'pages',
'taggit',
TEST_PROJ + '.documents',
'django.contrib.staticfiles',
# these 2 package don't create any dependecies
# haystack change coverage score report by importing modules
#'haystack',
)
# Default language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
# languages you want to translate into the CMS.
PAGE_LANGUAGES = (
('de', gettext_noop('German')),
('fr-ch', gettext_noop('Swiss french')),
('en-us', gettext_noop('US English')),
)
# You should add here all the languages you want to accept as valid client
# languages. By default we copy the PAGE_LANGUAGES constant and add some other
# similar languages.
languages = list(PAGE_LANGUAGES)
languages.append(('fr-fr', gettext_noop('French')))
languages.append(('fr-be', gettext_noop('Belgium french')))
languages.append(('it-it', gettext_noop('Italian')))
LANGUAGES = languages
# This enables you to map a language (or languages) to another one; these languages should
# be in the LANGUAGES config
def language_mapping(lang):
if lang.startswith('fr'):
# serve swiss french for everyone
return 'fr-ch'
return lang
PAGE_LANGUAGE_MAPPING = language_mapping
PAGE_DEFAULT_TEMPLATE = 'pages/examples/index.html'
PAGE_API_ENABLED = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': _TEMPLATE_DIRS,
'OPTIONS': {
'debug': DEBUG,
'context_processors': _TEMPLATE_CONTEXT_PROCESSORS,
},
},
]
PAGE_TEMPLATES = (
('pages/examples/nice.html', 'nice one'),
('pages/examples/cool.html', 'cool one'),
('pages/examples/editor.html', 'raw editor'),
('pages/tests/untranslated.html', 'untranslated'),
)
PAGE_SANITIZE_USER_INPUT = True
PAGE_USE_SITE_ID = True
PAGE_TAGGING = True
HAYSTACK_SITECONF = 'example.search_sites'
HAYSTACK_SEARCH_ENGINE = 'dummy'
#HAYSTACK_WHOOSH_PATH = os.path.join(PROJECT_DIR, 'whoosh_index')
COVERAGE_EXCLUDE_MODULES = (
"pages.migrations.*",
"pages.tests.*",
"pages.urls",
"pages.__init__",
"pages.search_indexes",
"pages.management.commands.*",
)
COVERAGE_HTML_REPORT = True
COVERAGE_BRANCH_COVERAGE = False
PAGE_ENABLE_TESTS = True
try:
from local_settings import *
except ImportError:
pass
| bsd-3-clause | -677,724,726,457,283,800 | 27.082902 | 88 | 0.690406 | false |
HaebinShin/tensorflow | tensorflow/python/kernel_tests/clip_ops_test.py | 6 | 9754 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.clip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class ClipTest(tf.test.TestCase):
# ClipByValue test
def testClipByValue(self):
with self.test_session():
x = tf.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0],
[4.0, 4.4, 4.4]]
clip_value = 4.4
ans = tf.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByValueNonFinite(self):
with self.test_session():
x = tf.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = tf.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0],
[3.2, 0.0, 0.0]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
np_ans = [[-2.4, 0.0, 0.0],
[3.2, 0.0, 3.0]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm, [0])
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[3.2, 0.0, 2.4]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 3.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
# ClipByGlobalNorm tests
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm((x0, None, x1, None), clip_norm)
self.assertTrue(ans[1] is None)
self.assertTrue(ans[3] is None)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[2].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.IndexedSlices(tf.constant([1.0, -2.0]),
tf.constant([3, 4]))
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].values.eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormPreservesDenseShape(self):
dense_shape = (1,)
slices = tf.IndexedSlices(
tf.constant([1.0]),
tf.constant([0]),
dense_shape=dense_shape)
ans, _ = tf.clip_by_global_norm([slices], 1.0)
modified_slices = ans[0]
self.assertEqual(dense_shape, slices.dense_shape)
self.assertEqual(dense_shape, modified_slices.dense_shape)
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
np_ans_0 = [[-2.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
np_ans_1 = [1.0, -2.0]
clip_norm = 6.0
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
x0 = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([0.0, 0.0])
# Norm = 0, no changes
np_ans_0 = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
np_ans_1 = [0.0, 0.0]
clip_norm = 6.0
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 0.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByAverageNormClipped(self):
# Norm clipping when average clip_norm < 0.83333333
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0],
[3.84, 0.0, 0.0]]
clip_norm = 0.8
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormNotClipped(self):
# No norm clipping when average clip_norm >= 0.83333333
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
clip_norm = 0.9
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormZero(self):
# No norm clipping when average clip_norm = 0
with self.test_session():
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
clip_norm = 0.9
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 2,580,761,221,660,276,700 | 32.750865 | 80 | 0.550441 | false |
glensc/owncloud-client | admin/win/nsi/l10n/bin/build_locale_nsi.py | 12 | 6085 | ##############################################################################
#
# PROJECT: ownCloud v1.0
# LICENSE: See LICENSE in the top level directory
#
##############################################################################
import collections
import os
import polib
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--output", dest="output",
help="Directory for localized output", default="../Shared/installer/nightly_localized.nsi")
parser.add_option("-p", "--podir", dest="podir",
help="Directory containing PO files", default="../Shared/installer/locale/")
parser.add_option("-l", "--lang", dest="lang",
help="Default language of the NSI", default="English" )
(options, args) = parser.parse_args()
# Define a dict to convert locale names to language names
localeToName = {
"af" : "Afrikaans",
"sq" : "Albanian",
"ar" : "Arabic",
"hy" : "Armenian",
"eu" : "Basque",
"be" : "Belarusian",
"bs" : "Bosnian",
"br" : "Breton",
"bg" : "Bulgarian",
"ca" : "Catalan",
"bem" : "Cibemba",
"hr" : "Croatian",
"cs" : "Czech",
"da" : "Danish",
"nl" : "Dutch",
"efi" : "Efik",
"en" : "English",
"eo" : "Esperanto",
"et" : "Estonian",
"fa" : "Farsi",
"fi" : "Finnish",
"fr" : "French",
"gl" : "Galician",
"ka" : "Georgian",
"de" : "German",
"el" : "Greek",
"he" : "Hebrew",
"hi" : "Hindi",
"hu" : "Hungarian",
"is" : "Icelandic",
"ig" : "Igbo",
"id" : "Indonesian",
"ga" : "Irish",
"it" : "Italian",
"ja" : "Japanese",
"km" : "Khmer",
"ko" : "Korean",
"ku" : "Kurdish",
"lv" : "Latvian",
"lt" : "Lithuanian",
"lb" : "Luxembourgish",
"mk" : "Macedonian",
"mg" : "Malagasy",
"ms" : "Malay",
"mn" : "Mongolian",
"nb" : "Norwegian",
"nn" : "NorwegianNynorsk",
"ps" : "Pashto",
"pl" : "Polish",
"pt" : "Portuguese",
"pt_BR" : "PortugueseBR",
"ro" : "Romanian",
"ru" : "Russian",
"sr" : "Serbian",
"sr_sp" : "SerbianLatin",
"st" : "Sesotho",
"sn" : "Shona",
"zh_CN" : "SimpChinese",
"sk" : "Slovak",
"sl" : "Slovenian",
"es" : "Spanish",
"es_AR" : "SpanishInternational",
"sw" : "Swahili",
"sv" : "Swedish",
"ta" : "Tamil",
"th" : "Thai",
"zh_HK" : "TradChinese",
"tr" : "Turkish",
"tw" : "Twi",
"uk" : "Ukrainian",
"ug" : "Uyghur",
"uz" : "Uzbek",
"ca@valencia" : "Valencian",
"vi" : "Vietnamese",
"cy" : "Welsh",
"yo" : "Yoruba",
"zu" : "Zulu",
}
def escapeNSIS(st):
return st.replace('\\', r'$\\')\
.replace('\t', r'$\t')\
.replace('\r', r'\r')\
.replace('\n', r'\n')\
.replace('\"', r'$\"')\
.replace('$$\\', '$\\')
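# Illustrative example of the escaping above (input is hypothetical, not taken
# from a real PO file): escapeNSIS('a\tb"c') == 'a$\\tb$\\"c'
# i.e. a literal tab becomes $\t and a double quote becomes $\"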
translationCache = {}
# The purpose of this loop is to walk the podir looking for a PO file for each locale name
# Once we've found a PO file, we use polib to read every translated entry
# Using this, for each language, we store a dict of entries - { nsilabel (comment) : translation (msgstr) }
# For untranslated entries, we use msgid instead of msgstr (i.e. the default English string)
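# Illustrative shape of the cache built below (language names, labels and
# translations here are hypothetical examples, not real PO data):
#   translationCache == {
#       'French': OrderedDict([('WelcomeLabel', 'Bienvenue'), ...]),
#       'German': OrderedDict([('WelcomeLabel', 'Willkommen'), ...]),
#   }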
for root,dirs,files in os.walk(options.podir):
for file in files:
filename,ext = os.path.splitext(file)
if ext == ".po":
# Valid locale filename (fr.po, de.po etc)?
if filename in localeToName:
language = localeToName[filename]
translationCache[language] = collections.OrderedDict()
po = polib.pofile(os.path.join(root,file))
for entry in po.translated_entries():
# Loop through all our labels and add translation (each translation may have multiple labels)
for label in entry.comment.split():
translationCache[language][label] = escapeNSIS(entry.msgstr)
# For untranslated strings, let's add the English entry
for entry in po.untranslated_entries():
for label in entry.comment.split():
print("Warning: Label '%s' for language '%s' remains untranslated"%(label,language))
translationCache[language][label] = escapeNSIS(entry.msgid)
def tostr(obj):
if type(obj) == unicode:
return obj.encode("utf-8")
else:
return obj
NSILanguages = []
NSIDeclarations = []
# file header
NSILanguages.append( tostr('; Auto-generated - do not modify\n') )
NSIDeclarations.append( tostr('; Auto-generated - do not modify\n') )
# loop through the languages and generate one nsh file for each language
lineNo = 1
for language,translations in translationCache.iteritems():
NSINewLines = []
NSINewLines.append( tostr('# Auto-generated - do not modify\n') )
count = 0
# if the language isn't the default, we add our MUI_LANGUAGE macro
if language.upper() != options.lang.upper():
NSILanguages.append( tostr('!insertmacro MUI_LANGUAGE "%s"\n'%language) )
# For every translation we grabbed from the .po, let's add our StrCpy command
for label,value in translations.iteritems():
NSINewLines.append( tostr('StrCpy $%s "%s"\n' % (label,value)) )
if language.upper() == options.lang.upper():
NSIDeclarations.append( tostr('Var %s\n' % label) )
count += 1
NSIWorkingFile = open('%s/%s.nsh' % (options.output, language),"w")
NSIWorkingFile.writelines(NSINewLines)
NSIWorkingFile.close()
print ( "%i translations merged for language '%s'"%(count,language) )
# Finally, let's write languages.nsh and declarations.nsh
NSIWorkingFile = open('%s/languages.nsh' % options.output,"w")
NSIWorkingFile.writelines(NSILanguages)
NSIWorkingFile.close()
NSIWorkingFile = open('%s/declarations.nsh' % options.output,"w")
NSIWorkingFile.writelines(NSIDeclarations)
NSIWorkingFile.close()
print ( "NSI Localization Operation Complete" )
| gpl-2.0 | 4,218,139,049,276,264,400 | 32.434066 | 113 | 0.559901 | false |
atiqueahmedziad/addons-server | src/olympia/accounts/urls.py | 3 | 1619 | from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from olympia.bandwagon.views import CollectionAddonViewSet, CollectionViewSet
from . import views
accounts = SimpleRouter()
accounts.register(r'account', views.AccountViewSet, base_name='account')
collections = NestedSimpleRouter(accounts, r'account', lookup='user')
collections.register(r'collections', CollectionViewSet,
base_name='collection')
sub_collections = NestedSimpleRouter(collections, r'collections',
lookup='collection')
sub_collections.register('addons', CollectionAddonViewSet,
base_name='collection-addon')
notifications = NestedSimpleRouter(accounts, r'account', lookup='user')
notifications.register(r'notifications', views.AccountNotificationViewSet,
base_name='notification')
urlpatterns = [
url(r'^authenticate/$', views.AuthenticateView.as_view(),
name='accounts.authenticate'),
url(r'^login/start/$',
views.LoginStartView.as_view(),
name='accounts.login_start'),
url(r'^session/$', views.SessionView.as_view(),
name='accounts.session'),
url(r'', include(accounts.urls)),
url(r'^profile/$', views.ProfileView.as_view(), name='account-profile'),
url(r'^super-create/$', views.AccountSuperCreate.as_view(),
name='accounts.super-create'),
url(r'', include(collections.urls)),
url(r'', include(sub_collections.urls)),
url(r'', include(notifications.urls)),
]
| bsd-3-clause | -7,936,732,885,928,994,000 | 37.547619 | 77 | 0.689314 | false |
ol-loginov/intellij-community | python/lib/Lib/site-packages/django/contrib/syndication/feeds.py | 245 | 1367 | from django.contrib.syndication import views
from django.core.exceptions import ObjectDoesNotExist
import warnings
# This is part of the deprecated API
from django.contrib.syndication.views import FeedDoesNotExist, add_domain
class Feed(views.Feed):
"""Provided for backwards compatibility."""
def __init__(self, slug, request):
warnings.warn('The syndication feeds.Feed class is deprecated. Please '
'use the new class based view API.',
category=DeprecationWarning)
self.slug = slug
self.request = request
self.feed_url = getattr(self, 'feed_url', None) or request.path
self.title_template = self.title_template or ('feeds/%s_title.html' % slug)
self.description_template = self.description_template or ('feeds/%s_description.html' % slug)
def get_object(self, bits):
return None
def get_feed(self, url=None):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
if url:
bits = url.split('/')
else:
bits = []
try:
obj = self.get_object(bits)
except ObjectDoesNotExist:
raise FeedDoesNotExist
return super(Feed, self).get_feed(obj, self.request)
| apache-2.0 | 961,640,140,581,728,900 | 34.973684 | 101 | 0.635699 | false |
DailyActie/Surrogate-Model | 01-codes/tensorflow-master/tensorflow/python/framework/dtypes_test.py | 1 | 10625 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class TypesTest(test_util.TensorFlowTestCase):
def testAllTypesConstructible(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
self.assertEqual(
datatype_enum, tf.DType(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToDType(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
self.assertEqual(
datatype_enum, tf.as_dtype(datatype_enum).as_datatype_enum)
def testAllTypesConvertibleToNumpyDtype(self):
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dtype = tf.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
_ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
if dtype.base_dtype != tf.bfloat16:
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
self.assertEqual(tf.as_dtype(datatype_enum).base_dtype,
tf.as_dtype(numpy_dtype))
def testInvalid(self):
with self.assertRaises(TypeError):
tf.DType(types_pb2.DT_INVALID)
with self.assertRaises(TypeError):
tf.as_dtype(types_pb2.DT_INVALID)
def testNumpyConversion(self):
self.assertIs(tf.float32, tf.as_dtype(np.float32))
self.assertIs(tf.float64, tf.as_dtype(np.float64))
self.assertIs(tf.int32, tf.as_dtype(np.int32))
self.assertIs(tf.int64, tf.as_dtype(np.int64))
self.assertIs(tf.uint8, tf.as_dtype(np.uint8))
self.assertIs(tf.uint16, tf.as_dtype(np.uint16))
self.assertIs(tf.int16, tf.as_dtype(np.int16))
self.assertIs(tf.int8, tf.as_dtype(np.int8))
self.assertIs(tf.complex64, tf.as_dtype(np.complex64))
self.assertIs(tf.string, tf.as_dtype(np.object))
self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype))
self.assertIs(tf.bool, tf.as_dtype(np.bool))
with self.assertRaises(TypeError):
tf.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))
def testStringConversion(self):
self.assertIs(tf.float32, tf.as_dtype("float32"))
self.assertIs(tf.float64, tf.as_dtype("float64"))
self.assertIs(tf.int32, tf.as_dtype("int32"))
self.assertIs(tf.uint8, tf.as_dtype("uint8"))
self.assertIs(tf.uint16, tf.as_dtype("uint16"))
self.assertIs(tf.int16, tf.as_dtype("int16"))
self.assertIs(tf.int8, tf.as_dtype("int8"))
self.assertIs(tf.string, tf.as_dtype("string"))
self.assertIs(tf.complex64, tf.as_dtype("complex64"))
self.assertIs(tf.int64, tf.as_dtype("int64"))
self.assertIs(tf.bool, tf.as_dtype("bool"))
self.assertIs(tf.qint8, tf.as_dtype("qint8"))
self.assertIs(tf.quint8, tf.as_dtype("quint8"))
self.assertIs(tf.qint32, tf.as_dtype("qint32"))
self.assertIs(tf.bfloat16, tf.as_dtype("bfloat16"))
self.assertIs(tf.float32_ref, tf.as_dtype("float32_ref"))
self.assertIs(tf.float64_ref, tf.as_dtype("float64_ref"))
self.assertIs(tf.int32_ref, tf.as_dtype("int32_ref"))
self.assertIs(tf.uint8_ref, tf.as_dtype("uint8_ref"))
self.assertIs(tf.int16_ref, tf.as_dtype("int16_ref"))
self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
self.assertIs(tf.quint8_ref, tf.as_dtype("quint8_ref"))
self.assertIs(tf.qint32_ref, tf.as_dtype("qint32_ref"))
self.assertIs(tf.bfloat16_ref, tf.as_dtype("bfloat16_ref"))
with self.assertRaises(TypeError):
tf.as_dtype("not_a_type")
def testDTypesHaveUniqueNames(self):
dtypes = []
names = set()
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dtype = tf.as_dtype(datatype_enum)
dtypes.append(dtype)
names.add(dtype.name)
self.assertEqual(len(dtypes), len(names))
def testIsInteger(self):
self.assertEqual(tf.as_dtype("int8").is_integer, True)
self.assertEqual(tf.as_dtype("int16").is_integer, True)
self.assertEqual(tf.as_dtype("int32").is_integer, True)
self.assertEqual(tf.as_dtype("int64").is_integer, True)
self.assertEqual(tf.as_dtype("uint8").is_integer, True)
self.assertEqual(tf.as_dtype("uint16").is_integer, True)
self.assertEqual(tf.as_dtype("complex64").is_integer, False)
self.assertEqual(tf.as_dtype("float").is_integer, False)
self.assertEqual(tf.as_dtype("double").is_integer, False)
self.assertEqual(tf.as_dtype("string").is_integer, False)
self.assertEqual(tf.as_dtype("bool").is_integer, False)
def testIsFloating(self):
self.assertEqual(tf.as_dtype("int8").is_floating, False)
self.assertEqual(tf.as_dtype("int16").is_floating, False)
self.assertEqual(tf.as_dtype("int32").is_floating, False)
self.assertEqual(tf.as_dtype("int64").is_floating, False)
self.assertEqual(tf.as_dtype("uint8").is_floating, False)
self.assertEqual(tf.as_dtype("uint16").is_floating, False)
self.assertEqual(tf.as_dtype("complex64").is_floating, False)
self.assertEqual(tf.as_dtype("float32").is_floating, True)
self.assertEqual(tf.as_dtype("float64").is_floating, True)
self.assertEqual(tf.as_dtype("string").is_floating, False)
self.assertEqual(tf.as_dtype("bool").is_floating, False)
def testIsUnsigned(self):
self.assertEqual(tf.as_dtype("int8").is_unsigned, False)
self.assertEqual(tf.as_dtype("int16").is_unsigned, False)
self.assertEqual(tf.as_dtype("int32").is_unsigned, False)
self.assertEqual(tf.as_dtype("int64").is_unsigned, False)
self.assertEqual(tf.as_dtype("uint8").is_unsigned, True)
self.assertEqual(tf.as_dtype("uint16").is_unsigned, True)
self.assertEqual(tf.as_dtype("float32").is_unsigned, False)
self.assertEqual(tf.as_dtype("float64").is_unsigned, False)
self.assertEqual(tf.as_dtype("bool").is_unsigned, False)
self.assertEqual(tf.as_dtype("string").is_unsigned, False)
self.assertEqual(tf.as_dtype("complex64").is_unsigned, False)
def testMinMax(self):
# make sure min/max evaluates for all data types that have min/max
for datatype_enum in types_pb2.DataType.values():
if datatype_enum == types_pb2.DT_INVALID:
continue
dtype = tf.as_dtype(datatype_enum)
numpy_dtype = dtype.as_numpy_dtype
# ignore types for which there are no minimum/maximum (or we cannot
# compute it, such as for the q* types)
if (dtype.is_quantized or
dtype.base_dtype == tf.bool or
dtype.base_dtype == tf.string or
dtype.base_dtype == tf.complex64):
continue
print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
# check some values that are known
if numpy_dtype == np.bool_:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 1)
if numpy_dtype == np.int8:
self.assertEquals(dtype.min, -128)
self.assertEquals(dtype.max, 127)
if numpy_dtype == np.int16:
self.assertEquals(dtype.min, -32768)
self.assertEquals(dtype.max, 32767)
if numpy_dtype == np.int32:
self.assertEquals(dtype.min, -2147483648)
self.assertEquals(dtype.max, 2147483647)
if numpy_dtype == np.int64:
self.assertEquals(dtype.min, -9223372036854775808)
self.assertEquals(dtype.max, 9223372036854775807)
if numpy_dtype == np.uint8:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 255)
if numpy_dtype == np.uint16:
if dtype == tf.uint16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 65535)
elif dtype == tf.bfloat16:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 4294967295)
if numpy_dtype == np.uint32:
self.assertEquals(dtype.min, 0)
self.assertEquals(dtype.max, 18446744073709551615)
if numpy_dtype in (np.float16, np.float32, np.float64):
self.assertEquals(dtype.min, np.finfo(numpy_dtype).min)
self.assertEquals(dtype.max, np.finfo(numpy_dtype).max)
def testRepr(self):
for enum, name in dtypes._TYPE_TO_STRING.items():
dtype = tf.DType(enum)
self.assertEquals(repr(dtype), 'tf.' + name)
dtype2 = eval(repr(dtype))
self.assertEquals(type(dtype2), tf.DType)
self.assertEquals(dtype, dtype2)
if __name__ == "__main__":
googletest.main()
| mit | -6,265,317,340,081,946,000 | 46.86036 | 80 | 0.617412 | false |
eunchong/build | scripts/slave/recipe_modules/cipd/example.py | 1 | 3427 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'file',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/step',
'cipd',
]
def RunSteps(api):
# First, you need a cipd client.
api.cipd.install_client('install cipd')
api.cipd.install_client('install cipd', version='deadbeaf')
assert api.cipd.get_executable()
package_name = 'public/package/%s' % api.cipd.platform_suffix()
package_instance_id = '7f751b2237df2fdf3c1405be00590fefffbaea2d'
packages = {package_name: package_instance_id}
cipd_root = api.path['slave_build'].join('packages')
# Some packages don't require credentials to be installed or queried.
api.cipd.ensure(cipd_root, packages)
step = api.cipd.search(package_name, tag='git_revision:40-chars-long-hash')
api.cipd.describe(package_name,
version=step.json.output['result'][0]['instance_id'])
# Others do, so provide creds first.
api.cipd.set_service_account_credentials('fake-credentials.json')
private_package_name = 'private/package/%s' % api.cipd.platform_suffix()
packages[private_package_name] = 'latest'
api.cipd.ensure(cipd_root, packages)
step = api.cipd.search(private_package_name, tag='key:value')
api.cipd.describe(private_package_name,
version=step.json.output['result'][0]['instance_id'],
test_data_tags=['custom:tagged', 'key:value'],
test_data_refs=['latest'])
  # The rest of the commands expect credentials to be set.
# Build & register new package version.
api.cipd.build('fake-input-dir', 'fake-package-path', 'infra/fake-package')
api.cipd.build('fake-input-dir', 'fake-package-path', 'infra/fake-package',
install_mode='copy')
api.cipd.register('infra/fake-package', 'fake-package-path',
refs=['fake-ref-1', 'fake-ref-2'],
tags={'fake_tag_1': 'fake_value_1',
'fake_tag_2': 'fake_value_2'})
# Set tag or ref of an already existing package.
api.cipd.set_tag('fake-package',
version='long/weird/ref/which/doesn/not/fit/into/40chars',
tags={'dead': 'beaf', 'more': 'value'})
api.cipd.set_ref('fake-package', version='latest', refs=['any', 'some'])
# Search by the new tag.
api.cipd.search('fake-package/%s' % api.cipd.platform_suffix(),
tag='dead:beaf')
def GenTests(api):
yield (
    # This is a very common dev workstation, but not all devs are on it.
api.test('basic') +
api.platform('linux', 64)
)
yield (
api.test('mac64') +
api.platform('mac', 64)
)
yield (
api.test('install-failed') +
api.step_data('install cipd', retcode=1)
)
yield (
api.test('describe-failed') +
api.platform('linux', 64) +
api.override_step_data(
'cipd describe public/package/linux-amd64',
api.cipd.example_error(
'package "public/package/linux-amd64-ubuntu14_04" not registered',
)
)
)
yield (
api.test('describe-many-instances') +
api.platform('linux', 64) +
api.override_step_data(
'cipd search fake-package/linux-amd64 dead:beaf',
api.cipd.example_search(
'public/package/linux-amd64-ubuntu14_04',
instances=3
)
)
)
| bsd-3-clause | 7,805,765,115,964,086,000 | 32.930693 | 77 | 0.635249 | false |
nathanielvarona/airflow | tests/providers/google/ads/transfers/test_ads_to_gcs.py | 3 | 2455 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
from airflow.providers.google.ads.transfers.ads_to_gcs import GoogleAdsToGcsOperator
from tests.providers.google.ads.operators.test_ads import (
BUCKET,
CLIENT_IDS,
FIELDS_TO_EXTRACT,
GCS_OBJ_PATH,
IMPERSONATION_CHAIN,
QUERY,
api_version,
gcp_conn_id,
google_ads_conn_id,
)
class TestGoogleAdsToGcsOperator:
@mock.patch("airflow.providers.google.ads.transfers.ads_to_gcs.GoogleAdsHook")
@mock.patch("airflow.providers.google.ads.transfers.ads_to_gcs.GCSHook")
def test_execute(self, mock_gcs_hook, mock_ads_hook):
op = GoogleAdsToGcsOperator(
gcp_conn_id=gcp_conn_id,
google_ads_conn_id=google_ads_conn_id,
client_ids=CLIENT_IDS,
query=QUERY,
attributes=FIELDS_TO_EXTRACT,
obj=GCS_OBJ_PATH,
bucket=BUCKET,
task_id="run_operator",
impersonation_chain=IMPERSONATION_CHAIN,
api_version=api_version,
)
op.execute({})
mock_ads_hook.assert_called_once_with(
gcp_conn_id=gcp_conn_id,
google_ads_conn_id=google_ads_conn_id,
api_version=api_version,
)
mock_ads_hook.return_value.search.assert_called_once_with(
client_ids=CLIENT_IDS, query=QUERY, page_size=10000
)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=gcp_conn_id,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_gcs_hook.return_value.upload.assert_called_once_with(
bucket_name=BUCKET, object_name=GCS_OBJ_PATH, filename=mock.ANY, gzip=False
)
| apache-2.0 | 3,004,015,969,946,300,000 | 36.769231 | 87 | 0.674134 | false |
dpac-vlsi/SynchroTrace | src/arch/x86/isa/insts/simd128/integer/data_transfer/__init__.py | 91 | 2388 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["move",
"move_non_temporal",
"move_mask"]
microcode = '''
# 128 bit multimedia and scientific data transfer instructions
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
| bsd-3-clause | 6,213,386,654,715,811,000 | 49.808511 | 72 | 0.778894 | false |
goldmedal/spark | examples/src/main/python/ml/one_vs_rest_example.py | 52 | 2236 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example of Multiclass to Binary Reduction with One Vs Rest,
using Logistic Regression as the base classifier.
Run with:
bin/spark-submit examples/src/main/python/ml/one_vs_rest_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.classification import LogisticRegression, OneVsRest
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("OneVsRestExample") \
.getOrCreate()
# $example on$
# load data file.
inputData = spark.read.format("libsvm") \
.load("data/mllib/sample_multiclass_classification_data.txt")
# generate the train/test split.
(train, test) = inputData.randomSplit([0.8, 0.2])
# instantiate the base classifier.
lr = LogisticRegression(maxIter=10, tol=1E-6, fitIntercept=True)
# instantiate the One Vs Rest Classifier.
ovr = OneVsRest(classifier=lr)
# train the multiclass model.
ovrModel = ovr.fit(train)
# score the model on test data.
predictions = ovrModel.transform(test)
# obtain evaluator.
evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
# compute the classification error on test data.
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g" % (1.0 - accuracy))
# $example off$
spark.stop()
| apache-2.0 | -9,018,965,458,473,737,000 | 32.878788 | 74 | 0.723614 | false |
GenericStudent/home-assistant | homeassistant/components/nws/config_flow.py | 10 | 2946 | """Config flow for National Weather Service (NWS) integration."""
import logging
import aiohttp
from pynws import SimpleNWS
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from . import base_unique_id
from .const import CONF_STATION, DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
latitude = data[CONF_LATITUDE]
longitude = data[CONF_LONGITUDE]
api_key = data[CONF_API_KEY]
station = data.get(CONF_STATION)
client_session = async_get_clientsession(hass)
ha_api_key = f"{api_key} homeassistant"
nws = SimpleNWS(latitude, longitude, ha_api_key, client_session)
try:
await nws.set_station(station)
except aiohttp.ClientError as err:
_LOGGER.error("Could not connect: %s", err)
raise CannotConnect from err
return {"title": nws.station}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for National Weather Service (NWS)."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(
base_unique_id(user_input[CONF_LATITUDE], user_input[CONF_LONGITUDE])
)
self._abort_if_unique_id_configured()
try:
info = await validate_input(self.hass, user_input)
user_input[CONF_STATION] = info["title"]
return self.async_create_entry(title=info["title"], data=user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
data_schema = vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Required(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Required(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
vol.Optional(CONF_STATION): str,
}
)
return self.async_show_form(
step_id="user", data_schema=data_schema, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
| apache-2.0 | -7,285,548,878,446,906,000 | 33.658824 | 85 | 0.63408 | false |
mhrivnak/pulp | server/test/unit/server/db/model/test_criteria.py | 1 | 8977 | """
Test the pulp.server.db.model.criteria module.
"""
import unittest
from pulp.server import exceptions
from pulp.server.db.model import criteria
FIELDS = set(('sort', 'skip', 'limit', 'filters', 'fields'))
class TestAsDict(unittest.TestCase):
def test_empty(self):
c = criteria.Criteria()
ret = c.as_dict()
self.assertTrue(isinstance(ret, dict))
for field in FIELDS:
self.assertTrue(ret[field] is None)
def test_full(self):
c = criteria.Criteria(
filters={'name': {'$in': ['a', 'b']}},
sort=(('name', 'ascending'),),
limit=10,
skip=10,
fields=('name', 'id')
)
ret = c.as_dict()
self.assertTrue(isinstance(ret['filters'], dict))
self.assertEqual(ret['limit'], 10)
self.assertEqual(ret['skip'], 10)
self.assertEqual(ret['fields'], c.fields)
self.assertEqual(set(ret.keys()), FIELDS)
class TestClientInputValidation(unittest.TestCase):
def test_as_dict(self):
# this should work
criteria.Criteria.from_client_input({})
def test_as_string(self):
self.assertRaises(exceptions.InvalidValue, criteria.Criteria.from_client_input, 'abc 123')
def test_as_int(self):
self.assertRaises(exceptions.InvalidValue, criteria.Criteria.from_client_input, 123)
def test_as_none(self):
self.assertRaises(exceptions.InvalidValue, criteria.Criteria.from_client_input, None)
def test_as_bool(self):
self.assertRaises(exceptions.InvalidValue, criteria.Criteria.from_client_input, True)
def test_as_list(self):
self.assertRaises(exceptions.InvalidValue, criteria.Criteria.from_client_input, [])
class TestFromDict(unittest.TestCase):
"""
Test the Criteria.from_dict() method.
"""
def test_from_dict(self):
filters = {'some': 'filters'}
sort = ['sort_item']
limit = 42
skip = 64
fields = ['a_field']
a_dict = {'filters': filters, 'sort': sort, 'limit': limit, 'skip': skip, 'fields': fields}
new_criteria = criteria.Criteria.from_dict(a_dict)
self.assertTrue(isinstance(new_criteria, criteria.Criteria))
self.assertEqual(new_criteria.filters, filters)
self.assertEqual(new_criteria.sort, sort)
self.assertEqual(new_criteria.limit, limit)
self.assertEqual(new_criteria.skip, skip)
self.assertEqual(new_criteria.fields, fields)
def test_from_dict_accepts_as_dict_as_input(self):
"""
Verify that from_dict() accepts the output of as_dict() as input.
"""
filters = {'some': 'filters'}
sort = ['sort_item']
limit = 42
skip = 64
fields = ['a_field']
criteria_1 = criteria.Criteria(filters, sort, limit, skip, fields)
criteria_2 = criteria.Criteria.from_dict(criteria_1.as_dict())
self.assertTrue(isinstance(criteria_2, criteria.Criteria))
self.assertEqual(criteria_2.filters, criteria_1.filters)
self.assertEqual(criteria_2.sort, criteria_1.sort)
self.assertEqual(criteria_2.limit, criteria_1.limit)
self.assertEqual(criteria_2.skip, criteria_1.skip)
self.assertEqual(criteria_2.fields, criteria_1.fields)
class TestValidateFilters(unittest.TestCase):
def test_as_dict(self):
input = {'id': 'repo1'}
ret = criteria._validate_filters(input)
self.assertEqual(ret, input)
def test_as_string(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_filters, 'abc 123')
def test_as_int(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_filters, 123)
def test_as_none(self):
ret = criteria._validate_filters(None)
self.assertTrue(ret is None)
def test_as_bool(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_filters, True)
def test_as_list(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_filters, [])
class TestValidateSort(unittest.TestCase):
def test_as_list(self):
input = []
ret = criteria._validate_sort(input)
self.assertEqual(ret, input)
def test_as_tuple(self):
input = ()
ret = criteria._validate_sort(input)
self.assertEqual(ret, [])
def test_as_string(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_sort, 'abc 123')
def test_as_int(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_sort, 123)
def test_as_none(self):
ret = criteria._validate_sort(None)
self.assertTrue(ret is None)
def test_as_bool(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_sort, True)
def test_as_dict(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_sort, {})
class TestValidateLimit(unittest.TestCase):
def test_as_int(self):
input = 20
ret = criteria._validate_limit(input)
self.assertEqual(ret, input)
def test_as_int_string(self):
input = '20'
ret = criteria._validate_limit(input)
self.assertEqual(ret, 20)
def test_as_zero(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_limit, 0)
def test_as_negative(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_limit, -1)
def test_as_tuple(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_limit, ())
def test_as_string(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_limit, 'abc 123')
def test_as_list(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_limit, [])
def test_as_none(self):
ret = criteria._validate_limit(None)
self.assertTrue(ret is None)
def test_as_bool(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_limit, True)
def test_as_dict(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_limit, {})
class TestValidateSkip(unittest.TestCase):
def test_as_int(self):
input = 20
ret = criteria._validate_skip(input)
self.assertEqual(ret, input)
def test_as_int_string(self):
input = '20'
ret = criteria._validate_skip(input)
self.assertEqual(ret, 20)
def test_as_zero(self):
input = 0
ret = criteria._validate_skip(input)
self.assertEqual(ret, input)
def test_as_negative(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_skip, -1)
def test_as_tuple(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_skip, ())
def test_as_string(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_skip, 'abc 123')
def test_as_list(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_skip, [])
def test_as_none(self):
ret = criteria._validate_skip(None)
self.assertTrue(ret is None)
def test_as_bool(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_skip, True)
def test_as_dict(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_skip, {})
class TestValidateFields(unittest.TestCase):
def test_as_list(self):
input = []
ret = criteria._validate_fields(input)
self.assertEqual(ret, input)
def test_as_tuple(self):
input = ()
ret = criteria._validate_fields(input)
self.assertEqual(ret, list(input))
def test_as_string(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, 'abc 123')
def test_as_int(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, 123)
def test_as_none(self):
ret = criteria._validate_fields(None)
self.assertTrue(ret is None)
def test_as_bool(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, True)
def test_as_dict(self):
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, {})
def test_items_as_string(self):
input = ['id', 'name']
ret = criteria._validate_fields(input)
self.assertEqual(ret, input)
def test_items_as_int(self):
input = ['id', 3]
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, input)
def test_items_as_dict(self):
input = ['id', {}]
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, input)
def test_items_as_list(self):
input = ['id', []]
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, input)
def test_items_as_bool(self):
input = ['id', True]
self.assertRaises(exceptions.InvalidValue, criteria._validate_fields, input)
| gpl-2.0 | -3,886,031,113,554,155,500 | 31.762774 | 99 | 0.648101 | false |
jyejare/robottelo | tests/foreman/cli/test_puppet.py | 1 | 4056 | """End to end test for Puppet funcionality
:Requirement: Puppet
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Puppet
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from robottelo.config import settings
from robottelo.decorators import run_in_one_thread
from robottelo.decorators import skip_if_not_set
from robottelo.decorators import tier3
from robottelo.decorators import upgrade
from robottelo.test import CLITestCase
@run_in_one_thread
class PuppetTestCase(CLITestCase):
"""Implements Puppet test scenario"""
@classmethod
@skip_if_not_set('clients')
def setUpClass(cls):
super(PuppetTestCase, cls).setUpClass()
cls.sat6_hostname = settings.server.hostname
@pytest.mark.stubbed
@tier3
@upgrade
def test_positive_puppet_scenario(self):
"""Tests extensive all-in-one puppet scenario
:id: d4fdba9f-6333-4d47-987b-ce920da20d77
:Steps:
1. Create an organization and upload a cloned manifest for it.
2. Enable respective Satellite Tools repos and sync them.
3. Create a product and a LFE
4. Create a puppet repos within the product
5. Upload motd puppet module into the repo
6. Upload parameterizable puppet module and create smart params for
it
7. Create a CV and add Tools repo and puppet module(s)
8. Publish and promote CV to the LFE
9. Create AK with the product and enable Satellite Tools in it
10. Create a libvirt compute resource
11. Create a sane subnet and sane domain to be used by libvirt
12. Create a hostgroup associated with all created entities
(especially Puppet Classes has added puppet modules)
13. Provision a host using the hostgroup on libvirt resource
14. Assert that puppet agent can run on the host
15. Assert that the puppet modules get installed by provisioning
16. Run facter on host and assert that was successful
:expectedresults: multiple asserts along the code
:CaseAutomation: notautomated
:CaseLevel: System
"""
@run_in_one_thread
class PuppetCapsuleTestCase(CLITestCase):
"""Implements Puppet test scenario with standalone capsule"""
@classmethod
@skip_if_not_set('clients')
def setUpClass(cls):
super(PuppetCapsuleTestCase, cls).setUpClass()
cls.sat6_hostname = settings.server.hostname
@pytest.mark.stubbed
@tier3
@upgrade
def test_positive_puppet_capsule_scenario(self):
"""Tests extensive all-in-one puppet scenario via Capsule
:id: 51ffe74e-7131-43d0-a919-1175233b4763
:Steps:
1. Create an organization and upload a cloned manifest for it.
2. Enable respective Satellite Tools repos and sync them.
3. Create a product and a LFE
4. Create a puppet repos within the product
5. Upload motd puppet module into the repo
6. Upload parameterizable puppet module and create smart params for
it
7. Create a CV and add Tools repo and puppet module(s)
8. Publish and promote CV to the LFE
9. Create AK with the product and enable Satellite Tools in it
10. Create a libvirt compute resource
11. Create a sane subnet and sane domain to be used by libvirt
12. Create a hostgroup associated with all created entities
(especially Puppet Classes has added puppet modules)
13. Provision a host using the hostgroup on libvirt resource
14. Assert that puppet agent can run on the host
15. Assert that the puppet modules get installed by provisioning
16. Run facter on host and assert that was successful
:expectedresults: multiple asserts along the code
:CaseAutomation: notautomated
:CaseLevel: System
"""
| gpl-3.0 | -1,292,803,075,519,433,700 | 33.372881 | 79 | 0.66716 | false |
jolevq/odoopub | addons/l10n_br/__openerp__.py | 430 | 3125 | # -*- encoding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2009 Renato Lima - Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Brazilian - Accounting',
'category': 'Localization/Account Charts',
'description': """
Base module for the Brazilian localization
==========================================
This module consists of:
- Generic Brazilian chart of accounts
- Brazilian taxes such as:
- IPI
- ICMS
- PIS
- COFINS
- ISS
- IR
- IRPJ
- CSLL
The field tax_discount has also been added to the account.tax.template and
account.tax objects to allow the proper computation of some Brazilian VATs
such as ICMS. The chart of accounts creation wizard has been extended to
propagate this new data properly.
It's important to note, however, that this module still lacks many features
needed to use OpenERP properly in Brazil. Those features (such as the
electronic fiscal invoicing, which is already operational) are brought by more
than 15 additional modules of the Brazilian Launchpad localization project
https://launchpad.net/openerp.pt-br-localiz and their dependencies in the
extra addons branch. Those modules aim not to break OpenERP's remarkable
modularity; this is why they are numerous but small. One of the reasons for
maintaining those modules apart is that the Brazilian localization leaders
need commit-rights agility to complete the localization as companies fund the
remaining legal requirements (such as fiscal ledgers, accounting SPED, fiscal
SPED and PAF ECF, which are still missing as of September 2011). Those
modules are also strictly licensed under AGPL V3 and today don't come with any
additional paid permission for online use of 'private modules'.
""",
'license': 'AGPL-3',
'author': 'Akretion, OpenERP Brasil',
'website': 'http://openerpbrasil.org',
'version': '0.6',
'depends': ['account','account_chart'],
'data': [
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account_tax_code_template.xml',
'data/account_chart_template.xml',
'data/account_tax_template.xml',
'account_view.xml',
'l10n_br_wizard.xml',
],
'installable': True,
}
| agpl-3.0 | -7,678,701,675,671,245,000 | 39.584416 | 79 | 0.65248 | false |
kallewoof/bitcoin | test/lint/lint-files.py | 13 | 8367 | #!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This checks that all files in the repository have correct filenames and permissions
"""
import os
import re
import sys
from subprocess import check_output
from typing import Optional, NoReturn
CMD_ALL_FILES = "git ls-files -z --full-name"
CMD_SOURCE_FILES = 'git ls-files -z --full-name -- "*.[cC][pP][pP]" "*.[hH]" "*.[pP][yY]" "*.[sS][hH]"'
CMD_SHEBANG_FILES = "git grep --full-name --line-number -I '^#!'"
ALLOWED_FILENAME_REGEXP = "^[a-zA-Z0-9/_.@][a-zA-Z0-9/_.@-]*$"
ALLOWED_SOURCE_FILENAME_REGEXP = "^[a-z0-9_./-]+$"
ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP = (
"^src/(secp256k1/|univalue/|test/fuzz/FuzzedDataProvider.h)"
)
ALLOWED_PERMISSION_NON_EXECUTABLES = 644
ALLOWED_PERMISSION_EXECUTABLES = 755
ALLOWED_EXECUTABLE_SHEBANG = {
"py": [b"#!/usr/bin/env python3"],
"sh": [b"#!/usr/bin/env bash", b"#!/bin/sh"],
}
class FileMeta(object):
def __init__(self, file_path: str):
self.file_path = file_path
@property
def extension(self) -> Optional[str]:
"""
Returns the file extension for a given filename string.
eg:
'ci/lint_run_all.sh' -> 'sh'
'ci/retry/retry' -> None
'contrib/devtools/split-debug.sh.in' -> 'in'
"""
        return os.path.splitext(self.file_path)[1].strip(".") or None
@property
def full_extension(self) -> Optional[str]:
"""
Returns the full file extension for a given filename string.
eg:
'ci/lint_run_all.sh' -> 'sh'
'ci/retry/retry' -> None
'contrib/devtools/split-debug.sh.in' -> 'sh.in'
"""
filename_parts = self.file_path.split(os.extsep, 1)
try:
return filename_parts[1]
except IndexError:
return None
@property
def permissions(self) -> int:
"""
Returns the octal file permission of the file
"""
return int(oct(os.stat(self.file_path).st_mode)[-3:])
def check_all_filenames() -> int:
"""
Checks every file in the repository against an allowed regexp to make sure only lowercase or uppercase
alphanumerics (a-zA-Z0-9), underscores (_), hyphens (-), at (@) and dots (.) are used in repository filenames.
"""
filenames = check_output(CMD_ALL_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
filename_regex = re.compile(ALLOWED_FILENAME_REGEXP)
failed_tests = 0
for filename in filenames:
if not filename_regex.match(filename):
print(
f"""File {repr(filename)} does not not match the allowed filename regexp ('{ALLOWED_FILENAME_REGEXP}')."""
)
failed_tests += 1
return failed_tests
def check_source_filenames() -> int:
"""
Checks only source files (*.cpp, *.h, *.py, *.sh) against a stricter allowed regexp to make sure only lowercase
alphanumerics (a-z0-9), underscores (_), hyphens (-) and dots (.) are used in source code filenames.
Additionally there is an exception regexp for directories or files which are excepted from matching this regexp.
"""
filenames = check_output(CMD_SOURCE_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
filename_regex = re.compile(ALLOWED_SOURCE_FILENAME_REGEXP)
filename_exception_regex = re.compile(ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP)
failed_tests = 0
for filename in filenames:
if not filename_regex.match(filename) and not filename_exception_regex.match(filename):
print(
f"""File {repr(filename)} does not not match the allowed source filename regexp ('{ALLOWED_SOURCE_FILENAME_REGEXP}'), or the exception regexp ({ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP})."""
)
failed_tests += 1
return failed_tests
def check_all_file_permissions() -> int:
"""
    Checks that all files in the repository match an allowed executable or non-executable file permission octal.
Additionally checks that for executable files, the file contains a shebang line
"""
filenames = check_output(CMD_ALL_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
failed_tests = 0
for filename in filenames:
file_meta = FileMeta(filename)
if file_meta.permissions == ALLOWED_PERMISSION_EXECUTABLES:
with open(filename, "rb") as f:
shebang = f.readline().rstrip(b"\n")
# For any file with executable permissions the first line must contain a shebang
if not shebang.startswith(b"#!"):
print(
f"""File "{filename}" has permission {ALLOWED_PERMISSION_EXECUTABLES} (executable) and is thus expected to contain a shebang '#!'. Add shebang or do "chmod {ALLOWED_PERMISSION_NON_EXECUTABLES} {filename}" to make it non-executable."""
)
failed_tests += 1
# For certain file extensions that have been defined, we also check that the shebang conforms to a specific
# allowable set of shebangs
if file_meta.extension in ALLOWED_EXECUTABLE_SHEBANG.keys():
if shebang not in ALLOWED_EXECUTABLE_SHEBANG[file_meta.extension]:
print(
f"""File "{filename}" is missing expected shebang """
+ " or ".join(
[
x.decode("utf-8")
for x in ALLOWED_EXECUTABLE_SHEBANG[file_meta.extension]
]
)
)
failed_tests += 1
elif file_meta.permissions == ALLOWED_PERMISSION_NON_EXECUTABLES:
continue
else:
print(
f"""File "{filename}" has unexpected permission {file_meta.permissions}. Do "chmod {ALLOWED_PERMISSION_NON_EXECUTABLES} {filename}" (if non-executable) or "chmod {ALLOWED_PERMISSION_EXECUTABLES} {filename}" (if executable)."""
)
failed_tests += 1
return failed_tests
def check_shebang_file_permissions() -> int:
"""
Checks every file that contains a shebang line to ensure it has an executable permission
"""
filenames = check_output(CMD_SHEBANG_FILES, shell=True).decode("utf8").strip().split("\n")
# The git grep command we use returns files which contain a shebang on any line within the file
# so we need to filter the list to only files with the shebang on the first line
filenames = [filename.split(":1:")[0] for filename in filenames if ":1:" in filename]
failed_tests = 0
for filename in filenames:
file_meta = FileMeta(filename)
if file_meta.permissions != ALLOWED_PERMISSION_EXECUTABLES:
# These file types are typically expected to be sourced and not executed directly
if file_meta.full_extension in ["bash", "init", "openrc", "sh.in"]:
continue
# *.py files which don't contain an `if __name__ == '__main__'` are not expected to be executed directly
if file_meta.extension == "py":
with open(filename, "r", encoding="utf8") as f:
file_data = f.read()
if not re.search("""if __name__ == ['"]__main__['"]:""", file_data):
continue
print(
f"""File "{filename}" contains a shebang line, but has the file permission {file_meta.permissions} instead of the expected executable permission {ALLOWED_PERMISSION_EXECUTABLES}. Do "chmod {ALLOWED_PERMISSION_EXECUTABLES} {filename}" (or remove the shebang line)."""
)
failed_tests += 1
return failed_tests
def main() -> NoReturn:
failed_tests = 0
failed_tests += check_all_filenames()
failed_tests += check_source_filenames()
failed_tests += check_all_file_permissions()
failed_tests += check_shebang_file_permissions()
if failed_tests:
print(
f"ERROR: There were {failed_tests} failed tests in the lint-files.py lint test. Please resolve the above errors."
)
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
main()
| mit | 874,851,253,220,299,300 | 40.216749 | 282 | 0.61145 | false |
groschovskiy/lerigos_music | Server/API/lib/pkg_resources/_vendor/packaging/_compat.py | 901 | 1253 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = str,
else:
string_types = basestring,
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
| apache-2.0 | -4,214,548,331,290,717,000 | 30.325 | 78 | 0.702314 | false |
mcking49/apache-flask | Python/Lib/ctypes/test/test_parameters.py | 31 | 6432 | import unittest, sys
from ctypes.test import need_symbol
class SimpleTypesTestCase(unittest.TestCase):
def setUp(self):
import ctypes
try:
from _ctypes import set_conversion_mode
except ImportError:
pass
else:
self.prev_conv_mode = set_conversion_mode("ascii", "strict")
def tearDown(self):
try:
from _ctypes import set_conversion_mode
except ImportError:
pass
else:
set_conversion_mode(*self.prev_conv_mode)
def test_subclasses(self):
from ctypes import c_void_p, c_char_p
# ctypes 0.9.5 and before did overwrite from_param in SimpleType_new
class CVOIDP(c_void_p):
def from_param(cls, value):
return value * 2
from_param = classmethod(from_param)
class CCHARP(c_char_p):
def from_param(cls, value):
return value * 4
from_param = classmethod(from_param)
self.assertEqual(CVOIDP.from_param("abc"), "abcabc")
self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc")
@need_symbol('c_wchar_p')
def test_subclasses_c_wchar_p(self):
from ctypes import c_wchar_p
class CWCHARP(c_wchar_p):
def from_param(cls, value):
return value * 3
from_param = classmethod(from_param)
self.assertEqual(CWCHARP.from_param("abc"), "abcabcabc")
# XXX Replace by c_char_p tests
def test_cstrings(self):
from ctypes import c_char_p, byref
# c_char_p.from_param on a Python String packs the string
# into a cparam object
s = "123"
self.assertIs(c_char_p.from_param(s)._obj, s)
# new in 0.9.1: convert (encode) unicode to ascii
self.assertEqual(c_char_p.from_param(u"123")._obj, "123")
self.assertRaises(UnicodeEncodeError, c_char_p.from_param, u"123\377")
self.assertRaises(TypeError, c_char_p.from_param, 42)
# calling c_char_p.from_param with a c_char_p instance
# returns the argument itself:
a = c_char_p("123")
self.assertIs(c_char_p.from_param(a), a)
@need_symbol('c_wchar_p')
def test_cw_strings(self):
from ctypes import byref, c_wchar_p
s = u"123"
if sys.platform == "win32":
self.assertTrue(c_wchar_p.from_param(s)._obj is s)
self.assertRaises(TypeError, c_wchar_p.from_param, 42)
# new in 0.9.1: convert (decode) ascii to unicode
self.assertEqual(c_wchar_p.from_param("123")._obj, u"123")
self.assertRaises(UnicodeDecodeError, c_wchar_p.from_param, "123\377")
pa = c_wchar_p.from_param(c_wchar_p(u"123"))
self.assertEqual(type(pa), c_wchar_p)
def test_int_pointers(self):
from ctypes import c_short, c_uint, c_int, c_long, POINTER, pointer
LPINT = POINTER(c_int)
## p = pointer(c_int(42))
## x = LPINT.from_param(p)
x = LPINT.from_param(pointer(c_int(42)))
self.assertEqual(x.contents.value, 42)
self.assertEqual(LPINT(c_int(42)).contents.value, 42)
self.assertEqual(LPINT.from_param(None), None)
if c_int != c_long:
self.assertRaises(TypeError, LPINT.from_param, pointer(c_long(42)))
self.assertRaises(TypeError, LPINT.from_param, pointer(c_uint(42)))
self.assertRaises(TypeError, LPINT.from_param, pointer(c_short(42)))
def test_byref_pointer(self):
# The from_param class method of POINTER(typ) classes accepts what is
# returned by byref(obj), it type(obj) == typ
from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
LPINT = POINTER(c_int)
LPINT.from_param(byref(c_int(42)))
self.assertRaises(TypeError, LPINT.from_param, byref(c_short(22)))
if c_int != c_long:
self.assertRaises(TypeError, LPINT.from_param, byref(c_long(22)))
self.assertRaises(TypeError, LPINT.from_param, byref(c_uint(22)))
def test_byref_pointerpointer(self):
# See above
from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
LPLPINT = POINTER(POINTER(c_int))
LPLPINT.from_param(byref(pointer(c_int(42))))
self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_short(22))))
if c_int != c_long:
self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_long(22))))
self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_uint(22))))
def test_array_pointers(self):
from ctypes import c_short, c_uint, c_int, c_long, POINTER
INTARRAY = c_int * 3
ia = INTARRAY()
self.assertEqual(len(ia), 3)
self.assertEqual([ia[i] for i in range(3)], [0, 0, 0])
# Pointers are only compatible with arrays containing items of
# the same type!
LPINT = POINTER(c_int)
LPINT.from_param((c_int*3)())
self.assertRaises(TypeError, LPINT.from_param, c_short*3)
self.assertRaises(TypeError, LPINT.from_param, c_long*3)
self.assertRaises(TypeError, LPINT.from_param, c_uint*3)
def test_noctypes_argtype(self):
import _ctypes_test
from ctypes import CDLL, c_void_p, ArgumentError
func = CDLL(_ctypes_test.__file__)._testfunc_p_p
func.restype = c_void_p
# TypeError: has no from_param method
self.assertRaises(TypeError, setattr, func, "argtypes", (object,))
class Adapter(object):
def from_param(cls, obj):
return None
func.argtypes = (Adapter(),)
self.assertEqual(func(None), None)
self.assertEqual(func(object()), None)
class Adapter(object):
def from_param(cls, obj):
return obj
func.argtypes = (Adapter(),)
# don't know how to convert parameter 1
self.assertRaises(ArgumentError, func, object())
self.assertEqual(func(c_void_p(42)), 42)
class Adapter(object):
def from_param(cls, obj):
raise ValueError(obj)
func.argtypes = (Adapter(),)
# ArgumentError: argument 1: ValueError: 99
self.assertRaises(ArgumentError, func, 99)
################################################################
if __name__ == '__main__':
unittest.main()
| mit | 8,008,755,104,228,328,000 | 34.535912 | 88 | 0.595771 | false |
olasitarska/django | tests/unmanaged_models/tests.py | 49 | 2183 | from __future__ import unicode_literals
from django.db import connection
from django.test import TestCase
from .models import A01, A02, B01, B02, C01, C02, Unmanaged2, Managed1
class SimpleTests(TestCase):
def test_simple(self):
"""
The main test here is that the all the models can be created without
any database errors. We can also do some more simple insertion and
lookup tests whilst we're here to show that the second of models do
refer to the tables from the first set.
"""
# Insert some data into one set of models.
a = A01.objects.create(f_a="foo", f_b=42)
B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
c = C01.objects.create(f_a="barney", f_b=1)
c.mm_a = [a]
# ... and pull it out via the other set.
a2 = A02.objects.all()[0]
self.assertIsInstance(a2, A02)
self.assertEqual(a2.f_a, "foo")
b2 = B02.objects.all()[0]
self.assertIsInstance(b2, B02)
self.assertEqual(b2.f_a, "fred")
self.assertIsInstance(b2.fk_a, A02)
self.assertEqual(b2.fk_a.f_a, "foo")
self.assertEqual(list(C02.objects.filter(f_a=None)), [])
resp = list(C02.objects.filter(mm_a=a.id))
self.assertEqual(len(resp), 1)
self.assertIsInstance(resp[0], C02)
self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
def test_many_to_many_between_unmanaged(self):
"""
The intermediary table between two unmanaged models should not be created.
"""
table = Unmanaged2._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertTrue(table not in tables, "Table '%s' should not exist, but it does." % table)
def test_many_to_many_between_unmanaged_and_managed(self):
"""
An intermediary table between a managed and an unmanaged model should be created.
"""
table = Managed1._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assertTrue(table in tables, "Table '%s' does not exist." % table)
| bsd-3-clause | 8,315,767,131,304,421,000 | 34.786885 | 97 | 0.628951 | false |
ressu/SickGear | lib/hachoir_parser/container/swf.py | 90 | 15001 | """
SWF (Macromedia/Adobe Flash) file parser.
Documentation:
- Alexis' SWF Reference:
http://www.m2osw.com/swf_alexref.html
- http://www.half-serious.com/swf/format/
- http://www.anotherbigidea.com/javaswf/
- http://www.gnu.org/software/gnash/
Author: Victor Stinner
Creation date: 29 october 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, UInt8, UInt32, UInt16, CString, Enum,
Bytes, RawBytes, NullBits, String, SubFile)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, filesizeHandler
from lib.hachoir_core.tools import paddingSize, humanFrequency
from lib.hachoir_parser.image.common import RGB
from lib.hachoir_parser.image.jpeg import JpegChunk, JpegFile
from lib.hachoir_core.stream import StringInputStream, ConcatStream
from lib.hachoir_parser.common.deflate import Deflate, has_deflate
from lib.hachoir_parser.container.action_script import parseActionScript
import math
# Maximum file size (50 MB)
MAX_FILE_SIZE = 50 * 1024 * 1024
TWIPS = 20
class RECT(FieldSet):
endian = BIG_ENDIAN
def createFields(self):
yield Bits(self, "nbits", 5)
nbits = self["nbits"].value
if not nbits:
raise ParserError("SWF parser: Invalid RECT field size (0)")
yield Bits(self, "xmin", nbits, "X minimum in twips")
yield Bits(self, "xmax", nbits, "X maximum in twips")
yield Bits(self, "ymin", nbits, "Y minimum in twips")
yield Bits(self, "ymax", nbits, "Y maximum in twips")
size = paddingSize(self.current_size, 8)
if size:
yield NullBits(self, "padding", size)
def getWidth(self):
return math.ceil(float(self["xmax"].value) / TWIPS)
def getHeight(self):
return math.ceil(float(self["ymax"].value) / TWIPS)
def createDescription(self):
return "Rectangle: %ux%u" % (self.getWidth(), self.getHeight())
class FixedFloat16(FieldSet):
def createFields(self):
yield UInt8(self, "float_part")
yield UInt8(self, "int_part")
def createValue(self):
return self["int_part"].value + float(self["float_part"].value) / 256
def parseBackgroundColor(parent, size):
yield RGB(parent, "color")
def bit2hertz(field):
return humanFrequency(5512.5 * (2 ** field.value))
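# The 2-bit sound rate field selects 5512.5 Hz * 2**value, i.e. 5512.5, 11025, 22050 or 44100 Hz.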
SOUND_CODEC_MP3 = 2
SOUND_CODEC = {
0: "RAW",
1: "ADPCM",
SOUND_CODEC_MP3: "MP3",
3: "Uncompressed",
6: "Nellymoser",
}
class SoundEnvelope(FieldSet):
def createFields(self):
yield UInt8(self, "count")
for index in xrange(self["count"].value):
yield UInt32(self, "mark44[]")
yield UInt16(self, "level0[]")
yield UInt16(self, "level1[]")
def parseSoundBlock(parent, size):
# TODO: Be able to get codec... Need to know last sound "def_sound[]" field
# if not (...)sound_header:
# raise ParserError("Sound block without header")
if True: #sound_header == SOUND_CODEC_MP3:
yield UInt16(parent, "samples")
yield UInt16(parent, "left")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "music_data", size)
def parseStartSound(parent, size):
yield UInt16(parent, "sound_id")
yield Bit(parent, "has_in_point")
yield Bit(parent, "has_out_point")
yield Bit(parent, "has_loops")
yield Bit(parent, "has_envelope")
yield Bit(parent, "no_multiple")
yield Bit(parent, "stop_playback")
yield NullBits(parent, "reserved", 2)
if parent["has_in_point"].value:
yield UInt32(parent, "in_point")
if parent["has_out_point"].value:
yield UInt32(parent, "out_point")
if parent["has_loops"].value:
yield UInt16(parent, "loop_count")
if parent["has_envelope"].value:
yield SoundEnvelope(parent, "envelope")
def parseDefineSound(parent, size):
yield UInt16(parent, "sound_id")
yield Bit(parent, "is_stereo")
yield Bit(parent, "is_16bit")
yield textHandler(Bits(parent, "rate", 2), bit2hertz)
yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
yield UInt32(parent, "sample_count")
if parent["codec"].value == SOUND_CODEC_MP3:
yield UInt16(parent, "len")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "music_data", size)
def parseSoundHeader(parent, size):
yield Bit(parent, "playback_is_stereo")
yield Bit(parent, "playback_is_16bit")
yield textHandler(Bits(parent, "playback_rate", 2), bit2hertz)
yield NullBits(parent, "reserved", 4)
yield Bit(parent, "sound_is_stereo")
yield Bit(parent, "sound_is_16bit")
yield textHandler(Bits(parent, "sound_rate", 2), bit2hertz)
yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
yield UInt16(parent, "sample_count")
if parent["codec"].value == 2:
yield UInt16(parent, "latency_seek")
class JpegHeader(FieldSet):
endian = BIG_ENDIAN
def createFields(self):
count = 1
while True:
chunk = JpegChunk(self, "jpeg_chunk[]")
yield chunk
if 1 < count and chunk["type"].value in (JpegChunk.TAG_SOI, JpegChunk.TAG_EOI):
break
count += 1
def parseJpeg(parent, size):
yield UInt16(parent, "char_id", "Character identifier")
size -= 2
code = parent["code"].value
if code != Tag.TAG_BITS:
if code == Tag.TAG_BITS_JPEG3:
yield UInt32(parent, "alpha_offset", "Character identifier")
size -= 4
addr = parent.absolute_address + parent.current_size + 16
if parent.stream.readBytes(addr, 2) in ("\xff\xdb", "\xff\xd8"):
header = JpegHeader(parent, "jpeg_header")
yield header
hdr_size = header.size // 8
size -= hdr_size
else:
hdr_size = 0
if code == Tag.TAG_BITS_JPEG3:
img_size = parent["alpha_offset"].value - hdr_size
else:
img_size = size
else:
img_size = size
yield SubFile(parent, "image", img_size, "JPEG picture", parser=JpegFile)
if code == Tag.TAG_BITS_JPEG3:
size = (parent.size - parent.current_size) // 8
yield RawBytes(parent, "alpha", size, "Image data")
def parseVideoFrame(parent, size):
yield UInt16(parent, "stream_id")
yield UInt16(parent, "frame_num")
if 4 < size:
yield RawBytes(parent, "video_data", size-4)
class Export(FieldSet):
def createFields(self):
yield UInt16(self, "object_id")
yield CString(self, "name")
def parseExport(parent, size):
yield UInt16(parent, "count")
for index in xrange(parent["count"].value):
yield Export(parent, "export[]")
class Tag(FieldSet):
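    # Tag codes whose payload embeds JPEG data; parseJpeg() branches on these constants.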
TAG_BITS = 6
TAG_BITS_JPEG2 = 32
TAG_BITS_JPEG3 = 35
TAG_INFO = {
# SWF version 1.0
0: ("end[]", "End", None),
1: ("show_frame[]", "Show frame", None),
2: ("def_shape[]", "Define shape", None),
3: ("free_char[]", "Free character", None),
4: ("place_obj[]", "Place object", None),
5: ("remove_obj[]", "Remove object", None),
6: ("def_bits[]", "Define bits", parseJpeg),
7: ("def_but[]", "Define button", None),
8: ("jpg_table", "JPEG tables", None),
9: ("bkgd_color[]", "Set background color", parseBackgroundColor),
10: ("def_font[]", "Define font", None),
11: ("def_text[]", "Define text", None),
12: ("action[]", "Action script", parseActionScript),
13: ("def_font_info[]", "Define font info", None),
# SWF version 2.0
14: ("def_sound[]", "Define sound", parseDefineSound),
15: ("start_sound[]", "Start sound", parseStartSound),
16: ("stop_sound[]", "Stop sound", None),
17: ("def_but_sound[]", "Define button sound", None),
18: ("sound_hdr", "Sound stream header", parseSoundHeader),
19: ("sound_blk[]", "Sound stream block", parseSoundBlock),
20: ("def_bits_lossless[]", "Define bits lossless", None),
21: ("def_bits_jpeg2[]", "Define bits JPEG 2", parseJpeg),
22: ("def_shape2[]", "Define shape 2", None),
23: ("def_but_cxform[]", "Define button CXFORM", None),
24: ("protect", "File is protected", None),
# SWF version 3.0
25: ("path_are_ps[]", "Paths are Postscript", None),
26: ("place_obj2[]", "Place object 2", None),
28: ("remove_obj2[]", "Remove object 2", None),
29: ("sync_frame[]", "Synchronize frame", None),
31: ("free_all[]", "Free all", None),
32: ("def_shape3[]", "Define shape 3", None),
33: ("def_text2[]", "Define text 2", None),
34: ("def_but2[]", "Define button2", None),
35: ("def_bits_jpeg3[]", "Define bits JPEG 3", parseJpeg),
36: ("def_bits_lossless2[]", "Define bits lossless 2", None),
39: ("def_sprite[]", "Define sprite", None),
40: ("name_character[]", "Name character", None),
41: ("serial_number", "Serial number", None),
42: ("generator_text[]", "Generator text", None),
43: ("frame_label[]", "Frame label", None),
45: ("sound_hdr2[]", "Sound stream header2", parseSoundHeader),
46: ("def_morph_shape[]", "Define morph shape", None),
47: ("gen_frame[]", "Generate frame", None),
48: ("def_font2[]", "Define font 2", None),
49: ("tpl_command[]", "Template command", None),
# SWF version 4.0
37: ("def_text_field[]", "Define text field", None),
38: ("def_quicktime_movie[]", "Define QuickTime movie", None),
# SWF version 5.0
50: ("def_cmd_obj[]", "Define command object", None),
51: ("flash_generator", "Flash generator", None),
52: ("gen_ext_font[]", "Gen external font", None),
56: ("export[]", "Export", parseExport),
57: ("import[]", "Import", None),
58: ("ebnable_debug", "Enable debug", None),
# SWF version 6.0
59: ("do_init_action[]", "Do init action", None),
60: ("video_str[]", "Video stream", None),
61: ("video_frame[]", "Video frame", parseVideoFrame),
62: ("def_font_info2[]", "Define font info 2", None),
63: ("mx4[]", "MX4", None),
64: ("enable_debug2", "Enable debugger 2", None),
# SWF version 7.0
65: ("script_limits[]", "Script limits", None),
66: ("tab_index[]", "Set tab index", None),
# SWF version 8.0
69: ("file_attr[]", "File attributes", None),
70: ("place_obj3[]", "Place object 3", None),
71: ("import2[]", "Import a definition list from another movie", None),
73: ("def_font_align[]", "Define font alignment zones", None),
74: ("csm_txt_set[]", "CSM text settings", None),
75: ("def_font3[]", "Define font text 3", None),
77: ("metadata[]", "XML code describing the movie", None),
78: ("def_scale_grid[]", "Define scaling factors", None),
83: ("def_shape4[]", "Define shape 4", None),
84: ("def_morph2[]", "Define a morphing shape 2", None),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
size = self["length"].value
if self[0].name == "length_ext":
self._size = (6+size) * 8
else:
self._size = (2+size) * 8
code = self["code"].value
if code in self.TAG_INFO:
self._name, self._description, self.parser = self.TAG_INFO[code]
else:
self.parser = None
def createFields(self):
if self.stream.readBits(self.absolute_address, 6, self.endian) == 63:
yield Bits(self, "length_ext", 6)
yield Bits(self, "code", 10)
yield filesizeHandler(UInt32(self, "length"))
else:
yield filesizeHandler(Bits(self, "length", 6))
yield Bits(self, "code", 10)
size = self["length"].value
if 0 < size:
if self.parser:
for field in self.parser(self, size):
yield field
else:
yield RawBytes(self, "data", size)
def createDescription(self):
return "Tag: %s (%s)" % (self["code"].display, self["length"].display)
class SwfFile(Parser):
VALID_VERSIONS = set(xrange(1, 9+1))
PARSER_TAGS = {
"id": "swf",
"category": "container",
"file_ext": ["swf"],
"mime": (u"application/x-shockwave-flash",),
"min_size": 64,
"description": u"Macromedia Flash data"
}
PARSER_TAGS["magic"] = []
for version in VALID_VERSIONS:
PARSER_TAGS["magic"].append(("FWS%c" % version, 0))
PARSER_TAGS["magic"].append(("CWS%c" % version, 0))
endian = LITTLE_ENDIAN
SWF_SCALE_FACTOR = 1.0 / 20
def validate(self):
if self.stream.readBytes(0, 3) not in ("FWS", "CWS"):
return "Wrong file signature"
if self["version"].value not in self.VALID_VERSIONS:
return "Unknown version"
if MAX_FILE_SIZE < self["filesize"].value:
return "File too big (%u)" % self["filesize"].value
if self["signature"].value == "FWS":
if self["rect/padding"].value != 0:
return "Unknown rectangle padding value"
return True
def createFields(self):
yield String(self, "signature", 3, "SWF format signature", charset="ASCII")
yield UInt8(self, "version")
yield filesizeHandler(UInt32(self, "filesize"))
if self["signature"].value != "CWS":
yield RECT(self, "rect")
yield FixedFloat16(self, "frame_rate")
yield UInt16(self, "frame_count")
while not self.eof:
yield Tag(self, "tag[]")
else:
size = (self.size - self.current_size) // 8
if has_deflate:
data = Deflate(Bytes(self, "compressed_data", size), False)
def createInputStream(cis, source=None, **args):
stream = cis(source=source)
header = StringInputStream("FWS" + self.stream.readBytes(3*8, 5))
args.setdefault("tags",[]).append(("class", SwfFile))
return ConcatStream((header, stream), source=stream.source, **args)
data.setSubIStream(createInputStream)
yield data
else:
yield Bytes(self, "compressed_data", size)
def createDescription(self):
desc = ["version %u" % self["version"].value]
if self["signature"].value == "CWS":
desc.append("compressed")
return u"Macromedia Flash data: %s" % (", ".join(desc))
def createContentSize(self):
if self["signature"].value == "FWS":
return self["filesize"].value * 8
else:
# TODO: Size of compressed Flash?
return None
| gpl-3.0 | -675,575,776,458,684,400 | 36.5025 | 91 | 0.580161 | false |
portableant/open-context-py | opencontext_py/apps/ldata/linkannotations/models.py | 1 | 3000 | import hashlib
from django.db import models
from opencontext_py.apps.ldata.linkentities.models import LinkEntityGeneration
# This class stores linked data annotations made on the data contributed to open context
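# For example (hypothetical data): an annotation asserting that a project-specific type closely
# matches an external concept would store the type's URI as subject, 'skos:closeMatch' as
# predicate_uri, and the external concept's URI as object_uri.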
class LinkAnnotation(models.Model):
# predicates indicating that a subject has an object that is a broader, more general class or property
    # used for establishing hierarchy relations among oc-predicates and oc-types
    # these relations are needed for lookups in the faceted search
PREDS_SBJ_IS_SUB_OF_OBJ = ['skos:broader',
'skos:broaderTransitive',
'skos:broadMatch',
'rdfs:subClassOf',
'rdfs:subPropertyOf']
# predicates indicating that a subject has an object that is narrower (a subclass)
PREDS_SBJ_IS_SUPER_OF_OBJ = ['skos:narrower',
'skos:narrowerTransitive',
'skos:narrowMatch']
    # predicates indicating that a subject is the same as or very similar to an object
PREDS_SBJ_EQUIV_OBJ = ['owl:sameAs',
'skos:closeMatch',
'skos:exactMatch']
hash_id = models.CharField(max_length=50, primary_key=True)
sort = models.DecimalField(max_digits=8, decimal_places=3)
subject = models.CharField(max_length=200, db_index=True)
subject_type = models.CharField(max_length=50)
project_uuid = models.CharField(max_length=50)
source_id = models.CharField(max_length=200) # longer than the normal 50 for URI-identifed vocabs
predicate_uri = models.CharField(max_length=200, db_index=True)
object_uri = models.CharField(max_length=200, db_index=True)
creator_uuid = models.CharField(max_length=50)
updated = models.DateTimeField(auto_now=True)
def make_hash_id(self):
"""
        creates a hash-id to ensure a unique combination of subject, predicate, and object
"""
hash_obj = hashlib.sha1()
concat_string = str(self.subject) + " " + str(self.predicate_uri) + " " + str(self.object_uri)
hash_obj.update(concat_string.encode('utf-8'))
return hash_obj.hexdigest()
def clean_uris(self):
"""
cleans URIs to keep them consistent and empty of 'cruft'
"""
le_gen = LinkEntityGeneration()
self.subject = le_gen.make_clean_uri(self.subject)
self.predicate_uri = le_gen.make_clean_uri(self.predicate_uri)
self.object_uri = le_gen.make_clean_uri(self.object_uri)
def save(self, *args, **kwargs):
"""
        creates the hash-id on saving to ensure a unique assertion
"""
self.clean_uris()
self.hash_id = self.make_hash_id()
super(LinkAnnotation, self).save(*args, **kwargs)
class Meta:
db_table = 'link_annotations'
unique_together = ('subject', 'predicate_uri', 'object_uri')
ordering = ['subject', 'sort']
| gpl-3.0 | -5,660,116,949,967,335,000 | 43.776119 | 106 | 0.627333 | false |
futursolo/FurtherLand | foundation/__init__.py | 1 | 4159 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Futur Solo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tornado.web import *
import tornado.ioloop
import tornado.process
import tornado.netutil
import tornado.httpserver
import os
from . import place
from . import office
from . import memory as historial
navigation = [
(r"/", place.CentralSquare),
# (r"/classes/(.*).htm", ClassesPlace), This will be avaliable in future
# (r"/timeline", HistoryLibrary),
(r"/feed.xml", place.NewsAnnouncement),
(r"/api", place.TerminalService),
(r"/avatar/(.*)", place.IllustratePlace),
(r"/writings/(.*).htm", place.ConferenceHall),
(r"/pages/(.*).htm", place.MemorialWall),
# Office Redirects
(r"/management/checkin/", RedirectHandler, {"url": "/management/checkin"}),
(r"/management/checkout/", RedirectHandler,
{"url": "/management/checkout"}),
(r"/management", RedirectHandler, {"url": "/management/lobby"}),
(r"/management/", RedirectHandler, {"url": "/management/lobby"}),
(r"/management/lobby/", RedirectHandler, {"url": "/management/lobby"}),
(r"/management/working", RedirectHandler,
{"url": "/management/working/new"}),
(r"/management/working/", RedirectHandler,
{"url": "/management/working/new"}),
(r"/management/crda", RedirectHandler,
{"url": "/management/crda/writings"}),
(r"/management/crda/", RedirectHandler,
{"url": "/management/crda/writings"}),
(r"/management/configuration/", RedirectHandler,
{"url": "/management/configuration"}),
(r"/management/checkin", office.CheckinOffice),
(r"/management/checkout", office.CheckoutOffice),
(r"/management/api", office.ActionOffice),
(r"/management/(.*)/(.*)", office.MainOffice),
(r"/management/(.*)", office.MainOffice),
(r"(.*)", place.LostAndFoundPlace)
]
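# Each navigation entry is a Tornado URLSpec tuple: a URL pattern, its handler class, and
# optional handler kwargs (used above to pass RedirectHandler targets).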
class FurtherLand:
def __init__(self, melody):
import os
self.identity = os.getpid()
self.melody = melody
# Build A Port
self.port = tornado.netutil.bind_sockets(
melody.listen_port, address=melody.listen_ip)
self.stage = Application(
handlers=navigation,
cookie_secret=melody.secret,
xsrf_cookies=True,
root_path=os.path.split(os.path.realpath(melody.base))[0],
static_path=os.path.join(
os.path.split(os.path.realpath(melody.base))[0], "spirit"),
template_path=os.path.join(
os.path.split(os.path.realpath(melody.base))[0], "factory"),
login_url="/management/checkin",
historial_records=historial.Records(melody.library),
autoescape=None,
debug=melody.dev,
static_url_prefix="/spirit/",
further_land=self,
safe_land=melody.safeland
)
try:
            # Build Multi Land Entrance
tornado.process.fork_processes(
tornado.process.cpu_count() * 2, max_restarts=100)
except:
pass
def rise(self):
try:
print("FurtherLand has been risen on %s:%d." % (
self.melody.listen_ip, self.melody.listen_port))
import tornado.ioloop
self.land = tornado.httpserver.HTTPServer(self.stage)
self.land.add_sockets(self.port)
tornado.ioloop.IOLoop.current().start()
except:
self.set()
def set(self):
tornado.ioloop.IOLoop.current().stop()
print("FurtherLand set.")
def version(self):
return "FurtherLand Sakihokori Edition"
| apache-2.0 | 2,604,364,657,375,265,000 | 33.371901 | 79 | 0.622265 | false |
ryuunosukeyoshi/PartnerPoi-Bot | lib/youtube_dl/extractor/facebook.py | 17 | 17957 | # coding: utf-8
from __future__ import unicode_literals
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
get_element_by_id,
int_or_none,
js_to_json,
limit_length,
sanitized_Request,
try_get,
urlencode_postdata,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://
(?:[\w-]+\.)?(?:facebook\.com|facebookcorewwwi\.onion)/
(?:[^#]*?\#!/)?
(?:
(?:
video/video\.php|
photo\.php|
video\.php|
video/embed|
story\.php
)\?(?:.*?)(?:v|video_id|story_fbid)=|
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
groups/[^/]+/permalink/
)|
facebook:
)
(?P<id>[0-9]+)
'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'
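    # A desktop Chrome user agent is sent with page requests, presumably so Facebook serves the
    # full desktop markup that contains the video configuration JSON.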
_VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
'uploader': 'Tennis on Facebook',
'upload_date': '20140908',
'timestamp': 1410199200,
}
}, {
'note': 'Video without discernible title',
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 'Asif Nawab Butt posted a video to his Timeline.',
'uploader': 'Asif Nawab Butt',
'upload_date': '20140506',
'timestamp': 1399398998,
},
'expected_warnings': [
'title'
]
}, {
'note': 'Video with DASH manifest',
'url': 'https://www.facebook.com/video.php?v=957955867617029',
'md5': 'b2c28d528273b323abe5c6ab59f0f030',
'info_dict': {
'id': '957955867617029',
'ext': 'mp4',
'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
'uploader': 'Demy de Zeeuw',
'upload_date': '20160110',
'timestamp': 1452431627,
},
}, {
'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
'info_dict': {
'id': '544765982287235',
'ext': 'mp4',
'title': '"What are you doing running in the snow?"',
'uploader': 'FailArmy',
},
'skip': 'Video gone',
}, {
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
'info_dict': {
'id': '1035862816472149',
'ext': 'mp4',
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
'uploader': 'S. Saint',
},
'skip': 'Video gone',
}, {
'note': 'swf params escaped',
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
'md5': '97ba073838964d12c70566e0085c2b91',
'info_dict': {
'id': '10153664894881749',
'ext': 'mp4',
'title': 'Facebook video #10153664894881749',
},
}, {
# have 1080P, but only up to 720p in swf params
'url': 'https://www.facebook.com/cnn/videos/10155529876156509/',
'md5': '0d9813160b146b3bc8744e006027fcc6',
'info_dict': {
'id': '10155529876156509',
'ext': 'mp4',
'title': 'Holocaust survivor becomes US citizen',
'timestamp': 1477818095,
'upload_date': '20161030',
'uploader': 'CNN',
},
}, {
# bigPipe.onPageletArrive ... onPageletArrive pagelet_group_mall
'url': 'https://www.facebook.com/yaroslav.korpan/videos/1417995061575415/',
'info_dict': {
'id': '1417995061575415',
'ext': 'mp4',
'title': 'md5:a7b86ca673f51800cd54687b7f4012fe',
'timestamp': 1486648217,
'upload_date': '20170209',
'uploader': 'Yaroslav Korpan',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/LaGuiaDelVaron/posts/1072691702860471',
'info_dict': {
'id': '1072691702860471',
'ext': 'mp4',
'title': 'md5:ae2d22a93fbb12dad20dc393a869739d',
'timestamp': 1477305000,
'upload_date': '20161024',
'uploader': 'La Guía Del Varón',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/groups/1024490957622648/permalink/1396382447100162/',
'info_dict': {
'id': '1396382447100162',
'ext': 'mp4',
'title': 'md5:e2d2700afdf84e121f5d0f999bad13a3',
'timestamp': 1486035494,
'upload_date': '20170202',
'uploader': 'Elisabeth Ahtn',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
}, {
'url': 'facebook:544765982287235',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
'only_matching': True,
}, {
'url': 'https://zh-hk.facebook.com/peoplespower/videos/1135894589806027/',
'only_matching': True,
}, {
'url': 'https://www.facebookcorewwwi.onion/video.php?v=274175099429670',
'only_matching': True,
}, {
# no title
'url': 'https://www.facebook.com/onlycleverentertainment/videos/1947995502095005/',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
urls = []
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
webpage):
urls.append(mobj.group('url'))
# Facebook API embed
# see https://developers.facebook.com/docs/plugins/embedded-video-player
for mobj in re.finditer(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage):
urls.append(mobj.group('url'))
return urls
def _login(self):
(useremail, password) = self._get_login_info()
if useremail is None:
return
login_page_req = sanitized_Request(self._LOGIN_URL)
self._set_cookie('facebook.com', 'locale', 'en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
error = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
login_results, 'login error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
fb_dtsg = self._search_regex(
r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
h = self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
if not fb_dtsg or not h:
return
check_form = {
'fb_dtsg': fb_dtsg,
'h': h,
'name_action_selected': 'dont_save',
}
check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
self._login()
def _extract_from_url(self, url, video_id, fatal_if_no_video=True):
req = sanitized_Request(url)
req.add_header('User-Agent', self._CHROME_USER_AGENT)
webpage = self._download_webpage(req, video_id)
video_data = None
def extract_video_data(instances):
for item in instances:
if item[1][0] == 'VideoConfig':
video_item = item[2][0]
if video_item.get('video_id'):
return video_item['videoData']
server_js_data = self._parse_json(self._search_regex(
r'handleServerJS\(({.+})(?:\);|,")', webpage,
'server js data', default='{}'), video_id, fatal=False)
if server_js_data:
video_data = extract_video_data(server_js_data.get('instances', []))
if not video_data:
server_js_data = self._parse_json(
self._search_regex(
r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+(?:stream_pagelet|pagelet_group_mall|permalink_video_pagelet)',
webpage, 'js data', default='{}'),
video_id, transform_source=js_to_json, fatal=False)
if server_js_data:
video_data = extract_video_data(try_get(
server_js_data, lambda x: x['jsmods']['instances'],
list) or [])
if not video_data:
if not fatal_if_no_video:
return webpage, False
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
elif '>You must log in to continue' in webpage:
self.raise_login_required()
else:
raise ExtractorError('Cannot parse data')
formats = []
for f in video_data:
format_id = f['stream_type']
if f and isinstance(f, dict):
f = [f]
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
preference = -10 if format_id == 'progressive' else 0
if quality == 'hd':
preference += 5
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
'preference': preference,
})
dash_manifest = f[0].get('dash_manifest')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
if not formats:
raise ExtractorError('Cannot find video formats')
self._sort_formats(formats)
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage,
'title', default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
if not video_title:
video_title = self._html_search_meta(
'description', webpage, 'title', default=None)
if video_title:
video_title = limit_length(video_title, 80)
else:
video_title = 'Facebook video #%s' % video_id
uploader = clean_html(get_element_by_id(
'fbPhotoPageAuthorName', webpage)) or self._search_regex(
r'ownerName\s*:\s*"([^"]+)"', webpage, 'uploader', fatal=False)
timestamp = int_or_none(self._search_regex(
r'<abbr[^>]+data-utime=["\'](\d+)', webpage,
'timestamp', default=None))
info_dict = {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
'timestamp': timestamp,
}
return webpage, info_dict
def _real_extract(self, url):
video_id = self._match_id(url)
real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
webpage, info_dict = self._extract_from_url(real_url, video_id, fatal_if_no_video=False)
if info_dict:
return info_dict
if '/posts/' in url:
entries = [
self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
for vid in self._parse_json(
self._search_regex(
r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
webpage, 'video ids', group='ids'),
video_id)]
return self.playlist_result(entries, video_id)
else:
_, info_dict = self._extract_from_url(
self._VIDEO_PAGE_TEMPLATE % video_id,
video_id, fatal_if_no_video=True)
return info_dict
class FacebookPluginsVideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/plugins/video\.php\?.*?\bhref=(?P<id>https.+)'
_TESTS = [{
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fgov.sg%2Fvideos%2F10154383743583686%2F&show_text=0&width=560',
'md5': '5954e92cdfe51fe5782ae9bda7058a07',
'info_dict': {
'id': '10154383743583686',
'ext': 'mp4',
'title': 'What to do during the haze?',
'uploader': 'Gov.sg',
'upload_date': '20160826',
'timestamp': 1472184808,
},
'add_ie': [FacebookIE.ie_key()],
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fvideo.php%3Fv%3D10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https://www.facebook.com/gov.sg/videos/10154383743583686/&show_text=0&width=560',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
compat_urllib_parse_unquote(self._match_id(url)),
FacebookIE.ie_key())
| gpl-3.0 | 8,484,587,579,731,366,000 | 39.622172 | 163 | 0.511445 | false |
sauloal/cnidaria | scripts/venv/lib/python2.7/encodings/cp864.py | 593 | 33919 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP864.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp864',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
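# Only the code points that differ from the identity (byte-value passthrough) mapping are overridden below.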
decoding_map.update({
0x0025: 0x066a, # ARABIC PERCENT SIGN
0x0080: 0x00b0, # DEGREE SIGN
0x0081: 0x00b7, # MIDDLE DOT
0x0082: 0x2219, # BULLET OPERATOR
0x0083: 0x221a, # SQUARE ROOT
0x0084: 0x2592, # MEDIUM SHADE
0x0085: 0x2500, # FORMS LIGHT HORIZONTAL
0x0086: 0x2502, # FORMS LIGHT VERTICAL
0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT
0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL
0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT
0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL
0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT
0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT
0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT
0x008f: 0x2518, # FORMS LIGHT UP AND LEFT
0x0090: 0x03b2, # GREEK SMALL BETA
0x0091: 0x221e, # INFINITY
0x0092: 0x03c6, # GREEK SMALL PHI
0x0093: 0x00b1, # PLUS-OR-MINUS SIGN
0x0094: 0x00bd, # FRACTION 1/2
0x0095: 0x00bc, # FRACTION 1/4
0x0096: 0x2248, # ALMOST EQUAL TO
0x0097: 0x00ab, # LEFT POINTING GUILLEMET
0x0098: 0x00bb, # RIGHT POINTING GUILLEMET
0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0x009b: None, # UNDEFINED
0x009c: None, # UNDEFINED
0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
0x009f: None, # UNDEFINED
0x00a1: 0x00ad, # SOFT HYPHEN
0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0x00a6: None, # UNDEFINED
0x00a7: None, # UNDEFINED
0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE
0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x00a2, # CENT SIGN
0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM
0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM
0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
0x00db: 0x00a6, # BROKEN VERTICAL BAR
0x00dc: 0x00ac, # NOT SIGN
0x00dd: 0x00f7, # DIVISION SIGN
0x00de: 0x00d7, # MULTIPLICATION SIGN
0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM
0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
0x00f1: 0x0651, # ARABIC SHADDAH
0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM
0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM
0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM
0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
0x00fd: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: None, # UNDEFINED
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'\u066a' # 0x0025 -> ARABIC PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xb0' # 0x0080 -> DEGREE SIGN
u'\xb7' # 0x0081 -> MIDDLE DOT
u'\u2219' # 0x0082 -> BULLET OPERATOR
u'\u221a' # 0x0083 -> SQUARE ROOT
u'\u2592' # 0x0084 -> MEDIUM SHADE
u'\u2500' # 0x0085 -> FORMS LIGHT HORIZONTAL
u'\u2502' # 0x0086 -> FORMS LIGHT VERTICAL
u'\u253c' # 0x0087 -> FORMS LIGHT VERTICAL AND HORIZONTAL
u'\u2524' # 0x0088 -> FORMS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x0089 -> FORMS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x008a -> FORMS LIGHT VERTICAL AND RIGHT
u'\u2534' # 0x008b -> FORMS LIGHT UP AND HORIZONTAL
u'\u2510' # 0x008c -> FORMS LIGHT DOWN AND LEFT
u'\u250c' # 0x008d -> FORMS LIGHT DOWN AND RIGHT
u'\u2514' # 0x008e -> FORMS LIGHT UP AND RIGHT
u'\u2518' # 0x008f -> FORMS LIGHT UP AND LEFT
u'\u03b2' # 0x0090 -> GREEK SMALL BETA
u'\u221e' # 0x0091 -> INFINITY
u'\u03c6' # 0x0092 -> GREEK SMALL PHI
u'\xb1' # 0x0093 -> PLUS-OR-MINUS SIGN
u'\xbd' # 0x0094 -> FRACTION 1/2
u'\xbc' # 0x0095 -> FRACTION 1/4
u'\u2248' # 0x0096 -> ALMOST EQUAL TO
u'\xab' # 0x0097 -> LEFT POINTING GUILLEMET
u'\xbb' # 0x0098 -> RIGHT POINTING GUILLEMET
u'\ufef7' # 0x0099 -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
u'\ufef8' # 0x009a -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
u'\ufffe' # 0x009b -> UNDEFINED
u'\ufffe' # 0x009c -> UNDEFINED
u'\ufefb' # 0x009d -> ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
u'\ufefc' # 0x009e -> ARABIC LIGATURE LAM WITH ALEF FINAL FORM
u'\ufffe' # 0x009f -> UNDEFINED
u'\xa0' # 0x00a0 -> NON-BREAKING SPACE
u'\xad' # 0x00a1 -> SOFT HYPHEN
u'\ufe82' # 0x00a2 -> ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
u'\xa3' # 0x00a3 -> POUND SIGN
u'\xa4' # 0x00a4 -> CURRENCY SIGN
u'\ufe84' # 0x00a5 -> ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
u'\ufffe' # 0x00a6 -> UNDEFINED
u'\ufffe' # 0x00a7 -> UNDEFINED
u'\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM
u'\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM
u'\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM
u'\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM
u'\u060c' # 0x00ac -> ARABIC COMMA
u'\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM
u'\ufea1' # 0x00ae -> ARABIC LETTER HAH ISOLATED FORM
u'\ufea5' # 0x00af -> ARABIC LETTER KHAH ISOLATED FORM
u'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO
u'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE
u'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO
u'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE
u'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR
u'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE
u'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX
u'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN
u'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT
u'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE
u'\ufed1' # 0x00ba -> ARABIC LETTER FEH ISOLATED FORM
u'\u061b' # 0x00bb -> ARABIC SEMICOLON
u'\ufeb1' # 0x00bc -> ARABIC LETTER SEEN ISOLATED FORM
u'\ufeb5' # 0x00bd -> ARABIC LETTER SHEEN ISOLATED FORM
u'\ufeb9' # 0x00be -> ARABIC LETTER SAD ISOLATED FORM
u'\u061f' # 0x00bf -> ARABIC QUESTION MARK
u'\xa2' # 0x00c0 -> CENT SIGN
u'\ufe80' # 0x00c1 -> ARABIC LETTER HAMZA ISOLATED FORM
u'\ufe81' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufe83' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
u'\ufe85' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
u'\ufeca' # 0x00c5 -> ARABIC LETTER AIN FINAL FORM
u'\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
u'\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM
u'\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM
u'\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
u'\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM
u'\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM
u'\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM
u'\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM
u'\ufea7' # 0x00ce -> ARABIC LETTER KHAH INITIAL FORM
u'\ufea9' # 0x00cf -> ARABIC LETTER DAL ISOLATED FORM
u'\ufeab' # 0x00d0 -> ARABIC LETTER THAL ISOLATED FORM
u'\ufead' # 0x00d1 -> ARABIC LETTER REH ISOLATED FORM
u'\ufeaf' # 0x00d2 -> ARABIC LETTER ZAIN ISOLATED FORM
u'\ufeb3' # 0x00d3 -> ARABIC LETTER SEEN INITIAL FORM
u'\ufeb7' # 0x00d4 -> ARABIC LETTER SHEEN INITIAL FORM
u'\ufebb' # 0x00d5 -> ARABIC LETTER SAD INITIAL FORM
u'\ufebf' # 0x00d6 -> ARABIC LETTER DAD INITIAL FORM
u'\ufec1' # 0x00d7 -> ARABIC LETTER TAH ISOLATED FORM
u'\ufec5' # 0x00d8 -> ARABIC LETTER ZAH ISOLATED FORM
u'\ufecb' # 0x00d9 -> ARABIC LETTER AIN INITIAL FORM
u'\ufecf' # 0x00da -> ARABIC LETTER GHAIN INITIAL FORM
u'\xa6' # 0x00db -> BROKEN VERTICAL BAR
u'\xac' # 0x00dc -> NOT SIGN
u'\xf7' # 0x00dd -> DIVISION SIGN
u'\xd7' # 0x00de -> MULTIPLICATION SIGN
u'\ufec9' # 0x00df -> ARABIC LETTER AIN ISOLATED FORM
u'\u0640' # 0x00e0 -> ARABIC TATWEEL
u'\ufed3' # 0x00e1 -> ARABIC LETTER FEH INITIAL FORM
u'\ufed7' # 0x00e2 -> ARABIC LETTER QAF INITIAL FORM
u'\ufedb' # 0x00e3 -> ARABIC LETTER KAF INITIAL FORM
u'\ufedf' # 0x00e4 -> ARABIC LETTER LAM INITIAL FORM
u'\ufee3' # 0x00e5 -> ARABIC LETTER MEEM INITIAL FORM
u'\ufee7' # 0x00e6 -> ARABIC LETTER NOON INITIAL FORM
u'\ufeeb' # 0x00e7 -> ARABIC LETTER HEH INITIAL FORM
u'\ufeed' # 0x00e8 -> ARABIC LETTER WAW ISOLATED FORM
u'\ufeef' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA ISOLATED FORM
u'\ufef3' # 0x00ea -> ARABIC LETTER YEH INITIAL FORM
u'\ufebd' # 0x00eb -> ARABIC LETTER DAD ISOLATED FORM
u'\ufecc' # 0x00ec -> ARABIC LETTER AIN MEDIAL FORM
u'\ufece' # 0x00ed -> ARABIC LETTER GHAIN FINAL FORM
u'\ufecd' # 0x00ee -> ARABIC LETTER GHAIN ISOLATED FORM
u'\ufee1' # 0x00ef -> ARABIC LETTER MEEM ISOLATED FORM
u'\ufe7d' # 0x00f0 -> ARABIC SHADDA MEDIAL FORM
u'\u0651' # 0x00f1 -> ARABIC SHADDAH
u'\ufee5' # 0x00f2 -> ARABIC LETTER NOON ISOLATED FORM
u'\ufee9' # 0x00f3 -> ARABIC LETTER HEH ISOLATED FORM
u'\ufeec' # 0x00f4 -> ARABIC LETTER HEH MEDIAL FORM
u'\ufef0' # 0x00f5 -> ARABIC LETTER ALEF MAKSURA FINAL FORM
u'\ufef2' # 0x00f6 -> ARABIC LETTER YEH FINAL FORM
u'\ufed0' # 0x00f7 -> ARABIC LETTER GHAIN MEDIAL FORM
u'\ufed5' # 0x00f8 -> ARABIC LETTER QAF ISOLATED FORM
u'\ufef5' # 0x00f9 -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
u'\ufef6' # 0x00fa -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
u'\ufedd' # 0x00fb -> ARABIC LETTER LAM ISOLATED FORM
u'\ufed9' # 0x00fc -> ARABIC LETTER KAF ISOLATED FORM
u'\ufef1' # 0x00fd -> ARABIC LETTER YEH ISOLATED FORM
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\ufffe' # 0x00ff -> UNDEFINED
)
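# Note: the table above is this codec's byte -> Unicode decoding table (the mix of
# Arabic presentation forms, Arabic-Indic digits and "FORMS LIGHT ..." box-drawing
# names suggests a cp864-style DOS Arabic code page, though the codec's name is not
# visible in this excerpt); the encoding_map below is its inverse, mapping each
# Unicode code point back to the single encoded byte.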
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00a0, # NON-BREAKING SPACE
0x00a2: 0x00c0, # CENT SIGN
0x00a3: 0x00a3, # POUND SIGN
0x00a4: 0x00a4, # CURRENCY SIGN
0x00a6: 0x00db, # BROKEN VERTICAL BAR
0x00ab: 0x0097, # LEFT POINTING GUILLEMET
0x00ac: 0x00dc, # NOT SIGN
0x00ad: 0x00a1, # SOFT HYPHEN
0x00b0: 0x0080, # DEGREE SIGN
0x00b1: 0x0093, # PLUS-OR-MINUS SIGN
0x00b7: 0x0081, # MIDDLE DOT
0x00bb: 0x0098, # RIGHT POINTING GUILLEMET
0x00bc: 0x0095, # FRACTION 1/4
0x00bd: 0x0094, # FRACTION 1/2
0x00d7: 0x00de, # MULTIPLICATION SIGN
0x00f7: 0x00dd, # DIVISION SIGN
0x03b2: 0x0090, # GREEK SMALL BETA
0x03c6: 0x0092, # GREEK SMALL PHI
0x060c: 0x00ac, # ARABIC COMMA
0x061b: 0x00bb, # ARABIC SEMICOLON
0x061f: 0x00bf, # ARABIC QUESTION MARK
0x0640: 0x00e0, # ARABIC TATWEEL
0x0651: 0x00f1, # ARABIC SHADDAH
0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO
0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE
0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO
0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE
0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR
0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE
0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX
0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN
0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT
0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE
0x066a: 0x0025, # ARABIC PERCENT SIGN
0x2219: 0x0082, # BULLET OPERATOR
0x221a: 0x0083, # SQUARE ROOT
0x221e: 0x0091, # INFINITY
0x2248: 0x0096, # ALMOST EQUAL TO
0x2500: 0x0085, # FORMS LIGHT HORIZONTAL
0x2502: 0x0086, # FORMS LIGHT VERTICAL
0x250c: 0x008d, # FORMS LIGHT DOWN AND RIGHT
0x2510: 0x008c, # FORMS LIGHT DOWN AND LEFT
0x2514: 0x008e, # FORMS LIGHT UP AND RIGHT
0x2518: 0x008f, # FORMS LIGHT UP AND LEFT
0x251c: 0x008a, # FORMS LIGHT VERTICAL AND RIGHT
0x2524: 0x0088, # FORMS LIGHT VERTICAL AND LEFT
0x252c: 0x0089, # FORMS LIGHT DOWN AND HORIZONTAL
0x2534: 0x008b, # FORMS LIGHT UP AND HORIZONTAL
0x253c: 0x0087, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x2592: 0x0084, # MEDIUM SHADE
0x25a0: 0x00fe, # BLACK SQUARE
0xfe7d: 0x00f0, # ARABIC SHADDA MEDIAL FORM
0xfe80: 0x00c1, # ARABIC LETTER HAMZA ISOLATED FORM
0xfe81: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0xfe82: 0x00a2, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0xfe83: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0xfe84: 0x00a5, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0xfe85: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0xfe8b: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0xfe8d: 0x00c7, # ARABIC LETTER ALEF ISOLATED FORM
0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM
0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM
0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM
0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM
0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM
0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM
0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM
0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM
0xfe9f: 0x00cc, # ARABIC LETTER JEEM INITIAL FORM
0xfea1: 0x00ae, # ARABIC LETTER HAH ISOLATED FORM
0xfea3: 0x00cd, # ARABIC LETTER HAH INITIAL FORM
0xfea5: 0x00af, # ARABIC LETTER KHAH ISOLATED FORM
0xfea7: 0x00ce, # ARABIC LETTER KHAH INITIAL FORM
0xfea9: 0x00cf, # ARABIC LETTER DAL ISOLATED FORM
0xfeab: 0x00d0, # ARABIC LETTER THAL ISOLATED FORM
0xfead: 0x00d1, # ARABIC LETTER REH ISOLATED FORM
0xfeaf: 0x00d2, # ARABIC LETTER ZAIN ISOLATED FORM
0xfeb1: 0x00bc, # ARABIC LETTER SEEN ISOLATED FORM
0xfeb3: 0x00d3, # ARABIC LETTER SEEN INITIAL FORM
0xfeb5: 0x00bd, # ARABIC LETTER SHEEN ISOLATED FORM
0xfeb7: 0x00d4, # ARABIC LETTER SHEEN INITIAL FORM
0xfeb9: 0x00be, # ARABIC LETTER SAD ISOLATED FORM
0xfebb: 0x00d5, # ARABIC LETTER SAD INITIAL FORM
0xfebd: 0x00eb, # ARABIC LETTER DAD ISOLATED FORM
0xfebf: 0x00d6, # ARABIC LETTER DAD INITIAL FORM
0xfec1: 0x00d7, # ARABIC LETTER TAH ISOLATED FORM
0xfec5: 0x00d8, # ARABIC LETTER ZAH ISOLATED FORM
0xfec9: 0x00df, # ARABIC LETTER AIN ISOLATED FORM
0xfeca: 0x00c5, # ARABIC LETTER AIN FINAL FORM
0xfecb: 0x00d9, # ARABIC LETTER AIN INITIAL FORM
0xfecc: 0x00ec, # ARABIC LETTER AIN MEDIAL FORM
0xfecd: 0x00ee, # ARABIC LETTER GHAIN ISOLATED FORM
0xfece: 0x00ed, # ARABIC LETTER GHAIN FINAL FORM
0xfecf: 0x00da, # ARABIC LETTER GHAIN INITIAL FORM
0xfed0: 0x00f7, # ARABIC LETTER GHAIN MEDIAL FORM
0xfed1: 0x00ba, # ARABIC LETTER FEH ISOLATED FORM
0xfed3: 0x00e1, # ARABIC LETTER FEH INITIAL FORM
0xfed5: 0x00f8, # ARABIC LETTER QAF ISOLATED FORM
0xfed7: 0x00e2, # ARABIC LETTER QAF INITIAL FORM
0xfed9: 0x00fc, # ARABIC LETTER KAF ISOLATED FORM
0xfedb: 0x00e3, # ARABIC LETTER KAF INITIAL FORM
0xfedd: 0x00fb, # ARABIC LETTER LAM ISOLATED FORM
0xfedf: 0x00e4, # ARABIC LETTER LAM INITIAL FORM
0xfee1: 0x00ef, # ARABIC LETTER MEEM ISOLATED FORM
0xfee3: 0x00e5, # ARABIC LETTER MEEM INITIAL FORM
0xfee5: 0x00f2, # ARABIC LETTER NOON ISOLATED FORM
0xfee7: 0x00e6, # ARABIC LETTER NOON INITIAL FORM
0xfee9: 0x00f3, # ARABIC LETTER HEH ISOLATED FORM
0xfeeb: 0x00e7, # ARABIC LETTER HEH INITIAL FORM
0xfeec: 0x00f4, # ARABIC LETTER HEH MEDIAL FORM
0xfeed: 0x00e8, # ARABIC LETTER WAW ISOLATED FORM
0xfeef: 0x00e9, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0xfef0: 0x00f5, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0xfef1: 0x00fd, # ARABIC LETTER YEH ISOLATED FORM
0xfef2: 0x00f6, # ARABIC LETTER YEH FINAL FORM
0xfef3: 0x00ea, # ARABIC LETTER YEH INITIAL FORM
0xfef5: 0x00f9, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0xfef6: 0x00fa, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0xfef7: 0x0099, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0xfef8: 0x009a, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0xfefb: 0x009d, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0xfefc: 0x009e, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
}
| mit | 252,991,295,260,298,530 | 48.157971 | 97 | 0.607064 | false |
ESS-LLP/erpnext-medical | erpnext/hotels/doctype/hotel_room_reservation/hotel_room_reservation.py | 7 | 3513 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
from frappe import _
from frappe.utils import date_diff, add_days, flt
class HotelRoomUnavailableError(frappe.ValidationError): pass
class HotelRoomPricingNotSetError(frappe.ValidationError): pass
class HotelRoomReservation(Document):
def validate(self):
self.total_rooms = {}
self.set_rates()
self.validate_availability()
def validate_availability(self):
for i in range(date_diff(self.to_date, self.from_date)):
day = add_days(self.from_date, i)
self.rooms_booked = {}
for d in self.items:
if not d.item in self.rooms_booked:
self.rooms_booked[d.item] = 0
room_type = frappe.db.get_value("Hotel Room Package",
d.item, 'hotel_room_type')
rooms_booked = get_rooms_booked(room_type, day, exclude_reservation=self.name) \
+ d.qty + self.rooms_booked.get(d.item)
total_rooms = self.get_total_rooms(d.item)
if total_rooms < rooms_booked:
frappe.throw(_("Hotel Rooms of type {0} are unavailable on {1}".format(d.item,
frappe.format(day, dict(fieldtype="Date")))), exc=HotelRoomUnavailableError)
self.rooms_booked[d.item] += rooms_booked
def get_total_rooms(self, item):
if not item in self.total_rooms:
self.total_rooms[item] = frappe.db.sql("""
select count(*)
from
`tabHotel Room Package` package
inner join
`tabHotel Room` room on package.hotel_room_type = room.hotel_room_type
where
package.item = %s""", item)[0][0] or 0
return self.total_rooms[item]
def set_rates(self):
self.net_total = 0
for d in self.items:
net_rate = 0.0
for i in range(date_diff(self.to_date, self.from_date)):
day = add_days(self.from_date, i)
if not d.item:
continue
day_rate = frappe.db.sql("""
select
item.rate
from
`tabHotel Room Pricing Item` item,
`tabHotel Room Pricing` pricing
where
item.parent = pricing.name
and item.item = %s
and %s between pricing.from_date
and pricing.to_date""", (d.item, day))
if day_rate:
net_rate += day_rate[0][0]
else:
frappe.throw(
_("Please set Hotel Room Rate on {}".format(
frappe.format(day, dict(fieldtype="Date")))), exc=HotelRoomPricingNotSetError)
d.rate = net_rate
d.amount = net_rate * flt(d.qty)
self.net_total += d.amount
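# get_room_rate below is whitelisted so the client-side reservation form can
# re-price its items: it receives the JSON-serialized document (hence json.loads),
# re-runs set_rates() on it and returns the updated document as a dict.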
@frappe.whitelist()
def get_room_rate(hotel_room_reservation):
"""Calculate rate for each day as it may belong to different Hotel Room Pricing Item"""
doc = frappe.get_doc(json.loads(hotel_room_reservation))
doc.set_rates()
return doc.as_dict()
def get_rooms_booked(room_type, day, exclude_reservation=None):
exclude_condition = ''
if exclude_reservation:
exclude_condition = 'and reservation.name != "{0}"'.format(frappe.db.escape(exclude_reservation))
return frappe.db.sql("""
select sum(item.qty)
from
`tabHotel Room Package` room_package,
`tabHotel Room Reservation Item` item,
`tabHotel Room Reservation` reservation
where
item.parent = reservation.name
and room_package.item = item.item
and room_package.hotel_room_type = %s
and reservation.docstatus = 1
{exclude_condition}
and %s between reservation.from_date
and reservation.to_date""".format(exclude_condition=exclude_condition),
(room_type, day))[0][0] or 0
| gpl-3.0 | 7,554,156,334,298,976,000 | 31.229358 | 99 | 0.684031 | false |
panaviatornado/hfhom | corrterm/dehn_homology/grid.py | 2 | 7708 | # Caltech SURF 2013
# FILE: grid.py
# 07.17.13
'''
Utilities for grid diagrams of knots: winding numbers, Alexander gradings,
empty-rectangle checks, and the mod-2 differential, in preparation for
computing CFK^\infty.
'''
# winding matrix done by rows (consistent with gridlink)
# TODO bottom row first???
# TODO only knots no links...
from fractions import Fraction
import itertools # list(itertools.permutations([1,2,3]))
# TODO at this rate, we won't get past 7 crossings...
import math
def valid_xo(xlist, olist):
'''
Returns True if xlist, olist are valid (1 per row/col), False otherwise.
'''
if len(xlist) != len(olist):
return False
for col_num in range(len(xlist)):
if xlist[col_num] == olist[col_num]:
return False # same coordinates...
if xlist.count(xlist[col_num]) != 1 or \
olist.count(olist[col_num]) != 1:
return False
return True
def alexander_helper(x, o, winding, n):
'''
Returns -1/8 Sum(a(c_{i,j})) - (n-1)/2, where a(-) is minus the winding
number, and c_{i,j} is a corner of any square with an X or O.
x -- row indices of x's (starts at 0, at bottom)
y -- row indices of o's (starts at 0, at left)
winding -- matrix of winding numbers TODO bottom to top ???
n -- size of grid
'''
a_rhs = 0
for index, row in enumerate(x + o):
# sum over winding numbers of corners
a_rhs += winding[row][index%n] + winding[(row+1)%n][index%n] + \
winding[row][(index+1)%n] + winding[(row+1)%n][(index+1)%n]
a_rhs = Fraction(a_rhs, 8) - Fraction(n-1, 2)
return a_rhs
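# Worked example (matching the trefoil data in __main__ below), with n = 5:
#   rhs = alexander_helper([1,2,3,4,0], [4,0,1,2,3], winding, 5)
#   alexander((0,1,2,3,4), winding, rhs)
# i.e. the Alexander grading of a generator is rhs minus the sum of the winding
# numbers at the generator's points.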
def alexander(generator, winding, rhs):
'''
Returns Alexander grading of tuple 'generator'.
generator -- tuple of row numbers
winding -- array of winding numbers TODO: bottom to top ???
rhs -- -1/8 Sum(a(c_{i,j})) - (n-1)/2, the right hand (constant) side
of the formula for Alexander grading
'''
asum = 0
for index, row in enumerate(generator):
asum += winding[row][index]
asum *= -1
asum += rhs
assert asum.denominator == 1
asum = asum.numerator
return asum
def differby2(gen1, gen2):
'''
Returns tuple (True, [i1, i2]) if generators 'gen1' and 'gen2' differ
by 2 points, where i1 and i2 are the indices (col) where they differ.
Returns (False, []) otherwise.
'''
differ = 0
where = []
assert len(gen1) == len(gen2)
for i in range(len(gen1)):
if gen1[i] != gen2[i]:
differ += 1
where.append(i)
if differ > 2:
return (False, [])
if differ != 2:
return (False, [])
return (True, where)
def interior(tup1, tup2, gen1, gen2, xlist, olist):
'''
Returns (True, [#X, #O]) if rectangle has no generators in it
Returns (False, []) if rectangle has generators in it
#X = number of X's in the rectangle
#O = number of O's in the rectangle
tup1 is the lower left corner of the rectangle
tup2 is the upper right corner of the rectangle
FIXME comment, test...might be off by 1
'''
n = len(gen1)
assert gen1[tup1[0]] == tup1[1]
assert gen1[tup2[0]] == tup2[1]
# iterate - get appropriate columns, then check if rows are in rectangle
if (tup1[0] < tup2[0] and tup1[1] >= tup2[1]) or \
(tup1[0] > tup2[0] and tup1[1] <= tup2[1]) or (tup1[0] == tup2[0]):
raise ValueError('points do not define a valid rectangle')
if tup1[0] < tup2[0]: # normal
col_indices = (tup1[0], tup2[0])
row_indices = (tup1[1], tup2[1])
else:
col_indices = (tup2[0], tup1[0])
row_indices = (tup2[1], tup1[1])
# iterate through gen1, gen2 lists - make sure there are none
for gen in (gen1[(col_indices[0]+1)%n:(col_indices[1]-1)%n] + \
gen2[(col_indices[0]+1)%n:(col_indices[1]-1)%n]):
if gen > row_indices[0] and gen < row_indices[1]: # inside rect
return (False, [])
num_x = 0
num_o = 0
# iterate through X list
for x in xlist[col_indices[0]:col_indices[1]]:
if x >= row_indices[0] and x < row_indices[1]: # inside rect
num_x += 1
# iterate through O list
for o in olist[col_indices[0]:col_indices[1]]:
if o >= row_indices[0] and o < row_indices[1]: # inside rect
num_o += 1
return (True, [num_x, num_o])
def differential(genlist, x, o, ak=False, k=0, b=False):
'''
    Returns boundary operator del[genlist] = Sum over rect. y ([y, i-#X, j-#O])
Output of [] means 0.
'genlist' is a list of generators, where each generator is represented as
a tuple ((r1, r2,...,rn), i, j). Multiple generators in the list 'genlist'
indicates addition
i.e. genlist = [((r1, r2,...,rn), i, j), ((s1, s2,...,sn), i', j')] means
((r1, r2,...,rn), i, j) + ((s1, s2,...,sn), i', j')
x, o are lists of the positions of X's and O's, respectively.
'''
if ak and b: # both True
raise ValueError('at least one of Ak^+, B+ must be False')
# iterate over all other generators that differ by 2
# i.e. start with x -> pick 2 points and swap them
# C(n,2) = n(n-1)/2 = O(n^2)
assert len(x) == len(o)
boundary = {}
for gen in genlist:
rowtup = gen[0]
assert len(rowtup) == len(x)
for swap in [(i,j) for i in range(len(x)) for j in range(len(x)) \
if j > i]:
# swap = column indices to swap; i < j always
if rowtup[swap[0]] >= rowtup[swap[1]]:
continue # not a valid rectangle i.e. y -> x not x -> y
other_rowtup = list(rowtup)
other_rowtup[swap[0]], other_rowtup[swap[1]] = \
other_rowtup[swap[1]], other_rowtup[swap[0]] # swap positions
other_rowtup = tuple(other_rowtup)
result = interior((swap[0], rowtup[swap[0]]), \
(swap[1], rowtup[swap[1]]), rowtup, other_rowtup,\
x, o)
if result[0] == True:
# check Ak+, B+
if ak and gen[1] - result[1][0] < 0 and gen[2] - result[1][1] < k: # i < 0, j < k
continue # skip (no arrow)
if b and gen[1] - result[1][0] < 0: # i < 0
continue # skip (no arrow)
other_gen = (other_rowtup, gen[1] - result[1][0], \
gen[2] - result[1][1])
if other_gen in boundary:
del boundary[other_gen] # mod 2
else:
boundary[other_gen] = 1
return list(boundary) # all keys
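# Sanity check: the differential squares to zero mod 2, so applying it twice to any
# generator should give []; the __main__ block below prints exactly this for one
# generator of the trefoil.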
if __name__ == '__main__':
# trefoil sample - TODO: put this in test script
winding=[[0,0,0,0,0],[0,0,1,1,1],[0,-1,0,1,1],[0,-1,-1,0,1],[0,-1,-1,-1,0]]
x=[1,2,3,4,0]
o=[4,0,1,2,3]
rhs=alexander_helper(x,o,winding,5)
count = {}
generators = itertools.permutations([0,1,2,3,4])
for i in range(math.factorial(5)):
gen = generators.next() # FIXME: is next or list( ) faster?
gr = alexander(gen,winding,rhs)
if gr in count:
count[gr] += 1
else:
count[gr] = 1
print count
print interior((0,3),(2,4),(3,0,4,1,2),(4,0,3,1,2),x,o) # Figure 3
print interior((1,2),(4,3),(0,2,4,1,3),(0,3,4,1,2),x,o) # [1,1]
print differential([((3,0,4,1,2),0,-3)],x,o)
print differential(differential([((3,0,4,1,2),0,-3)],x,o),x,o)
# TODO section 2 cancel generators
# remove ONLY gen not reducing i, j in differential
# then compute homology on reduced generators
# size of grid = arc number
# alternating => crossings + 2, others less
# http://www.indiana.edu/~knotinfo/ | gpl-2.0 | -6,500,285,984,916,305,000 | 34.525346 | 97 | 0.546445 | false |
wangxuan007/flasky | venv/lib/python2.7/site-packages/mako/lookup.py | 22 | 12847 | # mako/lookup.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import os
import stat
import posixpath
import re
from mako import exceptions, util
from mako.template import Template
try:
import threading
except:
import dummy_threading as threading
class TemplateCollection(object):
"""Represent a collection of :class:`.Template` objects,
identifiable via URI.
A :class:`.TemplateCollection` is linked to the usage of
all template tags that address other templates, such
as ``<%include>``, ``<%namespace>``, and ``<%inherit>``.
The ``file`` attribute of each of those tags refers
to a string URI that is passed to that :class:`.Template`
object's :class:`.TemplateCollection` for resolution.
:class:`.TemplateCollection` is an abstract class,
with the usual default implementation being :class:`.TemplateLookup`.
"""
def has_template(self, uri):
"""Return ``True`` if this :class:`.TemplateLookup` is
capable of returning a :class:`.Template` object for the
given ``uri``.
:param uri: String URI of the template to be resolved.
"""
try:
self.get_template(uri)
return True
except exceptions.TemplateLookupException:
return False
def get_template(self, uri, relativeto=None):
"""Return a :class:`.Template` object corresponding to the given
``uri``.
The default implementation raises
:class:`.NotImplementedError`. Implementations should
raise :class:`.TemplateLookupException` if the given ``uri``
cannot be resolved.
:param uri: String URI of the template to be resolved.
:param relativeto: if present, the given ``uri`` is assumed to
be relative to this URI.
"""
raise NotImplementedError()
def filename_to_uri(self, uri, filename):
"""Convert the given ``filename`` to a URI relative to
this :class:`.TemplateCollection`."""
return uri
def adjust_uri(self, uri, filename):
"""Adjust the given ``uri`` based on the calling ``filename``.
When this method is called from the runtime, the
``filename`` parameter is taken directly to the ``filename``
attribute of the calling template. Therefore a custom
:class:`.TemplateCollection` subclass can place any string
identifier desired in the ``filename`` parameter of the
:class:`.Template` objects it constructs and have them come back
here.
"""
return uri
class TemplateLookup(TemplateCollection):
"""Represent a collection of templates that locates template source files
from the local filesystem.
The primary argument is the ``directories`` argument, the list of
directories to search:
.. sourcecode:: python
lookup = TemplateLookup(["/path/to/templates"])
some_template = lookup.get_template("/index.html")
The :class:`.TemplateLookup` can also be given :class:`.Template` objects
    programmatically using :meth:`.put_string` or :meth:`.put_template`:
.. sourcecode:: python
lookup = TemplateLookup()
lookup.put_string("base.html", '''
<html><body>${self.next()}</body></html>
''')
lookup.put_string("hello.html", '''
<%include file='base.html'/>
Hello, world !
''')
:param directories: A list of directory names which will be
searched for a particular template URI. The URI is appended
to each directory and the filesystem checked.
:param collection_size: Approximate size of the collection used
to store templates. If left at its default of ``-1``, the size
is unbounded, and a plain Python dictionary is used to
relate URI strings to :class:`.Template` instances.
Otherwise, a least-recently-used cache object is used which
will maintain the size of the collection approximately to
the number given.
:param filesystem_checks: When at its default value of ``True``,
each call to :meth:`.TemplateLookup.get_template()` will
compare the filesystem last modified time to the time in
which an existing :class:`.Template` object was created.
This allows the :class:`.TemplateLookup` to regenerate a
new :class:`.Template` whenever the original source has
been updated. Set this to ``False`` for a very minor
performance increase.
:param modulename_callable: A callable which, when present,
is passed the path of the source file as well as the
requested URI, and then returns the full path of the
generated Python module file. This is used to inject
alternate schemes for Python module location. If left at
its default of ``None``, the built in system of generation
based on ``module_directory`` plus ``uri`` is used.
All other keyword parameters available for
:class:`.Template` are mirrored here. When new
:class:`.Template` objects are created, the keywords
established with this :class:`.TemplateLookup` are passed on
to each new :class:`.Template`.
"""
def __init__(self,
directories=None,
module_directory=None,
filesystem_checks=True,
collection_size=-1,
format_exceptions=False,
error_handler=None,
disable_unicode=False,
bytestring_passthrough=False,
output_encoding=None,
encoding_errors='strict',
cache_args=None,
cache_impl='beaker',
cache_enabled=True,
cache_type=None,
cache_dir=None,
cache_url=None,
modulename_callable=None,
module_writer=None,
default_filters=None,
buffer_filters=(),
strict_undefined=False,
imports=None,
future_imports=None,
enable_loop=True,
input_encoding=None,
preprocessor=None,
lexer_cls=None):
self.directories = [posixpath.normpath(d) for d in
util.to_list(directories, ())
]
self.module_directory = module_directory
self.modulename_callable = modulename_callable
self.filesystem_checks = filesystem_checks
self.collection_size = collection_size
if cache_args is None:
cache_args = {}
# transfer deprecated cache_* args
if cache_dir:
cache_args.setdefault('dir', cache_dir)
if cache_url:
cache_args.setdefault('url', cache_url)
if cache_type:
cache_args.setdefault('type', cache_type)
self.template_args = {
'format_exceptions': format_exceptions,
'error_handler': error_handler,
'disable_unicode': disable_unicode,
'bytestring_passthrough': bytestring_passthrough,
'output_encoding': output_encoding,
'cache_impl': cache_impl,
'encoding_errors': encoding_errors,
'input_encoding': input_encoding,
'module_directory': module_directory,
'module_writer': module_writer,
'cache_args': cache_args,
'cache_enabled': cache_enabled,
'default_filters': default_filters,
'buffer_filters': buffer_filters,
'strict_undefined': strict_undefined,
'imports': imports,
'future_imports': future_imports,
'enable_loop': enable_loop,
'preprocessor': preprocessor,
'lexer_cls': lexer_cls
}
if collection_size == -1:
self._collection = {}
self._uri_cache = {}
else:
self._collection = util.LRUCache(collection_size)
self._uri_cache = util.LRUCache(collection_size)
self._mutex = threading.Lock()
def get_template(self, uri):
"""Return a :class:`.Template` object corresponding to the given
``uri``.
.. note:: The ``relativeto`` argument is not supported here at
the moment.
"""
try:
if self.filesystem_checks:
return self._check(uri, self._collection[uri])
else:
return self._collection[uri]
except KeyError:
u = re.sub(r'^\/+', '', uri)
for dir in self.directories:
                # make sure the path separators are posix - os.altsep is empty
# on POSIX and cannot be used.
dir = dir.replace(os.path.sep, posixpath.sep)
srcfile = posixpath.normpath(posixpath.join(dir, u))
if os.path.isfile(srcfile):
return self._load(srcfile, uri)
else:
raise exceptions.TopLevelLookupException(
"Cant locate template for uri %r" % uri)
def adjust_uri(self, uri, relativeto):
"""Adjust the given ``uri`` based on the given relative URI."""
key = (uri, relativeto)
if key in self._uri_cache:
return self._uri_cache[key]
if uri[0] != '/':
if relativeto is not None:
v = self._uri_cache[key] = posixpath.join(
posixpath.dirname(relativeto), uri)
else:
v = self._uri_cache[key] = '/' + uri
else:
v = self._uri_cache[key] = uri
return v
def filename_to_uri(self, filename):
"""Convert the given ``filename`` to a URI relative to
this :class:`.TemplateCollection`."""
try:
return self._uri_cache[filename]
except KeyError:
value = self._relativeize(filename)
self._uri_cache[filename] = value
return value
def _relativeize(self, filename):
"""Return the portion of a filename that is 'relative'
to the directories in this lookup.
"""
filename = posixpath.normpath(filename)
for dir in self.directories:
if filename[0:len(dir)] == dir:
return filename[len(dir):]
else:
return None
def _load(self, filename, uri):
self._mutex.acquire()
try:
try:
# try returning from collection one
# more time in case concurrent thread already loaded
return self._collection[uri]
except KeyError:
pass
try:
if self.modulename_callable is not None:
module_filename = self.modulename_callable(filename, uri)
else:
module_filename = None
self._collection[uri] = template = Template(
uri=uri,
filename=posixpath.normpath(filename),
lookup=self,
module_filename=module_filename,
**self.template_args)
return template
except:
# if compilation fails etc, ensure
# template is removed from collection,
# re-raise
self._collection.pop(uri, None)
raise
finally:
self._mutex.release()
def _check(self, uri, template):
if template.filename is None:
return template
try:
template_stat = os.stat(template.filename)
if template.module._modified_time < \
template_stat[stat.ST_MTIME]:
self._collection.pop(uri, None)
return self._load(template.filename, uri)
else:
return template
except OSError:
self._collection.pop(uri, None)
raise exceptions.TemplateLookupException(
"Cant locate template for uri %r" % uri)
def put_string(self, uri, text):
"""Place a new :class:`.Template` object into this
:class:`.TemplateLookup`, based on the given string of
``text``.
"""
self._collection[uri] = Template(
text,
lookup=self,
uri=uri,
**self.template_args)
def put_template(self, uri, template):
"""Place a new :class:`.Template` object into this
:class:`.TemplateLookup`, based on the given
:class:`.Template` object.
"""
self._collection[uri] = template
| gpl-3.0 | -5,850,916,895,609,952,000 | 34.00545 | 78 | 0.577178 | false |
thomashaw/SecGen | modules/utilities/unix/labtainers/files/Labtainers-master/scripts/gns3/showNet.py | 2 | 1582 | #!/usr/bin/env python
import sys
import os
import argparse
import json
from netaddr import IPNetwork
labtainer_dir = os.getenv('LABTAINER_DIR')
if labtainer_dir is None:
print('Must define LABTAINER_DIR environment variable')
exit(1)
sys.path.append(os.path.join(labtainer_dir, 'scripts', 'labtainer-student','bin'))
sys.path.append(os.path.join(labtainer_dir, 'scripts', 'labtainer-student','lab_bin'))
import labutils
import LabtainerLogging
'''
Show network topology of a Labtainers lab
'''
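# Example invocation (assumes LABTAINER_DIR is set and the named lab exists under
# $LABTAINER_DIR/labs; the lab name below is only illustrative):
#   python showNet.py telnetlab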
labtainers_path = os.path.join(labtainer_dir, 'labs')
parser = argparse.ArgumentParser(description='Show Labtainer topology.')
parser.add_argument('labname', help='Name of labtainers lab')
args = parser.parse_args()
labutils.logger = LabtainerLogging.LabtainerLogging("showNet.log", 'eh', "../../config/labtainer.config")
labtainer_lab = os.path.join(labtainers_path, args.labname)
dumb, start_config = labutils.GetBothConfigs(labtainer_lab, labutils.logger)
nets = []
for subnet_name in start_config.subnets:
#print('subnet: %s' % subnet_name)
nets.append(subnet_name)
print('labname: %s has %d networks' % (args.labname, len(nets)))
for name, container in start_config.containers.items():
eth_index = 0
num_adapters = len(container.container_nets.items())
print('Container: %s (# of adapters: %d)' % (name, num_adapters))
for subnet_name, subnet_ip in container.container_nets.items():
sub_index = nets.index(subnet_name)
eth = 'eth %d' % eth_index
print('\t %s to network %d' % (eth, sub_index))
eth_index += 1
| gpl-3.0 | 2,438,728,820,490,160,000 | 31.958333 | 105 | 0.709229 | false |
zardav/FaceDetection | FaceDetection/temp.py | 1 | 3849 | import numpy as np
from scipy import ndimage, misc
from matplotlib import pyplot as plt
import glob
from MyViola import MyViolaClassifier
from Svm import Svm
import funcs
def find_face(img, shape, mv):
res_i = (0, 0)
res_j = (0, 0)
res_scl = 1
max_ = 0
scales = np.arange(.2, .35, .025)
m, n = shape
for scl in scales:
img_ = misc.imresize(img, scl)
mv.change_image(img_)
x, y = img_.shape[:2]
if x < m or y < n:
continue
for i, j in funcs.iter_shape((x, y), shape, 4):
val = mv.valuefy((i, j))
if val > max_:
max_ = val
res_i, res_j = i, j
res_scl = scl
return (int(res_i[0] / res_scl), int(res_i[1] / res_scl)), (int(res_j[0] / res_scl), int(res_j[1] / res_scl))
def get_sub_pics_with_size(imgs, shape):
scales = np.arange(.2, 1, .2)
m, n = shape
for img in imgs:
while img.shape[0] > 800:
img = misc.imresize(img, 0.5)
for scl in scales:
img_ = misc.imresize(img, scl)
x, y = img_.shape[:2]
if x < m or y < n:
continue
i = 0
while i + m < x:
j = 0
while j + n < y:
yield img_[i:i+m, j:j+n]
j += n
i += m
def temp():
files = glob.glob('../../faces/cropped/*.jpg')
faces = (misc.imread(im) for im in files)
mv = MyViolaClassifier()
mv.add_examples(faces, 1)
files = glob.glob('../../faces/nofaces/*.jpg')
nofaces = (misc.imread(im) for im in files)
mv.add_examples(get_sub_pics_with_size(nofaces, (137, 100)), -1)
mv.learn()
mv.save('my_viola.pkl')
files = glob.glob('../../faces/*.jpg')
for f in files:
img = misc.imread(f)
new_path = f.replace('/faces\\', '/faces\\new1\\')
i, j = find_face(img, (137, 100), mv)
i1, i2 = i
j1, j2 = j
new_img = img[i1:i2, j1:j2]
try:
misc.imsave(new_path, new_img)
except ValueError:
pass
def plot_image_faces(img, shape, mv):
plot_im_with_rects(img, get_all_faces_rects(img, shape, mv))
def plot_im_with_rects(img, rect_list):
img1 = img
for rect in rect_list:
img1 = funcs.implusrect(img1, rect[0], rect[1], (0, 255, 0))
plt.imshow(img1)
def get_all_faces_rects(img, shape, mv):
return [a[0] for a in filter_overlap_windows(get_all_windows(img, shape, mv))]
def get_all_windows(img, shape, mv):
scales = np.arange(.2, .35, .02)
m, n = shape
for scl in scales:
img_ = misc.imresize(img, scl)
mv.change_image(img_)
x, y = img_.shape[:2]
if x < m or y < n:
continue
for i, j in funcs.iter_shape((x, y), shape, 4):
val = mv.valuefy((i, j))
if val > 0:
res_i = (int(i[0] / scl), int(i[1] / scl))
res_j = (int(j[0] / scl), int(j[1] / scl))
yield ((res_i, res_j), val)
def is_pos_in_rect(pos, rect):
x, y = pos
(i1, i2), (j1, j2) = rect
return i1 <= x <= i2 and j1 <= y <= j2
def mid_point(rect):
(i1, i2), (j1, j2) = rect
return int((i1 + i2) / 2), int((j1 + j2) / 2)
def are_overlap(window1, window2):
return is_pos_in_rect(mid_point(window1), window2) or is_pos_in_rect(mid_point(window2), window1)
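# filter_overlap_windows below is a simple greedy non-maximum suppression: each
# window is checked against the keepers so far (two windows "overlap" when either
# midpoint lies inside the other rectangle), and of an overlapping pair only the
# higher-valued window survives.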
def filter_overlap_windows(windows):
maxs = []
for w in windows:
w_waiting = True
index = 0
while index < len(maxs) and w_waiting:
if are_overlap(w[0], maxs[index][0]):
if w[1] > maxs[index][1]:
maxs[index] = w
w_waiting = False
index += 1
if w_waiting:
maxs.append(w)
return maxs
| gpl-3.0 | -5,453,217,242,674,596,000 | 27.094891 | 113 | 0.50013 | false |
krunal3103/servo | tests/wpt/harness/wptrunner/manifestinclude.py | 118 | 5056 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""Manifest structure used to store paths that should be included in a test run.
The manifest is represented by a tree of IncludeManifest objects, the root
representing the file and each subnode representing a subdirectory that should
be included or excluded.
"""
import glob
import os
import urlparse
from wptmanifest.node import DataNode
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
class IncludeManifest(ManifestItem):
def __init__(self, node):
"""Node in a tree structure representing the paths
that should be included or excluded from the test run.
:param node: AST Node corresponding to this Node.
"""
ManifestItem.__init__(self, node)
self.child_map = {}
@classmethod
def create(cls):
"""Create an empty IncludeManifest tree"""
node = DataNode(None)
return cls(node)
def append(self, child):
ManifestItem.append(self, child)
self.child_map[child.name] = child
assert len(self.child_map) == len(self.children)
def include(self, test):
"""Return a boolean indicating whether a particular test should be
included in a test run, based on the IncludeManifest tree rooted on
this object.
:param test: The test object"""
path_components = self._get_components(test.url)
return self._include(test, path_components)
def _include(self, test, path_components):
if path_components:
next_path_part = path_components.pop()
if next_path_part in self.child_map:
return self.child_map[next_path_part]._include(test, path_components)
node = self
while node:
try:
skip_value = self.get("skip", {"test_type": test.item_type}).lower()
assert skip_value in ("true", "false")
return skip_value != "true"
except KeyError:
if node.parent is not None:
node = node.parent
else:
# Include by default
return True
def _get_components(self, url):
rv = []
url_parts = urlparse.urlsplit(url)
variant = ""
if url_parts.query:
variant += "?" + url_parts.query
if url_parts.fragment:
variant += "#" + url_parts.fragment
if variant:
rv.append(variant)
rv.extend([item for item in reversed(url_parts.path.split("/")) if item])
return rv
def _add_rule(self, test_manifests, url, direction):
maybe_path = os.path.join(os.path.abspath(os.curdir), url)
rest, last = os.path.split(maybe_path)
variant = ""
if "#" in last:
last, fragment = last.rsplit("#", 1)
variant += "#" + fragment
if "?" in last:
last, query = last.rsplit("?", 1)
variant += "?" + query
maybe_path = os.path.join(rest, last)
paths = glob.glob(maybe_path)
if paths:
urls = []
for path in paths:
for manifest, data in test_manifests.iteritems():
rel_path = os.path.relpath(path, data["tests_path"])
if ".." not in rel_path.split(os.sep):
urls.append(data["url_base"] + rel_path.replace(os.path.sep, "/") + variant)
break
else:
urls = [url]
assert direction in ("include", "exclude")
for url in urls:
components = self._get_components(url)
node = self
while components:
component = components.pop()
if component not in node.child_map:
new_node = IncludeManifest(DataNode(component))
node.append(new_node)
new_node.set("skip", node.get("skip", {}))
node = node.child_map[component]
skip = False if direction == "include" else True
node.set("skip", str(skip))
def add_include(self, test_manifests, url_prefix):
"""Add a rule indicating that tests under a url path
should be included in test runs
:param url_prefix: The url prefix to include
"""
return self._add_rule(test_manifests, url_prefix, "include")
def add_exclude(self, test_manifests, url_prefix):
"""Add a rule indicating that tests under a url path
should be excluded from test runs
:param url_prefix: The url prefix to exclude
"""
return self._add_rule(test_manifests, url_prefix, "exclude")
def get_manifest(manifest_path):
with open(manifest_path) as f:
return conditional.compile(f, data_cls_getter=lambda x, y: IncludeManifest)
| mpl-2.0 | 4,475,041,681,250,929,000 | 34.111111 | 100 | 0.582476 | false |
lapsed/whatbot | requestfinder.py | 1 | 2742 | # Request finder
import time
from mediamonkey import MediaMonkey
from what import WhatCD
from whatdao import WhatDAO
from whatconfig import WhatConfigParser
from whatparser import Parser
class RequestFinder():
def dump_page(self, filename, page):
f = open(filename, 'w')
f.write(page)
f.close()
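    # find_requests walks the configured range of what.cd request pages; for each
    # request not yet in the local database it fetches the request page, resolves
    # the torrent group, checks whether a FLAC is already present, and stores the
    # result via the DAO. Requests already known locally only have their
    # filled/reward fields refreshed.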
def find_requests(self):
sleep_time = int(config.get('request', 'sleep_time'))
# iterate over request pages specified on command line
for i in range(int(config.get('request', 'startpage')), int(config.get('request', 'endpage')) + 1):
print 'Processing request page ' + str(i)
try:
page = whatcd.getRequests(i)
requests = parser.parseRequests(page)
except:
print "Error processing request page i, dumping source"
self.dump_page('request_%03d.htm' % i)
continue
# for each request
for request in requests:
# Sleep before hitting the site to avoid flooding
print request
time.sleep(sleep_time)
local_request = dao.loadRequest(request.request_id)
# if doesn't already exist in local database
if local_request.request_id == 0:
print "Getting details of remote request..."
# Sleep before hitting the site to avoid flooding
time.sleep(sleep_time)
# load request page
try:
page = whatcd.getRequest(request.request_id)
torrent_group_id = parser.parseRequest(page)
except:
print "Error processing request ID %d, dumping source"
self.dump_page('request_details_%d.htm' % request.request_id)
continue
if torrent_group_id:
request.torrent_group_id = torrent_group_id
time.sleep(sleep_time)
# load torrent details page and check for FLAC
try:
page = whatcd.getTorrentGroup(torrent_group_id)
request.flac_present = parser.parseGroup(page)
except:
print "Error processing torrent ID %d, dumping source"
self.dump_page('torrent_details_%d.htm' % torrent_group_id)
continue
# store request details in local sql database
print "Storing request..."
dao.storeRequest(request)
else:
print "Updating local copy of top level request data"
# update filled status and reward only if present in local db
local_request.filled = request.filled
local_request.reward = request.reward
dao.storeRequest(local_request)
if __name__ == "__main__":
# Create and initialise singletons
config = WhatConfigParser()
config.read('whatbot.cfg')
mm = MediaMonkey(config)
whatcd = WhatCD(config, mm)
parser = Parser()
dao = WhatDAO()
# If we can log in to what.cd, go look for requests
if whatcd.login():
rf = RequestFinder()
rf.find_requests()
else:
print "Aborting: Unable to log in to what.cd"
| unlicense | -1,743,930,517,610,258,200 | 30.883721 | 101 | 0.683078 | false |
jriguera/Diamond | src/collectors/portstat/tests/test_portstat.py | 33 | 2035 | from test import CollectorTestCase
from test import get_collector_config
from mock import call, Mock, patch
from unittest import TestCase
from diamond.collector import Collector
from portstat import get_port_stats, PortStatCollector
class PortStatCollectorTestCase(CollectorTestCase):
TEST_CONFIG = {
'port': {
'something1': {
'number': 5222,
},
'something2': {
'number': 8888,
}
}
}
def setUp(self):
config = get_collector_config('PortStatCollector',
self.TEST_CONFIG)
self.collector = PortStatCollector(config, None)
def test_import(self):
self.assertTrue(PortStatCollector)
@patch('portstat.get_port_stats')
@patch.object(Collector, 'publish')
def test_collect(self, publish_mock, get_port_stats_mock):
get_port_stats_mock.return_value = {'foo': 1}
self.collector.collect()
get_port_stats_mock.assert_has_calls([call(5222), call(8888)],
any_order=True)
self.assertPublished(publish_mock, 'something1.foo', 1)
self.assertPublished(publish_mock, 'something2.foo', 1)
class GetPortStatsTestCase(TestCase):
@patch('portstat.psutil.net_connections')
def test_get_port_stats(self, net_connections_mock):
ports = [Mock() for _ in range(5)]
ports[0].laddr = (None, 5222)
ports[0].status = 'ok'
ports[1].laddr = ports[2].laddr = ports[3].laddr = (None, 8888)
ports[1].status = 'ok'
ports[2].status = 'OK'
ports[3].status = 'bad'
ports[4].laddr = (None, 9999)
net_connections_mock.return_value = ports
cnts = get_port_stats(5222)
net_connections_mock.assert_called_once()
self.assertEqual(cnts, {'ok': 1})
cnts = get_port_stats(8888)
net_connections_mock.assert_called_once()
self.assertEqual(cnts, {'ok': 2, 'bad': 1})
| mit | 8,952,879,685,993,526,000 | 26.876712 | 71 | 0.590172 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.