repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
jeremiahyan/odoo | addons/sale_timesheet/tests/test_project_overview.py | 1 | 8777 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.sale_timesheet.tests.test_reporting import TestReporting
from odoo.tools import float_compare
from odoo.tests import tagged
@tagged('-at_install', 'post_install')
class TestSaleProject(TestReporting):
def test_project_overview_by_project(self):
rounding = self.env.company.currency_id.rounding
so_line_deliver_global_project = self.env['sale.order.line'].create({
'name': self.product_delivery_timesheet2.name,
'product_id': self.product_delivery_timesheet2.id,
'product_uom_qty': 50,
'product_uom': self.product_delivery_timesheet2.uom_id.id,
'price_unit': self.product_delivery_timesheet2.list_price,
'order_id': self.sale_order_2.id,
})
self.sale_order_2.action_confirm()
project_so = self.so_line_order_project.project_id
# log timesheet for billable time
timesheet1 = self._log_timesheet_manager(project_so, 10, so_line_deliver_global_project.task_id)
task_so = self.so_line_order_project.task_id
# logged some timesheets: on project only, then on tasks with different employees
timesheet2 = self._log_timesheet_user(project_so, 2)
timesheet3 = self._log_timesheet_user(project_so, 3, task_so)
timesheet4 = self._log_timesheet_manager(project_so, 1, task_so)
# create a task which is not linked to sales order and fill non-billable timesheet
task = self.env['project.task'].create({
'name': 'Task',
'project_id': project_so.id,
'allow_billable': False,
'sale_line_id': False
})
timesheet5 = self._log_timesheet_user(project_so, 5, task)
# invoice the Sales Order SO2
context = {
"active_model": 'sale.order',
"active_ids": [self.sale_order_2.id],
"active_id": self.sale_order_2.id,
'open_invoices': True,
}
payment = self.env['sale.advance.payment.inv'].create({
'advance_payment_method': 'delivered',
})
action_invoice = payment.with_context(context).create_invoices()
invoice = self.env['account.move'].browse(action_invoice['res_id'])
invoice.action_post()
# simulate the auto creation of the SO line for expense, like we confirm a vendor bill.
so_line_expense = self.env['sale.order.line'].create({
'name': self.product_expense.name,
'product_id': self.product_expense.id,
'product_uom_qty': 0.0,
'product_uom': self.product_expense.uom_id.id,
'price_unit': self.product_expense.list_price, # reinvoice at sales price
'order_id': self.sale_order_2.id,
'is_expense': True,
})
expense = self.env['account.analytic.line'].create({
'name': 'expense on project_so',
'account_id': project_so.analytic_account_id.id,
'so_line': so_line_expense.id,
'employee_id': self.employee_user.id,
'unit_amount': 4,
'amount': 4 * self.product_expense.list_price * -1,
'product_id': self.product_expense.id,
'product_uom_id': self.product_expense.uom_id.id,
})
other_revenues = self.env['account.analytic.line'].create({
'name': 'other revenues on project_so',
'account_id': project_so.analytic_account_id.id,
'employee_id': self.employee_user.id,
'unit_amount': 1,
'amount': self.product_expense.list_price,
'product_id': self.product_expense.id,
'product_uom_id': self.product_expense.uom_id.id,
})
view_id = self.env.ref('sale_timesheet.project_timesheet_action_client_timesheet_plan').id
vals = self.env['project.project']._qweb_prepare_qcontext(view_id, [['id', '=', project_so.id]])
dashboard_value = timesheet2.unit_amount + timesheet3.unit_amount + timesheet4.unit_amount + timesheet5.unit_amount + timesheet1.unit_amount
project_so_timesheet_sold_unit = timesheet3.unit_amount + timesheet4.unit_amount
project_rate_non_billable = timesheet5.unit_amount / dashboard_value * 100
project_rate_non_billable_project = timesheet2.unit_amount / dashboard_value * 100
project_rate_billable_time = timesheet1.unit_amount / dashboard_value * 100
project_rate_billable_fixed = project_so_timesheet_sold_unit / dashboard_value * 100
project_rate_total = project_rate_non_billable + project_rate_non_billable_project + project_rate_billable_time + project_rate_billable_fixed
project_invoiced = self.so_line_order_project.price_unit * self.so_line_order_project.product_uom_qty * timesheet1.unit_amount
project_timesheet_cost = timesheet2.amount + timesheet3.amount + timesheet4.amount + timesheet5.amount + timesheet1.amount
self.assertEqual(float_compare(vals['dashboard']['time']['non_billable'], timesheet5.unit_amount, precision_rounding=rounding), 0, "The hours non-billable should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['non_billable_project'], timesheet2.unit_amount, precision_rounding=rounding), 0, "The hours non-billable-project should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['billable_time'], timesheet1.unit_amount, precision_rounding=rounding), 0, "The hours billable-time should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['billable_fixed'], project_so_timesheet_sold_unit, precision_rounding=rounding), 0, "The hours billable-fixed should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['time']['total'], dashboard_value, precision_rounding=rounding), 0, "The total hours should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['non_billable'], project_rate_non_billable, precision_rounding=rounding), 0, "The rate non-billable should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['non_billable_project'], project_rate_non_billable_project, precision_rounding=rounding), 0, "The rate non-billable-project should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['billable_time'], project_rate_billable_time, precision_rounding=rounding), 0, "The rate billable-time should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['billable_fixed'], project_rate_billable_fixed, precision_rounding=rounding), 0, "The rate billable-fixed should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['rates']['total'], project_rate_total, precision_rounding=rounding), 0, "The total rates should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['invoiced'], project_invoiced, precision_rounding=rounding), 0, "The amount invoiced should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['cost'], project_timesheet_cost, precision_rounding=rounding), 0, "The amount cost should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['expense_cost'], expense.amount, precision_rounding=rounding), 0, "The amount expense-cost should be the one from the SO2 line, as we are in ordered quantity")
self.assertEqual(float_compare(vals['dashboard']['profit']['other_revenues'], other_revenues.amount, precision_rounding=rounding), 0, "The amount of the other revenues should be equal to the created other_revenues account analytic line")
self.assertEqual(float_compare(vals['dashboard']['profit']['total'], project_invoiced + project_timesheet_cost + expense.amount + other_revenues.amount, precision_rounding=rounding), 0, "The total amount should be the sum of the SO2 line and the created other_revenues account analytic line")
self.assertEqual(float_compare(vals['repartition_employee_max'], 11.0, precision_rounding=rounding), 0, "The amount of repartition-employee-max should be the one from SO2 line")
| gpl-3.0 | -8,200,607,940,307,059,000 | 73.381356 | 300 | 0.681896 | false |
EqAfrica/machinekit | nosetests/test_netcmd.py | 1 | 3441 | #!/usr/bin/env python
from nose import with_setup
from machinekit.nosetests.realtime import setup_module,teardown_module
from machinekit.nosetests.support import fnear
from machinekit import hal
import os
def test_component_creation():
global c1,c2
c1 = hal.Component("c1")
c1.newpin("s32out", hal.HAL_S32, hal.HAL_OUT, init=42)
c1.newpin("s32in", hal.HAL_S32, hal.HAL_IN)
c1.newpin("s32io", hal.HAL_S32, hal.HAL_IO)
c1.newpin("floatout", hal.HAL_FLOAT, hal.HAL_OUT, init=42)
c1.newpin("floatin", hal.HAL_FLOAT, hal.HAL_IN)
c1.newpin("floatio", hal.HAL_FLOAT, hal.HAL_IO)
c1.ready()
c2 = hal.Component("c2")
c2.newpin("s32out", hal.HAL_S32, hal.HAL_OUT, init=4711)
c2.newpin("s32in", hal.HAL_S32, hal.HAL_IN)
c2.newpin("s32io", hal.HAL_S32, hal.HAL_IO)
c2.newpin("floatout", hal.HAL_FLOAT, hal.HAL_OUT, init=4711)
c2.newpin("floatin", hal.HAL_FLOAT, hal.HAL_IN)
c2.newpin("floatio", hal.HAL_FLOAT, hal.HAL_IO)
c2.ready()
def test_net_existing_signal_with_bad_type():
hal.new_sig("f", hal.HAL_FLOAT)
try:
hal.net("f", "c1.s32out")
raise "should not happen"
except TypeError:
pass
del hal.signals["f"]
def test_net_match_nonexistant_signals():
try:
hal.net("nosuchsig", "c1.s32out","c2.s32out")
raise "should not happen"
except TypeError:
pass
def test_net_pin2pin():
try:
hal.net("c1.s32out","c2.s32out")
#TypeError: net: 'c1.s32out' is a pin - first argument must be a signal name
raise "should not happen"
except TypeError:
pass
def test_net_existing_signal():
hal.new_sig("s32", hal.HAL_S32)
assert hal.pins["c1.s32out"].linked == False
hal.net("s32", "c1.s32out")
assert hal.pins["c1.s32out"].linked == True
hal.new_sig("s32too", hal.HAL_S32)
try:
hal.net("s32too", "c1.s32out")
raise "should not happen"
except RuntimeError:
pass
del hal.signals["s32"]
def test_new_sig():
floatsig1 = hal.new_sig("floatsig1", hal.HAL_FLOAT)
try:
hal.new_sig("floatsig1", hal.HAL_FLOAT)
# RuntimeError: Failed to create signal floatsig1: HAL: ERROR: duplicate signal 'floatsig1'
raise "should not happen"
except RuntimeError:
pass
try:
hal.new_sig(32423 *32432, hal.HAL_FLOAT)
raise "should not happen"
except TypeError:
pass
try:
hal.new_sig(None, hal.HAL_FLOAT)
raise "should not happen"
except TypeError:
pass
try:
hal.new_sig("badtype", 1234)
raise "should not happen"
except TypeError:
pass
def test_check_net_args():
try:
hal.net()
except TypeError:
pass
try:
hal.net(None, "c1.s32out")
except TypeError:
pass
try:
hal.net("c1.s32out")
# TypeError: net: 'c1.s32out' is a pin - first argument must be a signal name
except TypeError:
pass
assert "noexiste" not in hal.signals
hal.net("noexiste", "c1.s32out")
assert "noexiste" in hal.signals
ne = hal.signals["noexiste"]
assert ne.writers == 1
assert ne.readers == 0
assert ne.bidirs == 0
try:
hal.net("floatsig1", "c1.s32out")
raise "should not happen"
except RuntimeError:
pass
(lambda s=__import__('signal'):
s.signal(s.SIGTERM, s.SIG_IGN))()
| lgpl-2.1 | -2,024,453,520,305,950,200 | 24.87218 | 99 | 0.610869 | false |
joke2k/faker | setup.py | 1 | 2446 | #!/usr/bin/env python
import os
from pathlib import Path
from setuptools import find_packages, setup
here = Path(__file__).resolve().parent
README = (here / 'README.rst').read_text(encoding='utf-8')
VERSION = (here / 'VERSION').read_text(encoding='utf-8').strip()
excluded_packages = ["docs", "tests", "tests.*"]
if not os.environ.get('READTHEDOCS', False):
excluded_packages += ["faker.sphinx", "faker.sphinx.*"]
# this module can be zip-safe if the zipimporter implements iter_modules or if
# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
try:
import pkgutil
import zipimport
zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
except AttributeError:
zip_safe = False
setup(
name='Faker',
version=VERSION,
description="Faker is a Python package that generates fake data for you.",
long_description=README,
entry_points={
'console_scripts': ['faker=faker.cli:execute_from_command_line'],
'pytest11': ['faker = faker.contrib.pytest.plugin'],
},
classifiers=[
# See https://pypi.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
],
keywords='faker fixtures data test mock generator',
author='joke2k',
author_email='[email protected]',
url='https://github.com/joke2k/faker',
license='MIT License',
packages=find_packages(exclude=excluded_packages),
platforms=["any"],
zip_safe=zip_safe,
python_requires=">=3.6",
install_requires=[
"python-dateutil>=2.4",
"text-unidecode==1.3",
],
)
| mit | -2,308,311,519,361,657,300 | 34.449275 | 78 | 0.643908 | false |
daanwierstra/pybrain | pybrain/rl/learners/search/incrementalcomplexity/incrementalcomplexity.py | 1 | 1767 | __author__ = 'Tom Schaul, [email protected]'
# TODO: inheritance!
class IncrementalComplexitySearch(object):
""" Draft of an OOPS-inspired search that incrementally expands the search space
and the allocated time (to a population of search processes). """
def __init__(self, initSearchProcess, maxPhases = 10, searchSteps = 50, desiredFitness = None):
self.maxPhases = maxPhases
self.searchSteps = searchSteps
self.desiredFitness = desiredFitness
self.processes = [initSearchProcess]
self.phase = 0
def optimize(self, **args):
while self.phase <= self.maxPhases and not self.problemSolved():
self._onePhase(**args)
# increase the number of processes
for p in self.processes[:]:
self.processes.append(p.newSimilarInstance())
self.increaseSearchSpace()
self.phase += 1
# return best evolvable
best = -1e100
for p in self.processes:
if p.bestFitness > best:
best = p.bestFitness
res = p.evolvable
return res
def _onePhase(self, verbose = True, **args):
if verbose:
print 'Phase', self.phase
for p in self.processes:
p.search(self.searchSteps, **args)
if verbose:
print '', p.bestFitness, p.evolvable.weightLengths
def increaseSearchSpace(self):
for p in self.processes:
p.increaseMaxComplexity()
def problemSolved(self):
if self.desiredFitness != None:
for p in self.processes:
if p.bestFitness > self.desiredFitness:
return True
return False | bsd-3-clause | 4,677,843,574,635,908,000 | 33.666667 | 99 | 0.578947 | false |
marteinn/wagtail-alt-generator | wagtailaltgenerator/tests/demosite/settings.py | 1 | 2042 | #!/usr/bin/env python
import os
DEBUG = False
TIME_ZONE = "Europe/Stockholm"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3"}}
SECRET_KEY = "not needed"
USE_TZ = True
LANGUAGE_CODE = "en"
INSTALLED_APPS = [
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.sites",
"django.contrib.admin",
"django.contrib.messages",
"wagtail.core",
"wagtail.sites",
"wagtail.users",
"wagtail.images",
"wagtail.documents",
"taggit",
"wagtailaltgenerator",
"wagtailaltgenerator.tests.demopages",
"wagtailaltgenerator.tests.demosite",
]
ROOT_URLCONF = "wagtailaltgenerator.tests.demosite.urls"
MIDDLEWARE = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"wagtail.core.middleware.SiteMiddleware",
"wagtail.contrib.redirects.middleware.RedirectMiddleware",
)
ALT_GENERATOR_MIN_CONFIDENCE = 0
COMPUTER_VISION_API_KEY = os.environ.get("COMPUTER_VISION_API_KEY", None)
COMPUTER_VISION_REGION = "canada"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
}
}
]
| mit | -7,871,863,058,548,776,000 | 27.760563 | 78 | 0.669931 | false |
jparicka/twitter-tools | profiles/models.py | 1 | 1328 | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
class Profile(models.Model):
user = models.OneToOneField(User)
name = models.CharField(max_length=100, blank=True, verbose_name="name", db_index=True)
headline = models.CharField(max_length=512, blank=True, verbose_name="name", db_index=True)
secret = models.CharField(max_length=100, blank=True, verbose_name="secret_key", db_index=True)
country = models.CharField(max_length=10, blank=True, verbose_name="country", db_index=True)
language = models.CharField(max_length=10, blank=True, verbose_name="language", db_index=True)
mobile = models.CharField(max_length=20, blank=True, verbose_name="mobile_number_1")
picture = models.URLField(blank=True, verbose_name="picture")
oauth_token = models.CharField(max_length=200, blank=True)
oauth_secret = models.CharField(max_length=200, blank=True)
street_address_1 = models.CharField(max_length=100, blank=True, verbose_name="street_address_1")
street_address_2 = models.CharField(max_length=100, blank=True, verbose_name="street_address_2")
street_address_3 = models.CharField(max_length=100, blank=True, verbose_name="street_address_3")
initial_assessment = models.BooleanField(default=False)
| mit | -2,055,734,439,932,275,200 | 44.827586 | 100 | 0.730422 | false |
rcatwood/Savu | savu/data/data_structures/data_type.py | 1 | 4089 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: data_type
:platform: Unix
:synopsis: A module containing classes for different input data types other
than hdf5.
.. moduleauthor:: Nicola Wadeson <[email protected]>
"""
import os
import numpy as np
import fabio
class DataTypes(object):
def __getitem__(self, index):
""" Override __getitem__ and map to the relevant files """
raise NotImplementedError("__getitem__ must be implemented.")
def get_shape(self):
""" Get full stiched shape of a stack of files"""
raise NotImplementedError("get_shape must be implemented.")
class FabIO(DataTypes):
""" This class loads any of the FabIO python module supported image
formats. """
def __init__(self, folder, Data, dim, shape=None):
self._data_obj = Data
self.nFrames = None
self.start_file = fabio.open(self.__get_file_name(folder))
self.frame_dim = dim
self.image_shape = (self.start_file.dim2, self.start_file.dim1)
if shape is None:
self.shape = (self.nFrames,)
else:
self.shape = shape
def __getitem__(self, index):
size = [len(np.arange(i.start, i.stop, i.step)) for i in index]
data = np.empty(size)
tiffidx = [i for i in range(len(index)) if i not in self.frame_dim]
# print "original = ", index
index, frameidx = self.__get_indices(index, size)
for i in range(len(frameidx)):
# print "amended = ", index[i]
# print frameidx[i], [index[i][n] for n in tiffidx]
data[index[i]] = \
self.start_file.getframe(self.start_no + frameidx[i])\
.data[[index[i][n] for n in tiffidx]]
return data
def __get_file_name(self, folder):
import re
files = os.listdir(folder)
self.nFrames = len(files)
fname = sorted(files)[0]
self.start_no = [int(s) for s in re.findall(r'\d+', fname)][-1]
print "start number is", self.start_no
return folder + "/" + fname
def get_shape(self):
return self.shape + self.image_shape
def __get_idx(self, dim, sl, shape):
c = int(np.prod(shape[0:dim]))
r = int(np.prod(shape[dim+1:]))
values = np.arange(sl.start, sl.stop, sl.step)
return np.ravel(np.kron(values, np.ones((r, c))))
def __get_indices(self, index, size):
""" Get the indices for the new data array and the file numbers. """
sub_idx = np.array(index)[np.array(self.frame_dim)]
sub_size = [size[i] for i in self.frame_dim]
idx_list = []
for dim in range(len(sub_idx)):
idx = self.__get_idx(dim, sub_idx[dim], sub_size)
idx_list.append(idx.astype(int))
lshape = idx_list[0].shape[0]
index = np.tile(index, (lshape, 1))
frameidx = np.zeros(lshape)
for dim in range(len(sub_idx)):
start = index[0][self.frame_dim[dim]].start
index[:, self.frame_dim[dim]] = \
[slice(i-start, i-start+1, 1) for i in idx_list[dim]]
frameidx[:] += idx_list[dim]*np.prod(self.shape[dim+1:])
return index.tolist(), frameidx.astype(int)
class Map_3d_to_4d_h5(DataTypes):
""" This class converts a 3D dataset to a 4D dataset. """
def __init__(self, backing_file, shape):
self.shape = shape
def __getitem__(self, index):
print index
def get_shape(self):
return self.shape
| gpl-3.0 | 2,109,054,481,640,022,500 | 33.361345 | 78 | 0.604304 | false |
pablolizardo/dotfiles | inkscape/symbols/generate.py | 1 | 1934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
inkscapeSymbolGenerator: A inkscape symbol library generator
Copyright (C) 2015 Xavi Julián Olmos
See the file LICENSE for copying permission.
"""
import sys, os
import logging
from optparse import OptionParser
#### Goal
#If select all merge all files in a single SVG and then
#If select file, clean it with SVGO
#Remove styles then
#Save
#### To do
# Compact the script's format; check whether there are alternatives to the ifs
# Use Python paths - Pythonic path construction.
# Use the file name.
# Find a regex to remove tags in cleanSVGStyles()
# Append content to a file.
# Migrate from OptionParser to https://docs.python.org/3/library/optparse.html
def cleanSVGStyles(file):
print('Cleaning SVG....')
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('--folder', help='Folder to generate icons from', dest='folder')
optp.add_option('--file', help='File to generate icon from', dest='file')
optp.add_option('-o', '--output', help='Output file', dest='output')
opts, args = optp.parse_args()
if opts.folder is None:
if opts.file is None:
optp.error('At list one value for file or folder is needed')
else:
if opts.output is None:
os.system('svgo ' + opts.file)
else:
os.system('svgo ' + opts.file + ' -o ' + opts.output)
else:
if opts.file is None:
if opts.output is None:
os.system('svgo ' + opts.folder)
else:
os.system('cat ' + opts.folder + '/*.svg > ' + opts.output)
os.system('svgo ' + opts.output + ' -o ' + opts.output)
cleanSVGStyles(opts.output)
else:
optp.error('File and folder cannot exist')
| gpl-2.0 | -4,980,966,128,883,437,000 | 31.2 | 84 | 0.623706 | false |
broxtronix/distributed | distributed/diagnostics/progress.py | 1 | 9766 | from __future__ import print_function, division, absolute_import
from collections import defaultdict
import logging
import sys
import threading
import time
from timeit import default_timer
import dask
from toolz import valmap, groupby, concat
from tornado.ioloop import PeriodicCallback, IOLoop
from tornado import gen
from .plugin import SchedulerPlugin
from ..utils import sync, key_split, tokey
logger = logging.getLogger(__name__)
def dependent_keys(keys, who_has, processing, stacks, dependencies, exceptions,
complete=False):
""" All keys that need to compute for these keys to finish """
out = set()
errors = set()
stack = list(keys)
while stack:
key = stack.pop()
if key in out:
continue
if not complete and (who_has.get(key) or
key in processing or
key in stacks):
continue
if key in exceptions:
errors.add(key)
if not complete:
continue
out.add(key)
stack.extend(dependencies.get(key, []))
return out, errors
class Progress(SchedulerPlugin):
""" Tracks progress of a set of keys or futures
On creation we provide a set of keys or futures that interest us as well as
a scheduler. We traverse through the scheduler's dependencies to find all
relevant keys on which our keys depend. We then plug into the scheduler to
learn when our keys become available in memory at which point we record
their completion.
State
-----
keys: set
Set of keys that are not yet computed
all_keys: set
Set of all keys that we track
This class performs no visualization. However it is used by other classes,
notably TextProgressBar and ProgressWidget, which do perform visualization.
"""
def __init__(self, keys, scheduler, minimum=0, dt=0.1, complete=False):
self.keys = {k.key if hasattr(k, 'key') else k for k in keys}
self.keys = {tokey(k) for k in self.keys}
self.scheduler = scheduler
self.complete = complete
self._minimum = minimum
self._dt = dt
self.last_duration = 0
self._start_time = default_timer()
self._running = False
self.status = None
@gen.coroutine
def setup(self):
keys = self.keys
while not keys.issubset(self.scheduler.task_state):
yield gen.sleep(0.05)
self.keys = None
self.scheduler.add_plugin(self) # subtle race condition here
self.all_keys, errors = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=self.complete)
if not self.complete:
self.keys = self.all_keys.copy()
else:
self.keys, _ = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=False)
self.all_keys.update(keys)
self.keys |= errors & self.all_keys
if not self.keys:
self.stop(exception=None, key=None)
logger.debug("Set up Progress keys")
for k in errors:
self.transition(k, None, 'erred', exception=True)
def transition(self, key, start, finish, *args, **kwargs):
if key in self.keys and start == 'processing' and finish == 'memory':
logger.debug("Progress sees key %s", key)
self.keys.remove(key)
if not self.keys:
self.stop()
if key in self.all_keys and finish == 'erred':
logger.debug("Progress sees task erred")
self.stop(exception=kwargs['exception'], key=key)
if key in self.keys and finish == 'forgotten':
logger.debug("A task was cancelled (%s), stopping progress", key)
self.stop(exception=True)
def restart(self, scheduler):
self.stop()
def stop(self, exception=None, key=None):
if self in self.scheduler.plugins:
self.scheduler.plugins.remove(self)
if exception:
self.status = 'error'
else:
self.status = 'finished'
logger.debug("Remove Progress plugin")
class MultiProgress(Progress):
""" Progress variant that keeps track of different groups of keys
See Progress for most details. This only adds a function ``func=``
that splits keys. This defaults to ``key_split`` which aligns with naming
conventions chosen in the dask project (tuples, hyphens, etc..)
State
-----
keys: dict
Maps group name to set of not-yet-complete keys for that group
all_keys: dict
Maps group name to set of all keys for that group
Examples
--------
>>> split = lambda s: s.split('-')[0]
>>> p = MultiProgress(['y-2'], func=split) # doctest: +SKIP
>>> p.keys # doctest: +SKIP
{'x': {'x-1', 'x-2', 'x-3'},
'y': {'y-1', 'y-2'}}
"""
def __init__(self, keys, scheduler=None, func=key_split, minimum=0, dt=0.1,
complete=False):
self.func = func
Progress.__init__(self, keys, scheduler, minimum=minimum, dt=dt,
complete=complete)
@gen.coroutine
def setup(self):
keys = self.keys
while not keys.issubset(self.scheduler.tasks):
yield gen.sleep(0.05)
self.keys = None
self.scheduler.add_plugin(self) # subtle race condition here
self.all_keys, errors = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=self.complete)
if not self.complete:
self.keys = self.all_keys.copy()
else:
self.keys, _ = dependent_keys(keys, self.scheduler.who_has,
self.scheduler.processing, self.scheduler.stacks,
self.scheduler.dependencies, self.scheduler.exceptions,
complete=False)
self.all_keys.update(keys)
self.keys |= errors & self.all_keys
if not self.keys:
self.stop(exception=None, key=None)
# Group keys by func name
self.keys = valmap(set, groupby(self.func, self.keys))
self.all_keys = valmap(set, groupby(self.func, self.all_keys))
for k in self.all_keys:
if k not in self.keys:
self.keys[k] = set()
for k in errors:
self.transition(k, None, 'erred', exception=True)
logger.debug("Set up Progress keys")
def transition(self, key, start, finish, *args, **kwargs):
if start == 'processing' and finish == 'memory':
s = self.keys.get(self.func(key), None)
if s and key in s:
s.remove(key)
if not self.keys or not any(self.keys.values()):
self.stop()
if finish == 'erred':
logger.debug("Progress sees task erred")
k = self.func(key)
if (k in self.all_keys and key in self.all_keys[k]):
self.stop(exception=kwargs.get('exception'), key=key)
if finish == 'forgotten':
k = self.func(key)
if k in self.all_keys and key in self.all_keys[k]:
logger.debug("A task was cancelled (%s), stopping progress", key)
self.stop(exception=True)
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
>>> format_time(100000.4)
'27hr 46min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)
elif m:
return '{0:2.0f}min {1:4.1f}s'.format(m, s)
else:
return '{0:4.1f}s'.format(s)
class AllProgress(SchedulerPlugin):
""" Keep track of all keys, grouped by key_split """
def __init__(self, scheduler):
self.all = defaultdict(set)
self.nbytes = defaultdict(lambda: 0)
self.state = defaultdict(lambda: defaultdict(set))
self.scheduler = scheduler
for key, state in self.scheduler.task_state.items():
k = key_split(key)
self.all[k].add(key)
self.state[state][k].add(key)
if key in self.scheduler.nbytes:
self.nbytes[k] += self.scheduler.nbytes[key]
scheduler.add_plugin(self)
def transition(self, key, start, finish, *args, **kwargs):
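# Keep the per-prefix (key_split) sets of keys and the byte counts in sync
# as tasks move between states.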
k = key_split(key)
self.all[k].add(key)
try:
self.state[start][k].remove(key)
except KeyError: # TODO: remove me once we have a new or clean state
pass
if finish != 'forgotten':
self.state[finish][k].add(key)
else:
self.all[k].remove(key)
if not self.all[k]:
del self.all[k]
try:
del self.nbytes[k]
except KeyError:
pass
for v in self.state.values():
try:
del v[k]
except KeyError:
pass
if start == 'memory':
self.nbytes[k] -= self.scheduler.nbytes[key]
if finish == 'memory':
self.nbytes[k] += self.scheduler.nbytes[key]
def restart(self, scheduler):
self.all.clear()
self.state.clear()
| bsd-3-clause | -4,504,122,622,001,199,000 | 32.331058 | 81 | 0.574135 | false |
iojancode/botija | plug.livolo.py | 1 | 1887 | import time
import sys
import RPi.GPIO as GPIO
off = '1242424352424342424242424242425342524342'
b0 = '12424243524243424242424242424242424242424242'
b1 = '124242435242434242424242424242534242424242'
b2 = '1242424352424342424242424242425353424242'
b3 = '124242435242434242424242424242424253424242'
b4 = '124242435242434242424242424242524342424242'
b5 = '124242435242434242424242424242425342424242'
b6 = '1242424352424342424242424242425342534242'
b7 = '124242435242434242424242424242424242534242'
b8 = '124242435242434242424242424242524243424242'
b9 = '124242435242434242424242424242425243424242'
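# Each code string is a sequence of pulse symbols: transmit_code() maps the
# characters '1'-'5' to specific high/low timings on the transmit pin.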
if 'off' in sys.argv[1:]:
NUM_ATTEMPTS = 1300
else:
NUM_ATTEMPTS = 170
TRANSMIT_PIN = 17
def transmit_code(code):
'''Transmit a chosen code string using the GPIO transmitter'''
GPIO.setmode(GPIO.BCM)
GPIO.setup(TRANSMIT_PIN, GPIO.OUT)
for t in range(NUM_ATTEMPTS):
for i in code:
if i == '1':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00055);
GPIO.output(TRANSMIT_PIN, 0)
elif i == '2':
GPIO.output(TRANSMIT_PIN, 0)
time.sleep(.00011);
GPIO.output(TRANSMIT_PIN, 1)
elif i == '3':
GPIO.output(TRANSMIT_PIN, 0)
time.sleep(.000303);
GPIO.output(TRANSMIT_PIN, 1)
elif i == '4':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00011);
GPIO.output(TRANSMIT_PIN, 0)
elif i == '5':
GPIO.output(TRANSMIT_PIN, 1)
time.sleep(.00029);
GPIO.output(TRANSMIT_PIN, 0)
else:
continue
GPIO.output(TRANSMIT_PIN, 0)
GPIO.cleanup()
if __name__ == '__main__':
for argument in sys.argv[1:]:
exec('transmit_code(' + str(argument) + ')') | gpl-3.0 | -279,637,103,844,104,030 | 32.122807 | 66 | 0.606253 | false |
flipdazed/SoftwareDevelopment | common.py | 1 | 10121 | #!/usr/bin/env python
# encoding: utf-8
# contains the common actions
import random
from logs import *
class Card(object):
"""Creates the card objects used in game"""
def __init__(self, name, attack, money, cost, name_padding=15, num_padding=2):
self.name = name
self.cost = cost
self.attack = attack
self.money = money
self.name_padding = name_padding
self.num_padding = num_padding
self.padded_vals = (
str(self.cost).ljust(self.num_padding),
self.name.ljust(self.name_padding),
str(self.attack).ljust(self.num_padding),
str(self.money).ljust(self.num_padding),
)
def __str__(self):
"""outputs string of the card details when called as print Card()"""
s_out = "Cost: {0} ~ {1} ~ Stats ... Attack: {2}, Money: {3}".format(
*self.padded_vals)
return s_out
def get_attack(self):
return self.attack
def get_money(self):
return self.money
@wrap_all(log_me)
class CommonActions(object):
"""Contains the common actions
used by all game classes
"""
def __init__(self):
# self.art = Art()
pass
def deck_to_hand(self):
"""
Move cards from central.central deck
to active central.central deck
Container is the dictionary within the
class that need to be called with the
getattr()
"""
# For each index in player hand
# Refills player hand from player deck.
# If deck is empty, discard pile is shuffled
# and becomes deck
for i in xrange(0, self.hand_size):
# If the deck is empty, shuffle the discard pile and make it the new deck.
# This refill is done at most once per call.
if len(self.deck) == 0:
self.logger.debug("Deck length is zero!")
if len(self.discard) == 0:
self.logger.debug("Discard length is also zero!")
self.logger.debug("Exiting the deck_to_hand routine as no more cards.")
return
random.shuffle(self.discard) # Shuffle discard pile
self.logger.debug("shuffled deck")
self.deck = self.discard # Make deck the discard
self.discard = [] # empty the discard pile
self.logger.debug("Moved discard pile to deck. Discard pile set to empty.")
card = self.deck.pop()
self.hand.append(card)
self.logger.debug("Iteration #{}: Drawn {} from deck and added to hand".format(i,card.name))
pass
def print_active_cards(self, title=None, index=False):
"""Display cards in active"""
if title is None: title = "Your Played Cards"
# switch depending on player type
self.logger.debug("Actor is: {}".format(type(self).__name__))
title = self.art.make_title(title)
self.player_logger(title)
self._print_cards(self.active, index=index)
self.player_logger(self.art.underline)
pass
def deck_creator(self, deck_list):
"""Creates the deck from a list of dictionaries
_Input_
list of dicts.
dict contents:
"card" : dict containing all **kwargs for Card()
"count" : number of cards with these settings to create
_Output_
list of Card() types
Expected input example:
[{"count":1, "card":{"name":'Archer', "attack":3, "money":0, "cost":2}},
{"count":2, "card":{"name":'Baker', "attack":0, "money":0, "cost":2}}]
Expected Output example:
[Card('Archer', 3,0,2), Card('Baker', 0,0,2), Card('Baker', 0,0,2)]
"""
deck = [] # get deck ready
for card in deck_list:
for _ in xrange(card["count"]):
# passes the dictionary as a keyword arg (**kwarg)
deck.append(Card(
name_padding=self.parent.max_card_name_len,
num_padding=2,
**card["params"]
))
self.logger.debug("Created {}x{}".format(card["count"], card["params"]["name"]))
return deck
def _print_cards(self, cards, index=False):
"""Prints out the cards provided"""
# max card name length
if len(cards) == 0:
self.logger.game(self.art.index_buffer+ \
"Nothing interesting to see here...")
else:
for i, card in enumerate(cards):
num_str = "[{}] ".format(i) if index else self.art.index_buffer
self.logger.game(num_str + "{}".format(card))
pass
@wrap_all(log_me)
class CommonUserActions(object):
"""Contains actions for user and computer"""
def __init__(self):
pass
def newgame(self):
# revert to initial state
for attr, val in self.init.iteritems():
setattr(self, attr, val)
self.active = []
self.hand = []
self.discard = []
self.deck = self.deck_creator(self.deck_settings)
pass
def end_turn(self):
"""Ends the turn of the user"""
self.logger.debug("Ending Turn: {}".format(self.name))
# If player has cards in the hand add to discard pile
self.discard_hand()
# If there are cards in the active deck
# then move them all to the discard pile
self.discard_active_cards()
# Move cards from deck to hand
self.deck_to_hand()
pass
def play_all_cards(self):
"""transfer all cards from hand to active
add values in hand to current totals
should only be used by User and Computer
"""
for i in xrange(0, len(self.hand)):
card = self.hand.pop()
self.active.append(card)
self.logger.debug("Iteration #{}: Drawn {} from deck and added to active deck".format(i,card.name))
self.__add_values_to_total(card)
pass
def play_a_card(self, card_number):
"""plays a specific card...
Transfer card to active
add values in hand to current totals
"""
i=0
card_number = int(card_number)
# Transfer card to active
# add values in hand to current totals
card = self.hand.pop(card_number)
self.active.append(card)
self.logger.debug("Iteration #{}: Drawn {} from deck and added to active deck".format(i,card.name))
self.__add_values_to_total(card)
pass
def __add_values_to_total(self, card):
"""Adds money and attack to total"""
money_i = card.get_money()
attack_i = card.get_attack()
self.logger.debug("Money:{}+{} Attack:{}+{}".format(self.money, money_i, self.attack, attack_i))
self.money += money_i
self.attack += attack_i
pass
def discard_hand(self):
"""If there are cards in the hand add to discard pile"""
if (len(self.hand) > 0 ):
# Iterate through all cards in player hand
for i in xrange(0, len(self.hand)):
card = self.hand.pop()
self.logger.debug("Iteration #{}: Moving {} from hand and added to discard pile".format(i, card.name))
self.discard.append(card)
else:
self.logger.debug("Hand length is zero. No cards to discard.")
pass
def discard_active_cards(self):
"""If there cards in PC active deck
then move all cards from active to discard"""
if (len(self.active) > 0 ):
for i in xrange(0, len(self.active)):
card = self.active.pop()
self.logger.debug("Iteration #{}: Moving {} from hand and added to discard pile".format(i, card.name))
self.discard.append(card)
else:
self.logger.debug("Active Deck length is zero. No cards to discard.")
pass
def display_values(self, attack=None, money=None):
""" Display player values"""
# allows forced values
if attack is None: attack = self.attack
if money is None: money = self.money
padded_name = self.name.ljust(self.parent.max_player_name_len)
out_str = "{} Values :: ".format(padded_name)
out_str += " Attack: {} Money: {}".format(
attack, money)
self.player_logger("")
self.player_logger(out_str)
self.player_logger("")
pass
def show_health(self):
"""Shows players' health"""
# creates an attribute based on the class
padded_name = self.name.ljust(self.parent.max_player_name_len)
out_str = "{} Health : ".format(padded_name)
out_str += "{}".format(self.health)
self.player_logger(out_str)
pass
def attack_player(self, other_player):
""" Attack another player
other_player expected input is a class
that corresponds to another sibling player
an example of this from self = game.User() would be:
self.attack(self.parent.computer)
which would attack the computer form the player
"""
self.logger.debug("{0} Attacking {1} with strength {2}".format(self.name, other_player.name, self.attack))
self.logger.debug("{0} Health before attack: {1}".format(other_player.name, other_player.health))
other_player.health -= self.attack
self.attack = 0
self.logger.debug("{0} Attack: {1}".format(self.name, self.attack))
pass
def reset_vals(self):
"""resets money and attack"""
self.logger.debug("Money and Attack set to 0 for {}".format(self.name))
self.money = 0
self.attack = 0
pass | gpl-3.0 | 8,199,571,315,852,250,000 | 35.021352 | 118 | 0.545993 | false |
tnagorra/nspell | lib/misc.py | 1 | 2396 | # cython: language_level=3
import re
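# Mreplace performs a single-pass, multi-pattern replacement: the dictionary
# keys are joined into one regex and every match is substituted by its value.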
class Mreplace:
def __init__(self, mydict):
self._mydict = mydict
self._rx = re.compile('|'.join(map(re.escape, self._mydict)))
def replace(self, text):
return self._rx.sub(lambda x: self._mydict[x.group(0)], text)
class Mmatch:
def __init__(self, mylist):
self._rx = re.compile('|'.join(mylist))
def match(self, text):
return self._rx.match(text)
_matches = {
'.*[?{}(),/\\"\';+=_*&^$#@!~`|\[\]]+.*',
'.*[a-zA-Z0-9]+.*',
'[-+]?[०-९]+(\.[०-९]+)?'
}
_validator = Mmatch(_matches)
def valid(mystr):
return not _validator.match(mystr)
_replacements = {
'':'',
'-':'-',
'—':'-',
'–':'-',
' :':' : ',
'।':' । ',
'’':' " ',
'‘':' " ',
'“':' " ',
'”':' " ',
'"':' " ',
"'":' " ',
'?':' ? ',
'!':' ! ',
',':' , ',
'/':' / ',
'÷':' ÷ ',
'…':' … ',
'{':' { ',
'}':' } ',
'[':' [ ',
']':' ] ',
'(':' ( ',
')':' ) ',
'=': ' = ',
'***': ' ',
'**':' ',
'*':' ',
'~': ' ',
'`': ' ',
'#': ' ',
'...': ' ... ',
'..': ' ... ',
'.': ' . '
}
_tokenizer = Mreplace(_replacements)
def tokenize(mystr):
return _tokenizer.replace(mystr).split()
# FIXME tokenizer to split the non-valid words
# TODO Use regex to match for 'ं'
# Dictionary of characters that have similar phonics, normalized words
# will have zero edit distance if they differ in only _phonics
_phonics = {
'ा':'आ',
'ो':'ओ',
'ी':'इ',
'ि':'इ',
'ई':'इ',
'ू':'उ',
'ु':'उ',
'ऊ':'उ',
'े':'ए',
'्':'',
'श':'स',
'ष':'स',
'व':'ब',
'':'', # Contains a non-joiner
}
_normalizer = Mreplace(_phonics)
# Normalize word (
def normalize(word):
return _normalizer.replace(word)
_dependent = {
'ँ':'',
'ं':'',
'ः':'',
'ा':'',
'ि':'',
'ी':'',
'ु':'',
'ू':'',
'ृ':'',
'े':'',
'ै':'',
'ो':'',
'ौ':'',
'्':'',
'':'',
}
_len = Mreplace(_dependent)
def length(mystr):
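# Count base characters only: dependent vowel signs and diacritics are
# stripped before measuring the length.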
x = _len.replace(mystr)
return (len(x),x)
| gpl-3.0 | 3,902,317,311,436,835,300 | 16.40458 | 70 | 0.348246 | false |
dkkline/CanSat14-15 | feeder/feeder.py | 1 | 3082 | """
Contains a tornado-based WebSocket server in charge of supplying
connected clients with live or replay data.
"""
import tornado.ioloop
import tornado.web
import tornado.websocket
from collections import deque
from pprint import pprint
import json
from .config import CACHE_SIZE, PORT, FREQUENCY
from groundstation.config import COM_FILE, BIND_ADDRESS
from groundstation.parse import parse_line
from groundstation.exceptions import InvalidLine
from groundstation.utilities import Buffer
com_handle = open(COM_FILE, "r")
buf = Buffer(com_handle)
clients = []
cache = deque(maxlen=CACHE_SIZE)
class BaseWebSocket(tornado.websocket.WebSocketHandler):
"""
A base class for all WebSocket interfaces.
"""
def check_origin(self, origin):
return True # All clients are welcome
class LiveDataWebSocket(BaseWebSocket):
"""
Serves clients connected to the live endpoint with live data.
"""
def open(self):
"""
Called when a client opens the connection.
"""
clients.append(self)
print("A client has opened a connection.")
for data_point in cache:
self.write_message(data_point)
def on_close(self):
"""
Called when a client closes the connection.
"""
clients.remove(self)
print("A client closed its connection.")
def on_message(self, message):
"""
Called when a client sends a message.
"""
print("[WARNNING] Got message: {}".format(message))
class ReplayWebSocket(BaseWebSocket):
"""
Serves clients connected to the replay endpoint.
"""
def broadcast(message):
"""
Broadcasts a message to all the connected clients.
"""
for client in clients:
client.write_message(message)
def get_data():
"""
Called by the ioloop to get data from the listener.
"""
try:
data = parse_line(buf)
except InvalidLine:
return
rel_data = {
"NTC": data["Temp_NTC"],
"Pressure": data["Press"],
"Height": data["Height"],
"Gyroscope": data["GyrZ"] / 360 * 60, # RPM
"Latitude": data["Lat"],
"Longitude": data["Long"]
}
# import random
# if random.randint(0, 10) == 5:
# rel_data["Latitude"] = float(random.randint(0, 10))
# rel_data["Longitude"] = float(random.randint(0, 10))
# rel_data["Height"] = float(random.randint(0, 1500))
pprint(rel_data)
post_data(rel_data)
# print(line, end="")
def post_data(data):
"""
Called by ``get_data``.
Sends ``data`` to the connected clients.
"""
json_data = json.dumps(data)
broadcast(json_data)
app = tornado.web.Application([
(r"/live", LiveDataWebSocket),
(r"/replay", ReplayWebSocket)
])
if __name__ == '__main__':
app.listen(PORT, BIND_ADDRESS)
loop = tornado.ioloop.IOLoop.instance()
getter = tornado.ioloop.PeriodicCallback(get_data, FREQUENCY,
io_loop=loop)
getter.start()
loop.start()
| mit | -8,494,810,209,948,938,000 | 21.661765 | 65 | 0.61843 | false |
panthorstudios/Gold-Rush | oldgame.py | 1 | 10225 | from random import random
from random import randint
import pygame
from pygame.locals import *
from miner import Miner
from explosion import Explosion
class Game(object):
TITLE = "Gold Rush!"
BOARD_LEFT = 20
BOARD_TOP = 130
SQUARE_SIZE = 32
BLACK = (0,0,0)
GREEN=(128,255,128)
YELLOW=(255,255,128)
RED=(255,128,128)
FRAMES_PER_SECOND = 30
ASSAY_X = 540
ASSAY_Y = 84
CHARGES_X = 180
CASH_X = 20
CASH_OFFSET = 30
GOLD_X = 16
CHARGES_OFFSET = 32
HEALTH_X =CHARGES_X + 40
TITLE_X = 340
def display_gold(self):
scoretext='%03d' % self.gold
for i in range(len(scoretext)):
num=int(scoretext[i])*24
pos=i*24
self.screen.blit(self.digits,(self.CASH_X+self.CASH_OFFSET+(i*24),20),(num,0,24,35))
def display_charges(self):
scoretext='%02d' % self.charges
for i in range(len(scoretext)):
num=int(scoretext[i])*24
pos=i*24
self.screen.blit(self.digits,(self.CHARGES_X+self.CHARGES_OFFSET+(i*24),20),(num,0,24,35))
def display_cash(self):
scoretext='%05d' % self.cash
for i in range(len(scoretext)):
num=int(scoretext[i])*24
pos=i*24
self.screen.blit(self.digits,(self.CASH_X+self.CASH_OFFSET+(i*24),66),(num,0,24,35))
def display_health(self):
h=int(84*(self.health/100.0))
b=84-h
c=self.GREEN
if self.health<20:
c=self.RED
elif self.health<40:
c=self.YELLOW
self.screen.fill(c,(self.HEALTH_X,70,h,32))
self.screen.fill(self.BLACK,(self.HEALTH_X+h,70,b,32))
# num=int(scoretext[i])*24
# pos=i*24
# self.screen.blit(self.digits,(self.CASH_X+self.CASH_OFFSET+(i*24),66),(num,0,24,35))
def __init__(self):
pygame.mixer.pre_init(44100,-16,2,2048)
pygame.init()
self.screen=pygame.display.set_mode((680,600))
pygame.display.set_caption(self.TITLE)
self.pressedkey=None
self.bellsound=pygame.mixer.Sound('assets/sounds/bell.ogg')
self.chargesound=pygame.mixer.Sound('assets/sounds/bomb.ogg')
self.yeehawsound=pygame.mixer.Sound('assets/sounds/yeehaw.ogg')
self.kachingsound=pygame.mixer.Sound('assets/sounds/kaching.ogg')
self.board=[]
self.bgbase=pygame.image.load('assets/images/background.png')
self.bg=pygame.image.load('assets/images/background.png')
self.digits=pygame.image.load('assets/images/digits.png')
self.gamearea=pygame.Surface(self.bg.get_size())
self.is_playing=False
# currently 2 nugget images
self.nuggets=[]
self.nuggets.append(pygame.image.load('assets/images/gold01-%dpx.png' % self.SQUARE_SIZE))
self.nuggets.append(pygame.image.load('assets/images/gold02-%dpx.png' % self.SQUARE_SIZE))
self.explosion=Explosion(0,0,self.SQUARE_SIZE)
self.explosion_group=pygame.sprite.RenderPlain(self.explosion)
self.miner=Miner(0,0)
self.clock=pygame.time.Clock()
# add title
text=pygame.image.load('assets/images/text_title.png')
self.screen.blit(text,(self.TITLE_X,self.BOARD_LEFT))
# add assay office
self.office=pygame.image.load('assets/images/assayoffice.png')
self.screen.blit(self.office,(self.ASSAY_X+self.BOARD_LEFT,self.ASSAY_Y))
self.cash=0
self.gold=0
self.charges=10
self.health=100
# add "Gold"
text=pygame.image.load('assets/images/nugget.png')
self.screen.blit(text,(self.GOLD_X,self.BOARD_LEFT))
self.display_gold()
# add "Cash"
text=pygame.image.load('assets/images/text_cash.png')
self.screen.blit(text,(self.CASH_X,66))
self.display_cash()
# add "Charges"
text=pygame.image.load('assets/images/dynamite.png')
self.screen.blit(text,(self.CHARGES_X,16))
self.display_charges()
# add "Miner head"
text=pygame.image.load('assets/images/miner_head.png')
self.screen.blit(text,(self.CHARGES_X,66))
self.display_health()
self.setup()
def setup(self):
# initialize score items
self.cash=0
self.gold=0
self.charges=10
# load background image every time
self.bg=pygame.image.load('assets/images/background.png')
#redraw assay office
self.bg.blit(self.office,(self.ASSAY_X,self.ASSAY_Y-self.BOARD_TOP))
self.board=[]
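# Board legend: ' ' = empty tunnel, '*' = soil, 'G' = gold nugget.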
# top row of empty spaces
pathsup=2
self.board.append([' ']*20)
self.board.append(['*']*20)
for y in range(2,14):
row=[]
for x in range(20):
c='*'
if random()<0.4:
# make a hole
self.bg.fill(self.BLACK,(x*self.SQUARE_SIZE,y*self.SQUARE_SIZE,self.SQUARE_SIZE,self.SQUARE_SIZE))
c=' '
if y>1:
c='G'
nugg=self.nuggets[0 if random()<0.5 else 1]
self.bg.blit(nugg,(x*self.SQUARE_SIZE,y*self.SQUARE_SIZE))
row.append(c)
self.board.append(row)
# add soil
self.gamearea.blit(self.bg,(0,0))
pygame.display.flip()
def print_board(self):
for row in self.board:
print ' '.join(row)
def mainloop(self):
deltat=self.clock.tick(self.FRAMES_PER_SECOND)
tx=self.miner.x
ty=self.miner.y
self.miner_group.clear(self.gamearea,self.bg)
self.explosion_group.clear(self.gamearea,self.bg)
pressedspace=False
for event in pygame.event.get():
#print event
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit(0)
elif event.key in (K_RIGHT,K_LEFT,K_UP,K_DOWN):
self.pressedkey= event.key
elif event.key == K_SPACE:
pressedspace = True
elif event.type == KEYUP:
if event.key in (K_RIGHT,K_LEFT,K_UP,K_DOWN):
if self.pressedkey == event.key:
self.pressedkey = None
#elif event.key == K_SPACE:
#pressedspace = False
# only draw explosion if necessary
if self.explosion.update(deltat):
self.explosion_group.update(deltat)
self.explosion_group.draw(self.gamearea)
else:
if pressedspace and self.pressedkey:
# Do explosion
pressedspace=False
bx=self.miner.x
by=self.miner.y
if self.pressedkey == K_LEFT:
bx-=1
if self.pressedkey == K_RIGHT:
bx+=1
if self.pressedkey == K_UP:
by-=1
if self.pressedkey == K_DOWN:
by+=1
if bx>=0 and bx<20 and (by>0 or (by==0 and self.pressedkey == K_DOWN)) and by<20 and self.charges>0:
self.explosion.explode(bx,by)
self.charges-=1
# print "(%d,%d)->(%d,%d) Boom! %d charges left." % (self.miner.x,self.miner.y,bx,by,self.charges)
self.board[by][bx]=' '
self.bg.fill(self.BLACK,(bx*self.SQUARE_SIZE,by*self.SQUARE_SIZE,self.SQUARE_SIZE,self.SQUARE_SIZE))
self.gamearea.blit(self.bg,(0,0))
self.display_charges()
#self.screen.blit(self.digits,(460+(i*24),20),(num,0,24,35))
self.chargesound.play()
for j in range(20):
x=randint(0,19)
y=randint(2,11)
o=self.board[y][x]
a=self.board[y-1][x]
if o==' ' and a=='*':
self.board[y][x]='*'
xpos=x*self.SQUARE_SIZE
ypos=y*self.SQUARE_SIZE
self.bg.blit(self.bgbase,(x*self.SQUARE_SIZE,y*self.SQUARE_SIZE),(xpos,ypos,self.SQUARE_SIZE,self.SQUARE_SIZE))
if self.pressedkey == K_RIGHT and self.miner.can_move():
if tx<19:
tx += 1
if self.pressedkey == K_LEFT and self.miner.can_move():
if tx>0:
tx -= 1
if self.pressedkey == K_UP and self.miner.can_move():
if ty>0:
ty -= 1
else:
if tx==17:
if self.gold!=0:
self.cash+=self.gold*self.charges
self.gold=0
self.kachingsound.play()
self.display_gold()
self.display_cash()
self.yeehawsound.play()
if self.pressedkey == K_DOWN and self.miner.can_move():
if ty<13:
ty += 1
o=self.board[ty][tx]
if (tx!=self.miner.x or ty!=self.miner.y) and o in ' G':
self.miner.set_location(tx,ty)
if o=='G':
self.board[ty][tx]=' '
self.gold += 1
self.bellsound.play()
self.bg.fill(self.BLACK,(self.miner.x*self.SQUARE_SIZE,self.miner.y*self.SQUARE_SIZE,self.SQUARE_SIZE,self.SQUARE_SIZE))
self.gamearea.blit(self.bg,(0,0))
self.display_gold()
self.miner.update_move()
self.miner_group.update(deltat)
self.miner_group.draw(self.gamearea)
if self.miner.y>0:
self.health-=0.25
if self.health<0:
self.health=0
pass
self.display_health()
else:
self.health+=1
if self.health>100:
self.health=100
self.display_health()
self.screen.blit(self.gamearea,(self.BOARD_LEFT,self.BOARD_TOP))
pygame.display.flip()
| mit | 4,087,435,778,622,428,000 | 32.52459 | 140 | 0.522836 | false |
mpatacchiola/dissecting-reinforcement-learning | src/6/multi-armed-bandit/boltzman_agent_bandit.py | 1 | 4551 | #!/usr/bin/env python
# MIT License
# Copyright (c) 2017 Massimiliano Patacchiola
# https://mpatacchiola.github.io/blog/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#Average cumulated reward: 648.0975
#Std Cumulated Reward: 16.1566083616
#Average utility distribution: [ 0.29889418 0.49732589 0.79993241]
#Average utility RMSE: 0.0016711564118
from multi_armed_bandit import MultiArmedBandit
import numpy as np
import random
def return_rmse(predictions, targets):
"""Return the Root Mean Square error between two arrays
@param predictions an array of prediction values
@param targets an array of target values
@return the RMSE
"""
return np.sqrt(((predictions - targets)**2).mean())
def boltzmann(x, temperature):
"""Compute boltzmann distribution of array x.
@param x the input array
@param temperature
@return the boltzmann array
"""
exponent = np.true_divide(x - np.max(x), temperature)
return np.exp(exponent) / np.sum(np.exp(exponent))
def return_boltzmann_action(temperature, reward_counter_array):
"""Return an action using an epsilon greedy strategy
@return the action selected
"""
tot_arms = reward_counter_array.shape[0]
boltzmann_distribution = boltzmann(reward_counter_array, temperature)
return np.random.choice(tot_arms, p=boltzmann_distribution)
def main():
reward_distribution = [0.3, 0.5, 0.8]
my_bandit = MultiArmedBandit(reward_probability_list=reward_distribution)
temperature_start = 0.1
temperature_stop = 0.0001
epsilon = 0.1
tot_arms = 3
tot_episodes = 2000
tot_steps = 1000
print_every_episodes = 100
cumulated_reward_list = list()
average_utility_array = np.zeros(tot_arms)
temperature_array = np.linspace(temperature_start, temperature_stop, num=tot_steps)
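# The temperature is annealed linearly from temperature_start to
# temperature_stop over the steps of each episode.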
print("Starting Boltzmann agent...")
for episode in range(tot_episodes):
cumulated_reward = 0
reward_counter_array = np.zeros(tot_arms)
action_counter_array = np.full(tot_arms, 1.0e-5)
for step in range(tot_steps):
temperature = temperature_array[step]
action = return_boltzmann_action(temperature, np.true_divide(reward_counter_array, action_counter_array))
reward = my_bandit.step(action)
reward_counter_array[action] += reward
action_counter_array[action] += 1
cumulated_reward += reward
# Append the cumulated reward for this episode in a list
cumulated_reward_list.append(cumulated_reward)
utility_array = np.true_divide(reward_counter_array, action_counter_array)
average_utility_array += utility_array
if episode % print_every_episodes == 0:
print("Episode: " + str(episode))
print("Cumulated Reward: " + str(cumulated_reward))
print("Reward counter: " + str(reward_counter_array))
print("Utility distribution: " + str(utility_array))
print("Utility RMSE: " + str(return_rmse(utility_array, reward_distribution)))
print("")
# Print the average cumulated reward for all the episodes
print("Average cumulated reward: " + str(np.mean(cumulated_reward_list)))
print("Std Cumulated Reward: " + str(np.std(cumulated_reward_list)))
print("Average utility distribution: " + str(average_utility_array / tot_episodes))
print("Average utility RMSE: " + str(return_rmse(average_utility_array/tot_episodes, reward_distribution)))
if __name__ == "__main__":
main()
| mit | -6,810,992,298,096,698,000 | 42.342857 | 117 | 0.702044 | false |
oesteban/phantomas | phantomas/utils/shm.py | 1 | 6453 | """
This module contains an implementation of the real, antipodally symmetric
Spherical Harmonics basis as defined in [1]_.
References
----------
.. [1] Descoteaux, Maxime, Elaine Angelino, Shaun Fitzgibbons, and Rachid
Deriche. "Regularized, fast, and robust analytical Q-ball imaging"
Magnetic Resonance in Medicine 58, no. 3 (2007): 497-510
"""
import numpy as np
from scipy.misc import factorial
from scipy.special import lpmv, legendre, sph_harm
import hashlib
def angular_function(j, theta, phi):
"""
Returns the values of the spherical harmonics function at given
    positions specified by colatitude and azimuthal angles.
Parameters
----------
j : int
The spherical harmonic index.
theta : array-like, shape (K, )
The colatitude angles.
phi : array-like, shape (K, )
The azimuth angles.
Returns
-------
f : array-like, shape (K, )
The value of the function at given positions.
"""
l = sh_degree(j)
m = sh_order(j)
# We follow here reverse convention about theta and phi w.r.t scipy.
sh = sph_harm(np.abs(m), l, phi, theta)
if m < 0:
return np.sqrt(2) * sh.real
if m == 0:
return sh.real
if m > 0:
return np.sqrt(2) * sh.imag
def spherical_function(j, x, y, z):
"""
Returns the values of the spherical harmonics function at given
positions specified by Cartesian coordinates.
Parameters
----------
x, y, z : array-like, shape (K, )
Cartesian coordinates.
Returns
-------
f : array-like, shape (K, )
The value of the function at given positions.
"""
theta = np.arccos(z)
phi = np.arctan2(y, x)
return angular_function(j, theta, phi)
def dimension(order):
r"""
Returns the dimension, :math:`R`, of the real, antipodally symmetric
spherical harmonics basis for a given truncation order.
Parameters
----------
order : int
        The truncation order.
Returns
-------
R : int
The dimension of the truncated spherical harmonics basis.
"""
return (order + 1) * (order + 2) / 2
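# Illustrative values for dimension(), following directly from the formula above:
# dimension(0) == 1, dimension(2) == 6, dimension(4) == 15, dimension(8) == 45.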
def j(l, m):
r"""
Returns the flattened spherical harmonics index corresponding to degree
``l`` and order ``m``.
Parameters
----------
l : int
Degree of the spherical harmonics. Should be even.
m : int
Order of the spherical harmonics, should verify :math:`-l \leq m \leq l`
Returns
-------
j : int
The associated index of the spherical harmonic.
"""
if np.abs(m) > l:
raise NameError('SphericalHarmonics.j: m must lie in [-l, l]')
return int(l + m + (2 * np.array(range(0, l, 2)) + 1).sum())
def sh_degree(j):
"""
Returns the degree, ``l``, of the spherical harmonic associated to index
``j``.
Parameters
----------
j : int
The flattened index of the spherical harmonic.
Returns
-------
l : int
The associated even degree.
"""
l = 0
while dimension(l) - 1 < j:
l += 2
return l
def sh_order(j):
"""
Returns the order, ``m``, of the spherical harmonic associated to index
``j``.
Parameters
----------
j : int
The flattened index of the spherical harmonic.
Returns
-------
m : int
The associated order.
"""
l = sh_degree(j)
return j + l + 1 - dimension(l)
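# Illustrative round trip between the flattened index and (degree, order), using
# only the functions defined above:
# j(2, -2) == 1, and conversely sh_degree(1) == 2 and sh_order(1) == -2;
# similarly j(4, 0) == 10, sh_degree(10) == 4 and sh_order(10) == 0.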
class _CachedMatrix():
"""
Returns the spherical harmonics observation matrix.
Parameters
----------
theta : array-like, shape (K, )
The colatitude angles.
phi : array-like, shape (K, )
The azimuth angles.
order : int
The spherical harmonics truncation order.
cache : bool
Whether the result should be cached or not.
Returns
-------
H : array-like, shape (K, R)
The spherical harmonics observation matrix.
"""
def __init__(self):
self._cache = {}
def __call__(self, theta, phi, order=4, cache=True):
if not cache:
return self._eval_matrix(theta, phi, order)
key1 = self._hash(theta)
key2 = self._hash(phi)
if (key1, key2, order) in self._cache:
return self._cache[(key1, key2, order)]
else:
val = self._eval_matrix(theta, phi, order)
self._cache[(key1, key2, order)] = val
return val
def _hash(self, np_array):
return hashlib.sha1(np_array).hexdigest()
def _eval_matrix(self, theta, phi, order):
N = theta.shape[0]
dim_sh = dimension(order)
ls = [l for L in range(0, order + 1, 2) for l in [L] * (2*L + 1)]
ms = [m for L in range(0, order + 1, 2) for m in range(-L, L+1)]
ls = np.asarray(ls, dtype=np.int)[np.newaxis, :]
ms = np.asarray(ms, dtype=np.int)[np.newaxis, :]
sh = sph_harm(np.abs(ms), ls,
phi[:, np.newaxis], theta[:, np.newaxis])
H = np.where(ms > 0, sh.imag, sh.real)
H[:, (ms != 0)[0]] *= np.sqrt(2)
return H
matrix = _CachedMatrix()
def L(order=4):
"""Computees the Laplace-Beltrami operator matrix.
Parameters
----------
order : int
The truncation order (should be an even number).
"""
dim_sh = dimension(order)
L = np.zeros((dim_sh, dim_sh))
for j in range(dim_sh):
l = sh_degree(j)
L[j, j] = - (l * (l + 1))
return L
def P(order=4):
"""Returns the Funk-Radon operator matrix.
Parameters
----------
order : int
The truncation order (should be an even number).
"""
dim_sh = dimension(order)
    P = np.zeros((dim_sh, dim_sh))
for j in range(dim_sh):
l = sh_degree(j)
        P[j, j] = 2 * np.pi * legendre(l)(0)
return P
def convert_to_mrtrix(order):
"""
Returns the linear matrix used to convert coefficients into the mrtrix
convention for spherical harmonics.
Parameters
----------
order : int
Returns
-------
conversion_matrix : array-like, shape (dim_sh, dim_sh)
"""
dim_sh = dimension(order)
conversion_matrix = np.zeros((dim_sh, dim_sh))
for j in range(dim_sh):
l = sh_degree(j)
m = sh_order(j)
if m == 0:
conversion_matrix[j, j] = 1
else:
conversion_matrix[j, j - 2*m] = np.sqrt(2)
return conversion_matrix
| bsd-3-clause | 8,120,573,029,460,389,000 | 23.819231 | 80 | 0.561444 | false |
sassoftware/mint | mint/db/mirror.py | 1 | 5677 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mint.lib import database
class InboundMirrorsTable(database.KeyedTable):
name = 'InboundMirrors'
key = 'inboundMirrorId'
fields = ['inboundMirrorId', 'targetProjectId', 'sourceLabels',
'sourceUrl', 'sourceAuthType', 'sourceUsername',
'sourcePassword', 'sourceEntitlement',
'mirrorOrder', 'allLabels']
def getIdByHostname(self, hostname):
cu = self.db.cursor()
cu.execute("""
SELECT MIN(inboundMirrorId) FROM InboundMirrors
JOIN Projects ON Projects.projectId = InboundMirrors.targetProjectId
WHERE Projects.fqdn = ?
""", hostname)
return cu.fetchone()[0]
class OutboundMirrorsTable(database.KeyedTable):
name = 'OutboundMirrors'
key = 'outboundMirrorId'
fields = ['outboundMirrorId', 'sourceProjectId', 'targetLabels',
'allLabels', 'recurse', 'matchStrings', 'mirrorOrder',
'useReleases',
]
def __init__(self, db, cfg):
self.cfg = cfg
database.KeyedTable.__init__(self, db)
def get(self, *args, **kwargs):
res = database.KeyedTable.get(self, *args, **kwargs)
if 'allLabels' in res:
res['allLabels'] = bool(res['allLabels'])
if 'recurse' in res:
res['recurse'] = bool(res['recurse'])
return res
def delete(self, id):
cu = self.db.transaction()
try:
cu.execute("""DELETE FROM OutboundMirrors WHERE
outboundMirrorId = ?""", id)
# Cleanup mapping table ourselves if we are using SQLite,
            # as it doesn't know about constraints.
if self.cfg.dbDriver == 'sqlite':
cu.execute("""DELETE FROM OutboundMirrorsUpdateServices WHERE
outboundMirrorId = ?""", id)
except:
self.db.rollback()
raise
else:
self.db.commit()
return True
def getOutboundMirrors(self):
cu = self.db.cursor()
cu.execute("""SELECT outboundMirrorId, sourceProjectId,
targetLabels, allLabels, recurse,
matchStrings, mirrorOrder, fullSync,
useReleases
FROM OutboundMirrors
ORDER by mirrorOrder""")
return [list(x[:3]) + [bool(x[3]), bool(x[4]), x[5].split(), \
x[6], bool(x[7]), bool(x[8])] \
for x in cu.fetchall()]
class OutboundMirrorsUpdateServicesTable(database.DatabaseTable):
name = "OutboundMirrorsUpdateServices"
fields = [ 'updateServiceId', 'outboundMirrorId' ]
def getOutboundMirrorTargets(self, outboundMirrorId):
cu = self.db.cursor()
cu.execute("""SELECT obus.updateServiceId, us.hostname,
us.mirrorUser, us.mirrorPassword, us.description
FROM OutboundMirrorsUpdateServices obus
JOIN
UpdateServices us
USING(updateServiceId)
WHERE outboundMirrorId = ?""", outboundMirrorId)
return [ list(x[:4]) + [x[4] and x[4] or ''] \
for x in cu.fetchall() ]
def setTargets(self, outboundMirrorId, updateServiceIds):
cu = self.db.transaction()
updates = [ (outboundMirrorId, x) for x in updateServiceIds ]
try:
cu.execute("""DELETE FROM OutboundMirrorsUpdateServices
WHERE outboundMirrorId = ?""", outboundMirrorId)
except:
pass # don't worry if there is nothing to do here
try:
cu.executemany("INSERT INTO OutboundMirrorsUpdateServices VALUES(?,?)",
updates)
except:
self.db.rollback()
raise
else:
self.db.commit()
return updateServiceIds
class UpdateServicesTable(database.KeyedTable):
name = 'UpdateServices'
key = 'updateServiceId'
fields = [ 'updateServiceId', 'hostname',
'mirrorUser', 'mirrorPassword', 'description' ]
def __init__(self, db, cfg):
self.cfg = cfg
database.KeyedTable.__init__(self, db)
def getUpdateServiceList(self):
cu = self.db.cursor()
cu.execute("""SELECT %s FROM UpdateServices""" % ', '.join(self.fields))
return [ list(x) for x in cu.fetchall() ]
def delete(self, id):
cu = self.db.transaction()
try:
cu.execute("""DELETE FROM UpdateServices WHERE
updateServiceId = ?""", id)
# Cleanup mapping table ourselves if we are using SQLite,
            # as it doesn't know about constraints.
if self.cfg.dbDriver == 'sqlite':
cu.execute("""DELETE FROM OutboundMirrorsUpdateServices WHERE
updateServiceId = ?""", id)
except:
self.db.rollback()
raise
else:
self.db.commit()
return True
| apache-2.0 | 475,335,617,005,576,450 | 34.93038 | 83 | 0.573014 | false |
ikben/troposphere | examples/Route53_RoundRobin.py | 1 | 1994 | # Converted from Route53_RoundRobin.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import Join
from troposphere import Parameter, Ref, Template
from troposphere.route53 import RecordSet, RecordSetGroup
t = Template()
t.set_description(
"AWS CloudFormation Sample Template Route53_RoundRobin: Sample template "
"showing how to use weighted round robin (WRR) DNS entried via Amazon "
"Route 53. This contrived sample uses weighted CNAME records to "
"illustrate that the weighting influences the return records. It assumes "
" that you already have a Hosted Zone registered with Amazon Route 53. "
"**WARNING** This template creates an Amazon EC2 instance. "
"You will be billed for the AWS resources used if you create "
"a stack from this template.")
hostedzone = t.add_parameter(Parameter(
"HostedZone",
Description="The DNS name of an existing Amazon Route 53 hosted zone",
Type="String",
))
myDNSRecord = t.add_resource(RecordSetGroup(
"myDNSRecord",
HostedZoneName=Join("", [Ref(hostedzone), "."]),
Comment="Contrived example to redirect to aws.amazon.com 75% of the time "
"and www.amazon.com 25% of the time.",
RecordSets=[
RecordSet(
SetIdentifier=Join(" ", [Ref("AWS::StackName"), "AWS"]),
Name=Join("", [Ref("AWS::StackName"), ".", Ref("AWS::Region"), ".",
Ref(hostedzone), "."]),
Type="CNAME",
TTL="900",
ResourceRecords=["aws.amazon.com"],
Weight="3",
),
RecordSet(
SetIdentifier=Join(" ", [Ref("AWS::StackName"), "Amazon"]),
Name=Join("", [Ref("AWS::StackName"), ".", Ref("AWS::Region"), ".",
Ref(hostedzone), "."]),
Type="CNAME",
TTL="900",
ResourceRecords=["www.amazon.com"],
Weight="1",
),
],
))
print(t.to_json())
| bsd-2-clause | -3,320,936,269,870,272,000 | 35.254545 | 79 | 0.61334 | false |
cloudtools/troposphere | troposphere/applicationautoscaling.py | 1 | 3322 | from . import AWSObject, AWSProperty
from .validators import boolean, double, integer, positive_integer
class ScalableTargetAction(AWSProperty):
props = {
"MaxCapacity": (integer, False),
"MinCapacity": (integer, False),
}
class ScheduledAction(AWSProperty):
props = {
"EndTime": (str, False),
"ScalableTargetAction": (ScalableTargetAction, False),
"Schedule": (str, True),
"ScheduledActionName": (str, True),
"StartTime": (str, False),
"Timezone": (str, False),
}
class SuspendedState(AWSProperty):
props = {
"DynamicScalingInSuspended": (boolean, False),
"DynamicScalingOutSuspended": (boolean, False),
"ScheduledScalingSuspended": (boolean, False),
}
class ScalableTarget(AWSObject):
resource_type = "AWS::ApplicationAutoScaling::ScalableTarget"
props = {
"MaxCapacity": (integer, True),
"MinCapacity": (integer, True),
"ResourceId": (str, True),
"RoleARN": (str, True),
"ScalableDimension": (str, True),
"ScheduledActions": ([ScheduledAction], False),
"ServiceNamespace": (str, True),
"SuspendedState": (SuspendedState, False),
}
class StepAdjustment(AWSProperty):
props = {
"MetricIntervalLowerBound": (integer, False),
"MetricIntervalUpperBound": (integer, False),
"ScalingAdjustment": (integer, True),
}
class StepScalingPolicyConfiguration(AWSProperty):
props = {
"AdjustmentType": (str, False),
"Cooldown": (integer, False),
"MetricAggregationType": (str, False),
"MinAdjustmentMagnitude": (integer, False),
"StepAdjustments": ([StepAdjustment], False),
}
class MetricDimension(AWSProperty):
props = {
"Name": (str, True),
"Value": (str, True),
}
class CustomizedMetricSpecification(AWSProperty):
props = {
"Dimensions": ([MetricDimension], False),
"MetricName": (str, False),
"Namespace": (str, False),
"Statistic": (str, False),
"Unit": (str, True),
}
class PredefinedMetricSpecification(AWSProperty):
props = {
"PredefinedMetricType": (str, True),
"ResourceLabel": (str, False),
}
class TargetTrackingScalingPolicyConfiguration(AWSProperty):
props = {
"CustomizedMetricSpecification": (CustomizedMetricSpecification, False),
"DisableScaleIn": (boolean, False),
"PredefinedMetricSpecification": (PredefinedMetricSpecification, False),
"ScaleInCooldown": (positive_integer, False),
"ScaleOutCooldown": (positive_integer, False),
"TargetValue": (double, True),
}
class ScalingPolicy(AWSObject):
resource_type = "AWS::ApplicationAutoScaling::ScalingPolicy"
props = {
"PolicyName": (str, True),
"PolicyType": (str, False),
"ResourceId": (str, False),
"ScalableDimension": (str, False),
"ServiceNamespace": (str, False),
"ScalingTargetId": (str, False),
"StepScalingPolicyConfiguration": (
StepScalingPolicyConfiguration,
False,
),
"TargetTrackingScalingPolicyConfiguration": (
TargetTrackingScalingPolicyConfiguration,
False,
),
}
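# Minimal illustrative usage of the classes above (not executed here). The
# cluster/service names, role ARN and scaling dimension are placeholders for an
# ECS service scaling setup; adjust them to your own resources:
#
#   scalable_target = ScalableTarget(
#       "WebAppScalableTarget",
#       MaxCapacity=10,
#       MinCapacity=1,
#       ResourceId="service/my-cluster/my-service",
#       RoleARN="arn:aws:iam::123456789012:role/MyAutoScalingRole",
#       ScalableDimension="ecs:service:DesiredCount",
#       ServiceNamespace="ecs",
#   )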
| bsd-2-clause | -1,866,505,902,557,993,200 | 27.393162 | 80 | 0.617399 | false |
eddiejessup/nex | tests/test_lexer.py | 1 | 4285 | from nex.constants.codes import CatCode
from nex.accessors import Codes
from nex.lexer import Lexer
class DummyCatCodeGetter:
def __init__(self):
self.char_to_cat = Codes.default_initial_cat_codes()
def get(self, char):
return self.char_to_cat[char]
def lex_string_to_tokens(s):
cat_code_getter = DummyCatCodeGetter()
lex = Lexer.from_string(s, get_cat_code_func=cat_code_getter.get)
return list(lex.advance_to_end())
def test_trioing():
"""Check trioing (escaping characters to obtain exotic character codes)."""
test_input = 'abc^^I^^K^^>'
# The code numbers we should return if all is well.
correct_code_nrs = [ord('a'), ord('b'), ord('c'),
ord('\t'), ord('K') - 64, ord('>') + 64]
# Check with various characters, including the usual '^'.
for trio_char in ['^', '1', '@']:
cat_code_getter = DummyCatCodeGetter()
# Set our chosen trioing character to have superscript CatCode, so we
# can use it for trioing (this is a necessary condition to trigger it).
cat_code_getter.char_to_cat[trio_char] = CatCode.superscript
# Input the test string, substituted with the chosen trioing character.
lex = Lexer.from_string(test_input.replace('^', trio_char),
cat_code_getter.get)
tokens = list(lex.advance_to_end())
# Check correct number of tokens were returned
assert len(tokens) == 6
# Check the correct code numbers were returned.
assert [ord(t.value['char']) for t in tokens] == correct_code_nrs
def test_comments():
"""Check comment characters."""
tokens = lex_string_to_tokens(r'hello% say hello')
assert [t.value['char'] for t in tokens] == list('hello')
def test_skipping_blanks():
"""Check multiple spaces are ignored."""
toks_single = lex_string_to_tokens(r'hello m')
    toks_triple = lex_string_to_tokens(r'hello   m')
# Check same char-cat pairs are returned.
assert ([t.value['char'] for t in toks_single] ==
[t.value['char'] for t in toks_triple])
assert ([t.value['cat'] for t in toks_single] ==
[t.value['cat'] for t in toks_triple])
def test_control_sequence():
"""Check multiple spaces are ignored."""
tokens = lex_string_to_tokens(r'a\howdy\world')
assert len(tokens) == 3
assert tokens[0].value['char'] == 'a'
assert tokens[1].value == 'howdy'
assert tokens[2].value == 'world'
# Check control sequences starting with a non-letter, make single-letter
# control sequences.
tokens_single = lex_string_to_tokens(r'\@a')
assert tokens_single[0].value == '@' and len(tokens_single) == 2
def test_control_sequence_spacing():
"""Check multiple spaces are ignored."""
tokens_close = lex_string_to_tokens(r'\howdy\world')
tokens_spaced = lex_string_to_tokens(r'\howdy \world')
tokens_super_spaced = lex_string_to_tokens(r'\howdy \world')
assert len(tokens_close) == len(tokens_spaced) == len(tokens_super_spaced)
def test_new_lines():
"""Check what happens when entering new-lines."""
# Note that I'm not even sure what the specification says *should* happen
# here.
# Check entering once in the middle of a line makes a space.
tokens = lex_string_to_tokens('a\n')
assert len(tokens) == 2 and tokens[1].value['char'] == ' '
# Check entering a new-line at a line beginning gives a \par.
tokens = lex_string_to_tokens('a\n\n')
assert (len(tokens) == 3 and tokens[1].value['char'] == ' ' and
tokens[2].value == 'par')
# Check entering a new-line when skipping spaces does nothing. Note that a
# space *is* returned, but from the first space after the 'a'.
tokens = lex_string_to_tokens('a \n')
assert len(tokens) == 2 and tokens[1].value['char'] == ' '
def test_tokenise():
"""Check what happens when entering non-lexical tokens."""
s = '@${'
tokens = lex_string_to_tokens(s)
assert len(tokens) == 3
# Tokens should just be wrapped as a lexical token, with the character
# returned by the reader, and category assigned by the state.
for c, t in zip(s, tokens):
assert t.value['char'] == c
assert t.value['cat'] == CatCode.other
| mit | 1,767,900,020,284,159,700 | 37.258929 | 79 | 0.633372 | false |
sportorg/pysport | sportorg/modules/winorient/winorient_server.py | 1 | 2292 | import datetime
from socket import *
from sportorg.utils.time import time_to_hhmmss
"""
Format of WDB data package
- length is 1772 bytes
1) 36b text block at the beginning
2 4132500 0 0 3974600\n
bib - finish_time - disqual_status - 0 - start_time
2) binary part
bytes 128-131 - card number
bytes 136-139 - qty of punches
bytes 144-147 - start in card
bytes 152-155 - finish in card
starting from b172: 8b blocks * 200
- byte 1 control number
- bytes 4-7 punch time
"""
def int_to_time(value):
""" convert value from 1/100 s to time """
today = datetime.datetime.now()
ret = datetime.datetime(
today.year,
today.month,
today.day,
value // 360000 % 24,
(value % 360000) // 6000,
(value % 6000) // 100,
(value % 100) * 10000,
)
return ret
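# For reference, with the sample header shown in the docstring above
# ("2 4132500 0 0 3974600"), int_to_time(4132500) yields today's date at
# 11:28:45 (4132500 hundredths of a second after midnight).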
host = 'localhost'
port = 1212
addr = (host, port)
udp_socket = socket(AF_INET, SOCK_DGRAM)
udp_socket.bind(addr)
# main loop
while True:
    print('waiting for data...')
    # recvfrom - receive a data packet
conn, addr = udp_socket.recvfrom(1772)
print('client addr: ', addr)
print('data: ', conn)
# string = ''
# for i in conn:
# string += str( hex(i)) + '-'
# print(string)
text_array = bytes(conn[0:34]).decode().split()
bib = text_array[0]
result = int_to_time(int(text_array[1]))
status = text_array[2]
start = int_to_time(int(text_array[4]))
byteorder = 'little'
punch_qty = int.from_bytes(conn[136:140], byteorder)
card_start = int_to_time(int.from_bytes(conn[144:148], byteorder))
card_finish = int_to_time(int.from_bytes(conn[152:156], byteorder))
init_offset = 172
punches = []
for i in range(punch_qty):
cp = int.from_bytes(
conn[init_offset + i * 8 : init_offset + i * 8 + 1], byteorder
)
time = int_to_time(
int.from_bytes(
conn[init_offset + i * 8 + 4 : init_offset + i * 8 + 8], byteorder
)
)
punches.append((cp, time_to_hhmmss(time)))
print('bib=' + bib + ' result=' + time_to_hhmmss(result) + ' punches=')
print(punches)
    # sendto - send the response
udp_socket.sendto(b'message received by the server', addr)
# udp_socket.close()
| gpl-3.0 | -1,973,854,375,864,043,800 | 23.126316 | 82 | 0.585951 | false |
alonsebastian/SocialID | urls.py | 1 | 1921 | from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
(r'^accounts/login/$', 'accounts.views.customLogin'),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
(r'^accounts/register/$', 'accounts.views.register'),
(r'^accounts/confirmation/(?P<activation_key>\S*)/$', 'accounts.views.confirm'),
(r'accounts/changepass/$', 'accounts.views.changePassword'),
(r'^search/$', 'search.views.search'),
(r'^site/modify/$', 'personal_page.views.manage'),
(r'^$', 'static_ish.views.home'),
(r'^who-are-we/$', 'static_ish.views.about'),
(r'^how-it-works/$', 'static_ish.views.how'),
(r'^facebook/login/$', 'facebook.views.login'),
(r'^facebook/authentication_callback/$', 'facebook.views.authentication_callback'),
(r'^accounts/password/reset/$', 'django.contrib.auth.views.password_reset',
{'post_reset_redirect' : '/accounts/password/reset/done/'}),
(r'^accounts/password/reset/done/$', 'django.contrib.auth.views.password_reset_done'),
(r'^accounts/password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm',
{'post_reset_redirect' : '/accounts/password/done/'}),
(r'^accounts/password/done/$', 'django.contrib.auth.views.password_reset_complete'),
#SHOULD BE THE VERY LAST
(r'^(?P<id_>\S*)/$', 'personal_page.views.personal'),
# Examples:
# url(r'^$', 'demo.views.home', name='home'),
# url(r'^demo/', include('demo.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| gpl-2.0 | -2,820,156,707,820,315,600 | 41.688889 | 127 | 0.652264 | false |
copelco/Durham-Open-Data-Catalog | fabfile.py | 1 | 9452 | import ConfigParser
import os
import re
import yaml
from argyle import rabbitmq, postgres, nginx, system
from argyle.base import upload_template
from argyle.postgres import create_db_user, create_db
from argyle.supervisor import supervisor_command, upload_supervisor_app_conf
from argyle.system import service_command, start_service, stop_service, restart_service
from fabric.api import abort, cd, env, get, hide, local, put, require, run, settings, sudo, task
from fabric.contrib import files, console, project
# Directory structure
PROJECT_ROOT = os.path.dirname(__file__)
CONF_ROOT = os.path.join(PROJECT_ROOT, 'conf')
SERVER_ROLES = ['app', 'lb', 'db']
env.project = 'OpenDataCatalog'
env.project_user = 'opendata'
env.repo = u'[email protected]:copelco/Durham-Open-Data-Catalog.git'
env.shell = '/bin/bash -c'
env.disable_known_hosts = True
env.port = 22
env.forward_agent = True
# Additional settings for argyle
env.ARGYLE_TEMPLATE_DIRS = (
    os.path.join(CONF_ROOT, 'templates'),
)
@task
def vagrant():
env.environment = 'production'
env.hosts = ['127.0.0.1']
env.port = 2222
env.branch = 'master'
env.server_name = 'dev.example.com'
setup_path()
@task
def production():
env.environment = 'production'
env.hosts = ['50.17.226.149']
env.branch = 'master'
env.server_name = 'opendatadurham.org'
setup_path()
def setup_path():
env.home = '/home/%(project_user)s/' % env
env.root = os.path.join(env.home, 'www', env.environment)
env.code_root = os.path.join(env.root, env.project)
env.project_root = os.path.join(env.code_root, env.project)
env.virtualenv_root = os.path.join(env.root, 'env')
env.log_dir = os.path.join(env.root, 'log')
env.db = '%s_%s' % (env.project, env.environment)
env.vhost = '%s_%s' % (env.project, env.environment)
env.settings = 'local_settings'
load_secrets()
def load_secrets():
stream = open(os.path.join(PROJECT_ROOT, "secrets.yaml"), 'r')
secrets = yaml.load(stream)
env_secrets = secrets[env.environment]
for key, val in env_secrets.iteritems():
setattr(env, key, val)
@task
def salt_reload():
sudo('salt-call --local state.highstate -l debug')
def know_github():
"""Make sure github.com is in the server's ssh known_hosts file"""
KEYLINE = "|1|t0+3ewjYdZOrDwi/LvvAw/UiGEs=|8TzF6lRm2rdxaXDcByTBWbUIbic= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ=="
files.append("/etc/ssh/ssh_known_hosts", KEYLINE, use_sudo=True)
@task
def setup_server(*roles):
"""Install packages and add configurations for server given roles."""
require('environment')
roles = list(roles)
if not roles:
abort("setup_server requires one or more server roles, e.g. setup_server:app or setup_server:all")
if roles == ['all', ]:
roles = SERVER_ROLES
if 'base' not in roles:
roles.insert(0, 'base')
if 'app' in roles:
# Create project directories and install Python requirements
project_run('mkdir -p %(root)s' % env)
project_run('mkdir -p %(log_dir)s' % env)
# FIXME: update to SSH as normal user and use sudo
# we ssh as the project_user here to maintain ssh agent
# forwarding, because it doesn't work with sudo. read:
# http://serverfault.com/questions/107187/sudo-su-username-while-keeping-ssh-key-forwarding
with settings(user=env.project_user):
if not files.exists(env.code_root):
run('git clone --quiet %(repo)s %(code_root)s' % env)
with cd(env.code_root):
run('git checkout %(branch)s' % env)
run('git pull')
# Install and create virtualenv
# TODO: pip is installed by salt, should not need to test for it here
# TODO: we should make sure we install virtualenv as well
with settings(hide('everything'), warn_only=True):
test_for_pip = run('which pip')
if not test_for_pip:
sudo("easy_install -U pip")
with settings(hide('everything'), warn_only=True):
test_for_virtualenv = run('which virtualenv')
if not test_for_virtualenv:
sudo("pip install -U virtualenv")
if not files.exists(env.virtualenv_root):
project_run('virtualenv -p python2.7 --quiet --clear --distribute %s' % env.virtualenv_root)
# TODO: Why do we need this next part?
path_file = os.path.join(env.virtualenv_root, 'lib', 'python2.7', 'site-packages', 'project.pth')
files.append(path_file, env.project_root, use_sudo=True)
sudo('chown %s:%s %s' % (env.project_user, env.project_user, path_file))
update_requirements()
upload_local_settings()
syncdb()
upload_supervisor_app_conf(app_name=u'gunicorn')
upload_supervisor_app_conf(app_name=u'group')
# Restart services to pickup changes
supervisor_command('reload')
supervisor_command('stop %(environment)s:*' % env)
supervisor_command('start %(environment)s:*' % env)
if 'lb' in roles:
nginx.remove_default_site()
nginx.upload_nginx_site_conf(site_name=u'%(project)s-%(environment)s.conf' % env)
def project_run(cmd):
""" Uses sudo to allow developer to run commands as project user."""
sudo(cmd, user=env.project_user)
@task
def update_requirements():
"""Update required Python libraries."""
require('environment')
project_run(u'HOME=%(home)s %(virtualenv)s/bin/pip install --quiet --use-mirrors -r %(requirements)s' % {
'virtualenv': env.virtualenv_root,
'requirements': os.path.join(env.code_root, 'requirements', 'base.txt'),
'home': env.home,
})
@task
def manage_run(command):
"""Run a Django management command on the remote server."""
require('environment')
manage_base = u"%(virtualenv_root)s/bin/django-admin.py " % env
if '--settings' not in command:
command = u"%s --settings=%s" % (command, env.settings)
project_run(u'%s %s' % (manage_base, command))
@task
def manage_shell():
"""Drop into the remote Django shell."""
manage_run("shell")
@task
def syncdb():
"""Run syncdb and South migrations."""
manage_run('syncdb --noinput')
manage_run('migrate --noinput')
@task
def collectstatic():
"""Collect static files."""
manage_run('collectstatic --noinput')
def match_changes(changes, match):
pattern = re.compile(match)
return pattern.search(changes) is not None
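# For reference, match_changes() simply does a regex search over `git diff --stat`
# output, e.g. match_changes("requirements/base.txt | 2 +-", r"requirements/")
# is True, while match_changes("README.rst | 1 +", r"requirements/") is False.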
@task
def deploy(branch=None):
"""Deploy to a given environment."""
require('environment')
if branch is not None:
env.branch = branch
requirements = False
migrations = False
# Fetch latest changes
with cd(env.code_root):
with settings(user=env.project_user):
run('git fetch origin')
# Look for new requirements or migrations
changes = run("git diff origin/%(branch)s --stat-name-width=9999" % env)
requirements = match_changes(changes, r"requirements/")
migrations = match_changes(changes, r"/migrations/")
if requirements or migrations:
supervisor_command('stop %(environment)s:*' % env)
with settings(user=env.project_user):
run("git reset --hard origin/%(branch)s" % env)
upload_local_settings()
if requirements:
update_requirements()
# New requirements might need new tables/migrations
syncdb()
elif migrations:
syncdb()
collectstatic()
supervisor_command('restart %(environment)s:*' % env)
@task
def upload_local_settings():
"""Upload local_settings.py template to server."""
require('environment')
dest = os.path.join(env.project_root, 'local_settings.py')
upload_template('django/local_settings.py', dest, use_sudo=True)
with settings(warn_only=True):
sudo('chown %s:%s %s' % (env.project_user, env.project_user, dest))
@task
def get_db_dump(clean=True):
"""Get db dump of remote enviroment."""
require('environment')
dump_file = '%(environment)s.sql' % env
temp_file = os.path.join(env.home, dump_file)
flags = '-Ox'
if clean:
flags += 'c'
sudo('pg_dump %s %s > %s' % (flags, env.db, temp_file), user=env.project_user)
get(temp_file, dump_file)
@task
def load_db_dump(dump_file):
"""Load db dump on a remote environment."""
require('environment')
temp_file = os.path.join(env.home, '%(environment)s.sql' % env)
put(dump_file, temp_file, use_sudo=True)
sudo('psql -d %s -f %s' % (env.db, temp_file), user=env.project_user)
@task
def salt_bootstrap():
salt_base = os.path.join(PROJECT_ROOT, "salt/")
minion_file = os.path.join(salt_base, "minion")
put(minion_file, "/etc/salt/minion", use_sudo=True)
salt_root = os.path.join(salt_base, 'roots/')
project.rsync_project(local_dir=salt_root, remote_dir="/tmp/salt",
delete=True)
sudo('rm -rf /srv/*')
sudo('mv /tmp/salt/* /srv/')
sudo('chown root:root -R /srv/')
sudo('salt-call --local state.highstate -l debug')
| mit | -3,924,229,454,974,821,400 | 34.400749 | 457 | 0.653407 | false |
googleapis/python-compute | google/cloud/compute_v1/services/target_pools/pagers.py | 1 | 5656 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetPoolAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetPoolAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetPoolAggregatedList],
request: compute.AggregatedListTargetPoolsRequest,
response: compute.TargetPoolAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListTargetPoolsRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetPoolAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListTargetPoolsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetPoolAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[Tuple[str, compute.TargetPoolsScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.TargetPoolsScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetPoolList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetPoolList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.TargetPoolList],
request: compute.ListTargetPoolsRequest,
response: compute.TargetPoolList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListTargetPoolsRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetPoolList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListTargetPoolsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.TargetPoolList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.TargetPool]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | -9,127,447,600,097,576,000 | 35.727273 | 85 | 0.650813 | false |
keyme/visual-diff | gui.py | 1 | 4290 | #!/usr/bin/python3
from functools import partial
from math import ceil
import tkinter as tk
from zoom_map import ZoomMap
class _Context(tk.Text):
CONTEXT_COUNT = 3 # Lines to display before/after the current one
# TODO: What about files with over 99,999 lines?
LINE_NUMBER_WIDTH = 5 # Number of characters to allocate for line numbers
PRELUDE_WIDTH = LINE_NUMBER_WIDTH + 2 # Line number, colon, space
# TODO: What about files with very long lines? They currently wrap around to
# the next line and push later context out of the widget. Should we truncate
# them instead? and if so, should we change which part gets cut based on the
# location of the token within the line?
TEXT_WIDTH = 80
def __init__(self, tk_parent, data, zoom_map):
height = 2 * self.CONTEXT_COUNT + 1
width = self.PRELUDE_WIDTH + self.TEXT_WIDTH
super().__init__(tk_parent, width=width, height=height,
state=tk.DISABLED, font="TkFixedFont")
self.pack()
# TODO: Use a NamedTuple?
self._tokens, self._lines, self._boundaries = data
self._zoom_map = zoom_map
def display(self, pixel):
# The zoom level is equivalent to the number of tokens described by the
# current pixel in the map.
zoom_level = self._zoom_map.zoom_level
first_token_index = int(pixel * zoom_level)
last_token_index = min(first_token_index + ceil(zoom_level),
len(self._boundaries)) - 1
if not (0 <= first_token_index < len(self._boundaries)):
# TODO: Restrict panning so that we can't go outside the image.
return # We're out of range of the image. Skip it.
line_number = self._boundaries[first_token_index][0][0]
# Recall that line_number comes from the token module, which starts
# counting at 1 instead of 0.
start = line_number - self.CONTEXT_COUNT - 1
end = line_number + self.CONTEXT_COUNT
lines = ["{:>{}}: {}".format(i + 1, self.LINE_NUMBER_WIDTH,
self._lines[i])
if 0 <= i < len(self._lines) else ""
for i in range(start, end)]
text = "\n".join(lines)
# Update the displayed code
self.configure(state=tk.NORMAL)
self.delete("1.0", tk.END)
self.insert(tk.INSERT, text)
# Highlight the tokens of interest...
(ar, ac) = self._boundaries[first_token_index][0]
(br, bc) = self._boundaries[last_token_index][1]
self.tag_add("token",
"{}.{}".format(self.CONTEXT_COUNT + 1,
ac + self.PRELUDE_WIDTH),
"{}.{}".format(self.CONTEXT_COUNT + 1 + br - ar,
bc + self.PRELUDE_WIDTH))
self.tag_config("token", background="yellow")
# ...but don't highlight the line numbers on multi-line tokens.
for i in range(self.CONTEXT_COUNT):
line = i + self.CONTEXT_COUNT + 2
self.tag_remove("token",
"{}.{}".format(line, 0),
"{}.{}".format(line, self.PRELUDE_WIDTH))
# Remember to disable editing again when we're done, so users can't
# modify the code we're displaying!
self.configure(state=tk.DISABLED)
class _Gui(tk.Frame):
def __init__(self, matrix, data_a, data_b, root):
super().__init__(root)
self.pack(fill=tk.BOTH, expand="true")
self._map = ZoomMap(self, matrix)
self._contexts = [_Context(self, data, self._map)
for data in (data_a, data_b)]
[self._map.bind(event, self._on_motion)
for event in ["<Motion>", "<Enter>"]]
def _on_motion(self, event):
# We're using (row, col) format, so the first one changes with Y.
self._contexts[0].display(self._map.canvasy(event.y))
self._contexts[1].display(self._map.canvasx(event.x))
def launch(matrix, data_a, data_b):
root = tk.Tk()
def _quit(event):
root.destroy()
[root.bind("<Control-{}>".format(char), _quit) for char in "wWqQ"]
gui = _Gui(matrix, data_a, data_b, root)
root.mainloop()
| gpl-3.0 | -6,514,295,874,511,223,000 | 39.471698 | 80 | 0.571329 | false |
salexkidd/restframework-definable-serializer | definable_serializer/tests/for_test/migrations/0001_initial.py | 1 | 1872 | # Generated by Django 2.0 on 2017-12-04 00:09
import definable_serializer.models.compat
import definable_serializer.models.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', definable_serializer.models.compat.YAMLField()),
('create_at', models.DateTimeField(auto_now_add=True)),
('update_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='Paper',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('definition', definable_serializer.models.fields.DefinableSerializerByYAMLField()),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='answer',
name='paper',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='for_test.Paper'),
),
migrations.AddField(
model_name='answer',
name='respondent',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='answer',
unique_together={('paper', 'respondent')},
),
]
| mit | 4,701,542,029,549,797,000 | 33.036364 | 114 | 0.573184 | false |
kamar42/merdeka-backend | merdeka/apps/mdk/views.py | 1 | 2179 | from django.shortcuts import render
from merdeka.apps.utils.func import find_model, make_response, json_response, jsonify, set_data, set_status, set_message, set_child
from .models import GoodsChilds
def api_view(request, **kwargs):
resp = make_response()
m = kwargs.get('model', None)
# drop if model is empty
if m is None:
set_status(resp, 'failed')
set_message(resp, 'Model Not Found')
return json_response(resp)
if '_' in m:
_model = ''
for _m in m.split('_'):
_model += _m[:1].upper() + _m[1:].lower()
else:
_model = m[:1].upper() + m[1:].lower()
model = find_model('mdk', _model)
# drop if model was not Found
if model is None:
set_status(resp, 'failed')
set_message(resp, 'Model Not Found')
return json_response(resp)
q = request.GET.get('slug', None)
records = model.objects.all()
if q:
records = model.objects.filter(unique_name=q)
# filtering goods and goodschild
if _model == 'Goods':
g = request.GET.get('goods', None)
if g:
records = model.objects.filter(commodity_id=g)
# c = GoodsChilds.objects.filter(goods=records)
# set_child(resp, [dict(
# id=_c.pk,
# name=_c.name,
# slug=_c.unique_name
# ) for _c in c])
elif _model == 'GoodsChilds':
g = request.GET.get('goods', None)
if g:
records = model.objects.filter(goods_id=g)
set_message(resp, 'We found '+str(records.count())+' records.')
if _model == 'Data':
set_data(resp, [dict(
id=r.pk,
commodity=r.commodity.name,
goods=r.goods.name,
goods_child=r.goods_child.name,
price=str(r.price),
unit=r.unit.name,
venue=r.venue.name,
province=r.province.name,
city=r.city.name
) for r in records])
else:
set_data(resp, [dict(
id=r.pk,
name=r.name,
slug=r.unique_name
) for r in records])
return json_response(resp)
| mit | 2,338,286,988,612,638,700 | 29.690141 | 131 | 0.535108 | false |
DigitalCampus/django-oppia | tests/profile/models/test_models.py | 1 | 9519 | from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from oppia.test import OppiaTestCase
from profile.models import UserProfile, CustomField, UserProfileCustomField
class ProfileCustomFieldsTest(OppiaTestCase):
fixtures = ['tests/test_user.json',
'tests/test_oppia.json',
'tests/test_quiz.json',
'tests/test_course_permissions.json']
VALUE_STR_DEFAULT = "my string"
def test_custom_field_model_name(self):
custom_field = CustomField(
id='my_cf_key',
label='String',
required=True,
type='str')
custom_field.save()
self.assertEqual(str(custom_field), 'my_cf_key')
def test_teacher_only(self):
user = self.normal_user
self.assertFalse(user.userprofile.is_teacher_only())
'''
Upload permissions
'''
def test_get_can_upload_admin(self):
profile = UserProfile.objects.get(user=self.admin_user)
self.assertEqual(profile.get_can_upload(), True)
def test_get_can_upload_staff(self):
profile = UserProfile.objects.get(user=self.staff_user)
self.assertEqual(profile.get_can_upload(), True)
def test_get_can_upload_teacher(self):
profile = UserProfile.objects.get(user=self.teacher_user)
self.assertEqual(profile.get_can_upload(), True)
def test_get_can_upload_user(self):
profile = UserProfile.objects.get(user=self.normal_user)
self.assertEqual(profile.get_can_upload(), False)
def test_get_can_upload_activity_log_admin(self):
profile = UserProfile.objects.get(user=self.admin_user)
self.assertEqual(profile.get_can_upload_activitylog(), True)
def test_get_can_upload_activity_log_staff(self):
profile = UserProfile.objects.get(user=self.staff_user)
self.assertEqual(profile.get_can_upload_activitylog(), True)
def test_get_can_upload_activity_log_teacher(self):
profile = UserProfile.objects.get(user=self.teacher_user)
self.assertEqual(profile.get_can_upload_activitylog(), False)
def test_get_can_upload_activity_log_user(self):
profile = UserProfile.objects.get(user=self.normal_user)
self.assertEqual(profile.get_can_upload_activitylog(), False)
'''
Custom fields
'''
def test_user_custom_field_model_name(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=self.VALUE_STR_DEFAULT)
upcf.save()
self.assertEqual('str: demo', str(upcf))
# test get_value string
def test_custom_field_get_value_str(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=self.VALUE_STR_DEFAULT)
upcf.save()
self.assertEqual(upcf.get_value(), self.VALUE_STR_DEFAULT)
self.assertNotEqual(upcf.get_value(), True)
self.assertNotEqual(upcf.get_value(), False)
self.assertNotEqual(upcf.get_value(), None)
self.assertNotEqual(upcf.get_value(), 123)
# test get_value int
def test_custom_field_get_value_int(self):
custom_field = CustomField(
id='int',
label='Integer',
required=True,
type='int')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_int=123)
upcf.save()
self.assertEqual(upcf.get_value(), 123)
self.assertNotEqual(upcf.get_value(), "123")
self.assertNotEqual(upcf.get_value(), True)
self.assertNotEqual(upcf.get_value(), False)
self.assertNotEqual(upcf.get_value(), None)
# get get value bool
def test_custom_field_get_value_bool(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=True)
upcf.save()
self.assertEqual(upcf.get_value(), True)
self.assertNotEqual(upcf.get_value(), "True")
self.assertNotEqual(upcf.get_value(), 123)
self.assertNotEqual(upcf.get_value(), False)
self.assertNotEqual(upcf.get_value(), None)
# test multiple rows in userprofilecustomfield
def test_custom_field_multiple_rows(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=self.VALUE_STR_DEFAULT)
upcf.save()
with self.assertRaises(IntegrityError):
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str="my other string")
upcf.save()
def test_wrong_type_bool_in_int(self):
custom_field = CustomField(
id='int',
label='Integer',
required=True,
type='int')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_int=True)
upcf.save()
self.assertEqual(True, upcf.get_value())
upcf.value_int = False
upcf.save()
self.assertEqual(False, upcf.get_value())
def test_wrong_type_bool_in_str(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=True)
upcf.save()
self.assertEqual(True, upcf.get_value())
upcf.value_str = False
upcf.save()
self.assertEqual(False, upcf.get_value())
def test_wrong_type_int_in_bool_123(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
with self.assertRaises(ValidationError):
UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=123).save()
def test_wrong_type_int_in_bool_0(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=0)
upcf.save()
self.assertEqual(0, upcf.get_value())
def test_wrong_type_int_in_bool_1(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=1)
upcf.save()
self.assertEqual(1, upcf.get_value())
def test_wrong_type_int_in_str(self):
custom_field = CustomField(
id='str',
label='String',
required=True,
type='str')
custom_field.save()
upcf = UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_str=123)
upcf.save()
self.assertEqual(123, upcf.get_value())
def test_wrong_type_str_in_bool(self):
custom_field = CustomField(
id='bool',
label='Boolean',
required=True,
type='bool')
custom_field.save()
with self.assertRaises(ValidationError):
UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_bool=self.VALUE_STR_DEFAULT).save()
def test_wrong_type_str_in_int(self):
custom_field = CustomField(
id='int',
label='Integer',
required=True,
type='int')
custom_field.save()
with self.assertRaises(ValueError):
UserProfileCustomField(key_name=custom_field,
user=self.normal_user,
value_int=self.VALUE_STR_DEFAULT).save()
| gpl-3.0 | 2,434,349,495,123,271,000 | 35.193916 | 76 | 0.542809 | false |
ingydotnet/crockford-py | package/info.py | 1 | 1307 | def get():
info = {}
info.update(
{ 'author': 'Ingy dot Net',
'author_email': '[email protected]',
'classifiers': [ 'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Intended Audience :: Developers'],
'description': "Encode and decode using Douglas Crockford's base32 encoding scheme:",
'long_description': 'crockford - Encode and Decode using the Crockford Base32 scheme\n---------------------------------------------------------------\n\nInstallation\n------------\n\nUse::\n\n > sudo pip install crockford\n\nor::\n\n > sudo easy install crockford\n\nor::\n\n > git clone git://github.com/ingydotnet/crockford-py.git\n > cd crockford-py\n > sudo make install\n\nUsage\n-----\n\n import crockford\n\n base32 = crockford.b32encode(string)\n string = crockford.b32decode(base32)\n\nAuthors\n-------\n\n* Ingy dot Net <[email protected]>\n\nCopyright\n---------\n\ncrockford is Copyright (c) 2011, Ingy dot Net\n\ncrockford is licensed under the New BSD License. See the LICENSE file.\n',
'name': 'crockford',
'packages': ['crockford'],
'scripts': [],
'url': 'http://github.com/ingydotnet/crockford-py/',
'version': '0.0.2'}
)
return info
| bsd-2-clause | -5,694,820,867,209,967,000 | 71.611111 | 722 | 0.604438 | false |
matrix-org/synapse | synapse/config/voip.py | 1 | 2161 | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class VoipConfig(Config):
section = "voip"
def read_config(self, config, **kwargs):
self.turn_uris = config.get("turn_uris", [])
self.turn_shared_secret = config.get("turn_shared_secret")
self.turn_username = config.get("turn_username")
self.turn_password = config.get("turn_password")
self.turn_user_lifetime = self.parse_duration(
config.get("turn_user_lifetime", "1h")
)
self.turn_allow_guests = config.get("turn_allow_guests", True)
def generate_config_section(self, **kwargs):
return """\
## TURN ##
# The public URIs of the TURN server to give to clients
#
#turn_uris: []
# The shared secret used to compute passwords for the TURN server
#
#turn_shared_secret: "YOUR_SHARED_SECRET"
# The Username and password if the TURN server needs them and
# does not use a token
#
#turn_username: "TURNSERVER_USERNAME"
#turn_password: "TURNSERVER_PASSWORD"
# How long generated TURN credentials last
#
#turn_user_lifetime: 1h
# Whether guests should be allowed to use the TURN server.
# This defaults to True, otherwise VoIP will be unreliable for guests.
# However, it does introduce a slight security risk as it allows users to
# connect to arbitrary endpoints without having first signed up for a
# valid account (e.g. by passing a CAPTCHA).
#
#turn_allow_guests: true
"""
| apache-2.0 | -7,243,922,222,192,386,000 | 35.016667 | 81 | 0.652938 | false |
NoumirPoutipou/oanq | conftest.py | 1 | 1596 | import pytest
from django.test import signals
from django.test.client import Client
from django.contrib.auth.models import User, Permission
from jinja2 import Template as Jinja2Template
## Wait for https://code.djangoproject.com/ticket/24622
ORIGINAL_JINJA2_RENDERER = Jinja2Template.render
def instrumented_render(template_object, *args, **kwargs):
context = dict(*args, **kwargs)
if 'request' in context:
context['user'] = context['request'].user
signals.template_rendered.send(
sender=template_object,
template=template_object,
context=context
)
return ORIGINAL_JINJA2_RENDERER(template_object, *args, **kwargs)
@pytest.fixture(scope="module")
def context(request):
Jinja2Template.render = instrumented_render
def fin():
Jinja2Template.render = ORIGINAL_JINJA2_RENDERER
request.addfinalizer(fin)
return None # provide nothing
@pytest.fixture()
def user(db):
user = User.objects.create_user('boubou', '[email protected]', 'pass')
return user
@pytest.fixture()
def user_client(db):
client = Client()
user = User.objects.create_user('boubou', '[email protected]', 'pass')
client.login(username='[email protected]', password='pass')
client.user = user
return client
@pytest.fixture()
def ivy_admin_client(db):
client = Client()
user = User.objects.create_user('boubouadmin', '[email protected]', 'pass')
user.user_permissions.add(Permission.objects.get(codename='change_admin'))
client.login(username='[email protected]', password='pass')
client.user = user
return client
| bsd-3-clause | 3,876,775,477,568,365,600 | 26.050847 | 78 | 0.701754 | false |
zozo123/buildbot | master/buildbot/status/client.py | 1 | 1078 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.status import base
from twisted.python import log
class PBListener(base.StatusReceiverBase):
# This class is still present in users' configs, so keep it here.
def __init__(self, port, user="statusClient", passwd="clientpw"):
log.msg("The PBListener status listener is unused and can be removed "
"from the configuration")
| gpl-3.0 | 5,875,943,680,776,824,000 | 40.461538 | 79 | 0.747681 | false |
Aquafina-water-bottle/Command-Compiler-Unlimited | test_fena/v1_13/test_teams.py | 1 | 1532 | from test_fena.test_common import test_cmd
def test_teams():
test_cmd("team add _team team_test", "team add fena.team team_test")
test_cmd("team add _team team test", "team add fena.team team test")
test_cmd("team empty _team", "team empty fena.team")
test_cmd("team _team + @a", "team join fena.team @a")
test_cmd("team _team + target", "team join fena.team target")
test_cmd("team leave @a", "team leave @a")
test_cmd("team leave target", "team leave target")
test_cmd("team remove _team", "team remove fena.team")
test_cmd("team _team friendlyfire = true", "team option fena.team friendlyfire true")
test_cmd("team _team color = green", "team option fena.team color green")
test_cmd("team _team seeFriendlyInvisibles = false", "team option fena.team seeFriendlyInvisibles false")
test_cmd("team _team nametagVisibility = hideForOwnTeam", "team option fena.team nametagVisibility hideForOwnTeam")
test_cmd("team _team deathMessageVisibility = never", "team option fena.team deathMessageVisibility never")
test_cmd("team _team collisionRule = pushOwnTeam", "team option fena.team collisionRule pushOwnTeam")
test_cmd(r'team _team prefix = {"text":"PREFIX","color":"blue"}', r'team option fena.team prefix {"text":"PREFIX","color":"blue"}')
test_cmd(r'team _team suffix = {"text":"SUFFIX","color":"red"}', r'team option fena.team suffix {"text":"SUFFIX","color":"red"}')
| mit | -1,500,593,524,616,907,500 | 71.952381 | 135 | 0.649478 | false |
appleseedhq/gaffer | python/GafferArnoldTest/__init__.py | 1 | 2815 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from ArnoldShaderTest import ArnoldShaderTest
from ArnoldRenderTest import ArnoldRenderTest
from ArnoldOptionsTest import ArnoldOptionsTest
from ArnoldAttributesTest import ArnoldAttributesTest
from ArnoldVDBTest import ArnoldVDBTest
from ArnoldLightTest import ArnoldLightTest
from ArnoldMeshLightTest import ArnoldMeshLightTest
from InteractiveArnoldRenderTest import InteractiveArnoldRenderTest
from ArnoldDisplacementTest import ArnoldDisplacementTest
from LightToCameraTest import LightToCameraTest
from IECoreArnoldPreviewTest import *
from ArnoldAOVShaderTest import ArnoldAOVShaderTest
from ArnoldAtmosphereTest import ArnoldAtmosphereTest
from ArnoldBackgroundTest import ArnoldBackgroundTest
from ArnoldTextureBakeTest import ArnoldTextureBakeTest
from ModuleTest import ModuleTest
from ArnoldShaderBallTest import ArnoldShaderBallTest
from ArnoldCameraShadersTest import ArnoldCameraShadersTest
from ArnoldLightFilterTest import ArnoldLightFilterTest
if __name__ == "__main__":
import unittest
unittest.main()
| bsd-3-clause | -7,469,409,601,125,669,000 | 46.711864 | 77 | 0.759858 | false |
cognitivefashion/cf-sdk-python | dominant_colors_product.py | 1 | 1998 | #------------------------------------------------------------------------------
# Get the dominant colors for an image in the catalog.
# GET /v1/catalog/{catalog_name}/dominant_colors/{id}/{image_id}
#------------------------------------------------------------------------------
import os
import json
import requests
from urlparse import urljoin
from pprint import pprint
from props import *
# Replace this with the custom url generated for you.
api_gateway_url = props['api_gateway_url']
# Pass the api key into the header.
headers = {'X-Api-Key': props['X-Api-Key']}
# Query parameters.
params = {}
# Optional parameters.
#params['fraction_pixels_threshold'] = 0.1
# Path parameters
catalog_name = props['catalog_name']
id = 'SHRES16AWFSDR9346B'
image_id = '1'
api_endpoint = '/v1/catalog/%s/dominant_colors/%s/%s'%(catalog_name,id,image_id)
url = urljoin(api_gateway_url,api_endpoint)
response = requests.get(url,
headers=headers,
params=params)
print response.status_code
pprint(response.json())
# Human-friendly response.
results = response.json()
print('[image url ] %s'%(results['image_url']))
image_location = '%s?api_key=%s'%(urljoin(api_gateway_url,results['image_location']),
props['X-Api-Key'])
print('[original image ] %s'%(image_location))
image_location = '%s&api_key=%s'%(urljoin(api_gateway_url,results['bounding_box']['image_location']),
props['X-Api-Key'])
print('[bounding box ] %s'%(image_location))
for color_info in results['dominant_colors']:
print('[dominant colors] %s - %1.2f - %s - %s - %s - %s'%(color_info['hex'],
color_info['fraction_pixels'],
color_info['name'],
color_info['entrylevel_name'],
color_info['universal_name'],
color_info['pantone_id']))
| apache-2.0 | -561,431,765,489,356,350 | 31.754098 | 101 | 0.544044 | false |
esthermm/enco | enco_category/models/purchase_report.py | 1 | 4459 | # -*- coding: utf-8 -*-
# © 2017 Esther Martín - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields, models
from openerp import tools
class PurchaseReport(models.Model):
_inherit = 'purchase.report'
purchase_categ_id = fields.Many2one(
comodel_name='crm.case.categ', readonly=True)
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
s.period_ack as period_ack,
s.purchase_categ_id as purchase_categ_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit/cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit/cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*l.price_unit/cr.rate)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor,
s.purchase_categ_id,
s.period_ack
)
""")
| gpl-3.0 | 3,419,847,582,266,955,300 | 46.414894 | 163 | 0.475656 | false |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/linalg/tests/test_matfuncs.py | 1 | 33360 | #!/usr/bin/env python
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for linalg.matfuncs module
"""
from __future__ import division, print_function, absolute_import
import functools
import random
import warnings
import numpy as np
import scipy.linalg
import scipy.linalg._expm_frechet
from numpy import array, matrix, identity, dot, sqrt
from numpy.testing import (TestCase, run_module_suite,
assert_array_equal, assert_array_less, assert_equal,
assert_array_almost_equal, assert_allclose, assert_, decorators)
from scipy._lib._numpy_compat import _assert_warns
from scipy.linalg import _matfuncs_inv_ssq
from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
expm, expm_frechet, expm_cond, norm)
from scipy.linalg.matfuncs import expm2, expm3
from scipy.optimize import minimize
def _get_al_mohy_higham_2012_experiment_1():
"""
Return the test matrix from Experiment (1) of [1]_.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.2210e-1, 3e4],
[0, 0, 0, 3.0744e-1]], dtype=float)
return A
class TestSignM(TestCase):
def test_nils(self):
a = array([[29.2, -24.2, 69.5, 49.8, 7.],
[-9.2, 5.2, -18., -16.8, -2.],
[-10., 6., -20., -18., -2.],
[-9.6, 9.6, -25.5, -15.4, -2.],
[9.8, -4.8, 18., 18.2, 2.]])
cr = array([[11.94933333, -2.24533333, 15.31733333, 21.65333333, -2.24533333],
[-3.84266667, 0.49866667, -4.59066667, -7.18666667, 0.49866667],
[-4.08, 0.56, -4.92, -7.6, 0.56],
[-4.03466667, 1.04266667, -5.59866667, -7.02666667, 1.04266667],
[4.15733333, -0.50133333, 4.90933333, 7.81333333, -0.50133333]])
r = signm(a)
assert_array_almost_equal(r, cr)
def test_defective1(self):
a = array([[0.0, 1, 0, 0], [1, 0, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
r = signm(a, disp=False)
# XXX: what would be the correct result?
def test_defective2(self):
a = array((
[29.2, -24.2, 69.5, 49.8, 7.0],
[-9.2, 5.2, -18.0, -16.8, -2.0],
[-10.0, 6.0, -20.0, -18.0, -2.0],
[-9.6, 9.6, -25.5, -15.4, -2.0],
[9.8, -4.8, 18.0, 18.2, 2.0]))
r = signm(a, disp=False)
# XXX: what would be the correct result?
def test_defective3(self):
a = array([[-2., 25., 0., 0., 0., 0., 0.],
[0., -3., 10., 3., 3., 3., 0.],
[0., 0., 2., 15., 3., 3., 0.],
[0., 0., 0., 0., 15., 3., 0.],
[0., 0., 0., 0., 3., 10., 0.],
[0., 0., 0., 0., 0., -2., 25.],
[0., 0., 0., 0., 0., 0., -3.]])
r = signm(a, disp=False)
# XXX: what would be the correct result?
class TestLogM(TestCase):
def test_nils(self):
a = array([[-2., 25., 0., 0., 0., 0., 0.],
[0., -3., 10., 3., 3., 3., 0.],
[0., 0., 2., 15., 3., 3., 0.],
[0., 0., 0., 0., 15., 3., 0.],
[0., 0., 0., 0., 3., 10., 0.],
[0., 0., 0., 0., 0., -2., 25.],
[0., 0., 0., 0., 0., 0., -3.]])
m = (identity(7) * 3.1 + 0j) - a
logm(m, disp=False)
# XXX: what would be the correct result?
def test_al_mohy_higham_2012_experiment_1_logm(self):
# The logm completes the round trip successfully.
# Note that the expm leg of the round trip is badly conditioned.
A = _get_al_mohy_higham_2012_experiment_1()
A_logm, info = logm(A, disp=False)
A_round_trip = expm(A_logm)
assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)
def test_al_mohy_higham_2012_experiment_1_funm_log(self):
# The raw funm with np.log does not complete the round trip.
# Note that the expm leg of the round trip is badly conditioned.
A = _get_al_mohy_higham_2012_experiment_1()
A_funm_log, info = funm(A, np.log, disp=False)
A_round_trip = expm(A_funm_log)
assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
def test_round_trip_random_float(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
# Eigenvalues are related to the branch cut.
W = np.linalg.eigvals(M)
err_msg = 'M:{0} eivals:{1}'.format(M, W)
# Check sqrtm round trip because it is used within logm.
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
# Check logm round trip.
M_logm, info = logm(M, disp=False)
M_logm_round_trip = expm(M_logm)
assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
def test_round_trip_random_complex(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_logm, info = logm(M, disp=False)
M_round_trip = expm(M_logm)
assert_allclose(M_round_trip, M)
def test_logm_type_preservation_and_conversion(self):
# The logm matrix function should preserve the type of a matrix
# whose eigenvalues are positive with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char in complex_dtype_chars)
# check float->complex type conversion for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char in complex_dtype_chars)
def test_complex_spectrum_real_logm(self):
# This matrix has complex eigenvalues and real logm.
# Its output dtype depends on its input dtype.
M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
for dt in float, complex:
X = np.array(M, dtype=dt)
w = scipy.linalg.eigvals(X)
assert_(1e-2 < np.absolute(w.imag).sum())
Y, info = logm(X, disp=False)
assert_(np.issubdtype(Y.dtype, dt))
assert_allclose(expm(Y), X)
def test_real_mixed_sign_spectrum(self):
# These matrices have real eigenvalues with mixed signs.
# The output logm dtype is complex, regardless of input dtype.
for M in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]]):
for dt in float, complex:
A = np.array(M, dtype=dt)
A_logm, info = logm(A, disp=False)
assert_(np.issubdtype(A_logm.dtype, complex))
def test_exactly_singular(self):
A = np.array([[0, 0], [1j, 1j]])
B = np.asarray([[1, 1], [0, 0]])
for M in A, A.T, B, B.T:
expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
L, info = _assert_warns(expected_warning, logm, M, disp=False)
E = expm(L)
assert_allclose(E, M, atol=1e-14)
def test_nearly_singular(self):
M = np.array([[1e-100]])
expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
L, info = _assert_warns(expected_warning, logm, M, disp=False)
E = expm(L)
assert_allclose(E, M, atol=1e-14)
def test_opposite_sign_complex_eigenvalues(self):
# See gh-6113
E = [[0, 1], [-1, 0]]
L = [[0, np.pi * 0.5], [-np.pi * 0.5, 0]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
E = [[1j, 4], [0, -1j]]
L = [[1j * np.pi * 0.5, 2 * np.pi], [0, -1j * np.pi * 0.5]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
E = [[1j, 0], [0, -1j]]
L = [[1j * np.pi * 0.5, 0], [0, -1j * np.pi * 0.5]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
class TestSqrtM(TestCase):
def test_round_trip_random_float(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
def test_round_trip_random_complex(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
def test_bad(self):
# See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
e = 2 ** -5
se = sqrt(e)
a = array([[1.0, 0, 0, 1],
[0, e, 0, 0],
[0, 0, e, 0],
[0, 0, 0, 1]])
sa = array([[1, 0, 0, 0.5],
[0, se, 0, 0],
[0, 0, se, 0],
[0, 0, 0, 1]])
n = a.shape[0]
assert_array_almost_equal(dot(sa, sa), a)
# Check default sqrtm.
esa = sqrtm(a, disp=False, blocksize=n)[0]
assert_array_almost_equal(dot(esa, esa), a)
# Check sqrtm with 2x2 blocks.
esa = sqrtm(a, disp=False, blocksize=2)[0]
assert_array_almost_equal(dot(esa, esa), a)
def test_sqrtm_type_preservation_and_conversion(self):
# The sqrtm matrix function should preserve the type of a matrix
# whose eigenvalues are nonnegative with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]],
[[1, 1], [1, 1]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
# check float->complex type conversion for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]],
[[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(any(w.imag or w.real < 0 for w in W))
# check complex->complex
A = np.array(matrix_as_list, dtype=complex)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
# check float->complex
A = np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
def test_blocksizes(self):
# Make sure I do not goof up the blocksizes when they do not divide n.
np.random.seed(1234)
for n in range(1, 8):
A = np.random.rand(n, n) + 1j * np.random.randn(n, n)
A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
for blocksize in range(1, 10):
A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
assert_allclose(A_sqrtm_default, A_sqrtm_new)
def test_al_mohy_higham_2012_experiment_1(self):
# Matrix square root of a tricky upper triangular matrix.
A = _get_al_mohy_higham_2012_experiment_1()
A_sqrtm, info = sqrtm(A, disp=False)
A_round_trip = A_sqrtm.dot(A_sqrtm)
assert_allclose(A_round_trip, A, rtol=1e-5)
assert_allclose(np.tril(A_round_trip), np.tril(A))
def test_strict_upper_triangular(self):
# This matrix has no square root.
for dt in int, float:
A = np.array([
[0, 3, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]], dtype=dt)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(np.isnan(A_sqrtm).all())
def test_weird_matrix(self):
# The square root of matrix B exists.
for dt in int, float:
A = np.array([
[0, 0, 1],
[0, 0, 0],
[0, 1, 0]], dtype=dt)
B = np.array([
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]], dtype=dt)
assert_array_equal(B, A.dot(A))
# But scipy sqrtm is not clever enough to find it.
B_sqrtm, info = sqrtm(B, disp=False)
assert_(np.isnan(B_sqrtm).all())
def test_disp(self):
np.random.seed(1234)
A = np.random.rand(3, 3)
B = sqrtm(A, disp=True)
assert_allclose(B.dot(B), A)
def test_opposite_sign_complex_eigenvalues(self):
M = [[2j, 4], [0, -2j]]
R = [[1 + 1j, 2], [0, 1 - 1j]]
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(sqrtm(M), R, atol=1e-14)
class TestFractionalMatrixPower(TestCase):
def test_round_trip_random_complex(self):
np.random.seed(1234)
for p in range(1, 5):
for n in range(1, 5):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_root = fractional_matrix_power(M, 1 / p)
M_round_trip = np.linalg.matrix_power(M_root, p)
assert_allclose(M_round_trip, M)
def test_round_trip_random_float(self):
# This test is more annoying because it can hit the branch cut;
# this happens when the matrix has an eigenvalue
# with no imaginary component and with a real negative component,
# and it means that the principal branch does not exist.
np.random.seed(1234)
for p in range(1, 5):
for n in range(1, 5):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_root = fractional_matrix_power(M, 1 / p)
M_round_trip = np.linalg.matrix_power(M_root, p)
assert_allclose(M_round_trip, M)
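    # Editor's addition (illustrative; not part of the original scipy suite):
    # the simplest input that hits the branch cut described above is a matrix
    # with a negative real eigenvalue -- its principal square root is complex.
    def test_branch_cut_minimal_example(self):
        M = np.array([[-4.0]])
        R = fractional_matrix_power(M, 0.5)
        assert_allclose(R, [[2j]], atol=1e-14)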
def test_larger_abs_fractional_matrix_powers(self):
np.random.seed(1234)
for n in (2, 3, 5):
for i in range(10):
M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
M_one_fifth = fractional_matrix_power(M, 0.2)
# Test the round trip.
M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
assert_allclose(M, M_round_trip)
# Test a large abs fractional power.
X = fractional_matrix_power(M, -5.4)
Y = np.linalg.matrix_power(M_one_fifth, -27)
assert_allclose(X, Y)
# Test another large abs fractional power.
X = fractional_matrix_power(M, 3.8)
Y = np.linalg.matrix_power(M_one_fifth, 19)
assert_allclose(X, Y)
def test_random_matrices_and_powers(self):
# Each independent iteration of this fuzz test picks random parameters.
# It tries to hit some edge cases.
np.random.seed(1234)
nsamples = 20
for i in range(nsamples):
# Sample a matrix size and a random real power.
n = random.randrange(1, 5)
p = np.random.randn()
# Sample a random real or complex matrix.
matrix_scale = np.exp(random.randrange(-4, 5))
A = np.random.randn(n, n)
if random.choice((True, False)):
A = A + 1j * np.random.randn(n, n)
A = A * matrix_scale
# Check a couple of analytically equivalent ways
# to compute the fractional matrix power.
# These can be compared because they both use the principal branch.
A_power = fractional_matrix_power(A, p)
A_logm, info = logm(A, disp=False)
A_power_expm_logm = expm(A_logm * p)
assert_allclose(A_power, A_power_expm_logm)
def test_al_mohy_higham_2012_experiment_1(self):
# Fractional powers of a tricky upper triangular matrix.
A = _get_al_mohy_higham_2012_experiment_1()
# Test remainder matrix power.
A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
A_sqrtm, info = sqrtm(A, disp=False)
A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
A_power = fractional_matrix_power(A, 0.5)
assert_array_equal(A_rem_power, A_power)
assert_allclose(A_sqrtm, A_power)
assert_allclose(A_sqrtm, A_funm_sqrt)
# Test more fractional powers.
for p in (1 / 2, 5 / 3):
A_power = fractional_matrix_power(A, p)
A_round_trip = fractional_matrix_power(A_power, 1 / p)
assert_allclose(A_round_trip, A, rtol=1e-2)
assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))
def test_briggs_helper_function(self):
np.random.seed(1234)
for a in np.random.randn(10) + 1j * np.random.randn(10):
for k in range(5):
x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
x_expected = a ** np.exp2(-k) - 1
assert_allclose(x_observed, x_expected)
def test_type_preservation_and_conversion(self):
# The fractional_matrix_power matrix function should preserve
# the type of a matrix whose eigenvalues
# are positive with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# Check various positive and negative powers
# with absolute values bigger and smaller than 1.
for p in (-2.4, -0.9, 0.2, 3.3):
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
# check float->complex for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
def test_type_conversion_mixed_sign_or_complex_spectrum(self):
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]],
[[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(any(w.imag or w.real < 0 for w in W))
# Check various positive and negative powers
# with absolute values bigger and smaller than 1.
for p in (-2.4, -0.9, 0.2, 3.3):
# check complex->complex
A = np.array(matrix_as_list, dtype=complex)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
# check float->complex
A = np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
@decorators.knownfailureif(True, 'Too unstable across LAPACKs.')
def test_singular(self):
# Negative fractional powers do not work with singular matrices.
for matrix_as_list in (
[[0, 0], [0, 0]],
[[1, 1], [1, 1]],
[[1, 2], [3, 6]],
[[0, 0, 0], [0, 1, 1], [0, -1, 1]]):
# Check fractional powers both for float and for complex types.
for newtype in (float, complex):
A = np.array(matrix_as_list, dtype=newtype)
for p in (-0.7, -0.9, -2.4, -1.3):
A_power = fractional_matrix_power(A, p)
assert_(np.isnan(A_power).all())
for p in (0.2, 1.43):
A_power = fractional_matrix_power(A, p)
A_round_trip = fractional_matrix_power(A_power, 1 / p)
assert_allclose(A_round_trip, A)
def test_opposite_sign_complex_eigenvalues(self):
M = [[2j, 4], [0, -2j]]
R = [[1 + 1j, 2], [0, 1 - 1j]]
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
class TestExpM(TestCase):
def test_zero(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
a = array([[0., 0], [0, 0]])
assert_array_almost_equal(expm(a), [[1, 0], [0, 1]])
assert_array_almost_equal(expm2(a), [[1, 0], [0, 1]])
assert_array_almost_equal(expm3(a), [[1, 0], [0, 1]])
def test_consistency(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
a = array([[0., 1], [-1, 0]])
assert_array_almost_equal(expm(a), expm2(a))
assert_array_almost_equal(expm(a), expm3(a))
a = array([[1j, 1], [-1, -2j]])
assert_array_almost_equal(expm(a), expm2(a))
assert_array_almost_equal(expm(a), expm3(a))
def test_npmatrix(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
a = matrix([[3., 0], [0, -3.]])
assert_array_almost_equal(expm(a), expm2(a))
def test_single_elt(self):
# See gh-5853
from scipy.sparse import csc_matrix
vOne = -2.02683397006j
vTwo = -2.12817566856j
mOne = csc_matrix([[vOne]], dtype='complex')
mTwo = csc_matrix([[vTwo]], dtype='complex')
outOne = expm(mOne)
outTwo = expm(mTwo)
assert_equal(type(outOne), type(mOne))
assert_equal(type(outTwo), type(mTwo))
assert_allclose(outOne[0, 0], complex(-0.44039415155949196,
-0.8978045395698304))
assert_allclose(outTwo[0, 0], complex(-0.52896401032626006,
-0.84864425749518878))
class TestExpmFrechet(TestCase):
def test_expm_frechet(self):
# a test of the basic functionality
M = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[0, 0, 1, 2],
[0, 0, 5, 6],
], dtype=float)
A = np.array([
[1, 2],
[5, 6],
], dtype=float)
E = np.array([
[3, 4],
[7, 8],
], dtype=float)
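        # M is the block matrix [[A, E], [0, A]]; the standard identity
        # expm(M) = [[expm(A), L(A, E)], [0, expm(A)]] means the upper-right
        # block of expm(M) is the Frechet derivative of expm at A in
        # direction E, which is what expected_frechet extracts below.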
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:2, 2:]
for kwargs in ({}, {'method': 'SPS'}, {'method': 'blockEnlarge'}):
observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_small_norm_expm_frechet(self):
# methodically test matrices with a range of norms, for better coverage
M_original = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[0, 0, 1, 2],
[0, 0, 5, 6],
], dtype=float)
A_original = np.array([
[1, 2],
[5, 6],
], dtype=float)
E_original = np.array([
[3, 4],
[7, 8],
], dtype=float)
A_original_norm_1 = scipy.linalg.norm(A_original, 1)
selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
for ma, mb in m_neighbor_pairs:
ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
target_norm_1 = 0.5 * (ell_a + ell_b)
scale = target_norm_1 / A_original_norm_1
M = scale * M_original
A = scale * A_original
E = scale * E_original
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:2, 2:]
observed_expm, observed_frechet = expm_frechet(A, E)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_fuzz(self):
# try a bunch of crazy inputs
rfuncs = (
np.random.uniform,
np.random.normal,
np.random.standard_cauchy,
np.random.exponential)
ntests = 100
for i in range(ntests):
rfunc = random.choice(rfuncs)
target_norm_1 = random.expovariate(1.0)
n = random.randrange(2, 16)
A_original = rfunc(size=(n, n))
E_original = rfunc(size=(n, n))
A_original_norm_1 = scipy.linalg.norm(A_original, 1)
scale = target_norm_1 / A_original_norm_1
A = scale * A_original
E = scale * E_original
M = np.vstack([
np.hstack([A, E]),
np.hstack([np.zeros_like(A), A])])
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:n, n:]
observed_expm, observed_frechet = expm_frechet(A, E)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_problematic_matrix(self):
# this test case uncovered a bug which has since been fixed
A = np.array([
[1.50591997, 1.93537998],
[0.41203263, 0.23443516],
], dtype=float)
E = np.array([
[1.87864034, 2.07055038],
[1.34102727, 0.67341123],
], dtype=float)
A_norm_1 = scipy.linalg.norm(A, 1)
sps_expm, sps_frechet = expm_frechet(
A, E, method='SPS')
blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
A, E, method='blockEnlarge')
assert_allclose(sps_expm, blockEnlarge_expm)
assert_allclose(sps_frechet, blockEnlarge_frechet)
@decorators.slow
@decorators.skipif(True, 'this test is deliberately slow')
def test_medium_matrix(self):
# profile this to see the speed difference
n = 1000
A = np.random.exponential(size=(n, n))
E = np.random.exponential(size=(n, n))
sps_expm, sps_frechet = expm_frechet(
A, E, method='SPS')
blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
A, E, method='blockEnlarge')
assert_allclose(sps_expm, blockEnlarge_expm)
assert_allclose(sps_frechet, blockEnlarge_frechet)
def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
p = np.reshape(p, A.shape)
p_norm = norm(p)
perturbation = eps * p * (A_norm / p_norm)
X_prime = expm(A + perturbation)
scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
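    # Negated so that scipy.optimize.minimize, used in the fuzz test below,
    # effectively maximizes the scaled relative error.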
return -scaled_relative_error
def _normalized_like(A, B):
return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
def _relative_error(f, A, perturbation):
X = f(A)
X_prime = f(A + perturbation)
return norm(X_prime - X) / norm(X)
class TestExpmConditionNumber(TestCase):
def test_expm_cond_smoke(self):
np.random.seed(1234)
for n in range(1, 4):
A = np.random.randn(n, n)
kappa = expm_cond(A)
assert_array_less(0, kappa)
def test_expm_bad_condition_number(self):
A = np.array([
[-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
[0, -1.201010529, 9.634696872e4, -4.681048289e9],
[0, 0, -1.132893222, 9.532491830e4],
[0, 0, 0, -1.179475332],
])
kappa = expm_cond(A)
assert_array_less(1e36, kappa)
def test_univariate(self):
np.random.seed(12345)
for x in np.linspace(-5, 5, num=11):
A = np.array([[x]])
assert_allclose(expm_cond(A), abs(x))
for x in np.logspace(-2, 2, num=11):
A = np.array([[x]])
assert_allclose(expm_cond(A), abs(x))
for i in range(10):
A = np.random.randn(1, 1)
assert_allclose(expm_cond(A), np.absolute(A)[0, 0])
@decorators.slow
def test_expm_cond_fuzz(self):
np.random.seed(12345)
eps = 1e-5
nsamples = 10
for i in range(nsamples):
n = np.random.randint(2, 5)
A = np.random.randn(n, n)
A_norm = scipy.linalg.norm(A)
X = expm(A)
X_norm = scipy.linalg.norm(X)
kappa = expm_cond(A)
# Look for the small perturbation that gives the greatest
# relative error.
f = functools.partial(_help_expm_cond_search,
A, A_norm, X, X_norm, eps)
guess = np.ones(n * n)
out = minimize(f, guess, method='L-BFGS-B')
xopt = out.x
yopt = f(xopt)
p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
p_best_relerr = _relative_error(expm, A, p_best)
assert_allclose(p_best_relerr, -yopt * eps)
# Check that the identified perturbation indeed gives greater
# relative error than random perturbations with similar norms.
for j in range(5):
p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
assert_allclose(norm(p_best), norm(p_rand))
p_rand_relerr = _relative_error(expm, A, p_rand)
assert_array_less(p_rand_relerr, p_best_relerr)
# The greatest relative error should not be much greater than
# eps times the condition number kappa.
# In the limit as eps approaches zero it should never be greater.
assert_array_less(p_best_relerr, (1 + 2 * eps) * eps * kappa)
if __name__ == "__main__":
run_module_suite()
| mit | -4,768,763,057,479,168,000 | 39.387409 | 91 | 0.523891 | false |
oubiwann/myriad-worlds | myriad/story.py | 1 | 3923 | import yaml
from myriad.character import Player
from myriad.world import Map, World
from myriad.item import Item, OpenableItem, OpenableReadableItem, ReadableItem
# XXX maybe the story object should have a map attribute assigned based on
# story type... e.g., provided ASCII map, procedurally generated map, etc.
class Story(object):
def __init__(self, filename):
self.storyFile = filename
self.stream = open(self.storyFile)
self.data = yaml.load(self.stream)
# XXX map should be an attribute of the world
self.map = Map(self.data.get("map"))
self.world = World()
self.world.setScapes(self.map.getScapes())
# XXX what form do these take when the map is procedural?
self.createItems()
self.updateScapes()
self.createCharacters()
def _getItem(self, itemName):
for item in self.data.get("items"):
if item.get("name") == itemName:
return item
def getMap(self):
return self.map.getData()
def createItems(self):
itemsData = self.data.get("items")
if not itemsData:
return
for itemData in itemsData:
self.createItem(itemData)
def updateScapes(self):
scapesData = self.data.get("scapes")
if not scapesData:
return
for scapeData in scapesData:
scape = self.world.scapes.get(scapeData.get("room-key"))
startingPlace = scapeData.get("startingPlace")
if startingPlace:
scape.startingPlace = True
self.setStartingPlace(scape)
scape.name = scapeData.get("name")
self.world.scapes[scape.name] = scape
scape.desc = scapeData.get("description")
scape.gameOver = scapeData.get("gameOver")
itemsList = scapeData.get("items")
if not itemsList:
continue
for itemName in itemsList:
self.processItem(itemName, scape)
def createItem(self, itemData):
items = []
if itemData.has_key("items"):
itemNames = itemData.pop("items")
items = [Item.items[x] for x in itemNames]
if itemData.get("isOpenable") and itemData.get("isReadable"):
itemData.pop("isReadable")
item = OpenableReadableItem(itemData.get("name"), items)
elif itemData.get("isOpenable"):
item = OpenableItem(itemData.get("name"), items)
elif itemData.get("isReadable"):
itemData.pop("isReadable")
item = ReadableItem(**itemData)
else:
item = Item(**itemData)
return item
def processItem(self, itemName, scape):
# XXX I don't like the way that all items are tracked on the Item
# object... it doesn't make sense that every item in the world would
# know about all other items in the world. Once that's fixed, we just
# use the scape object's addItem method
self.world.putItemInScape(itemName, scape)
def setStartingPlace(self, tile):
self.map.setStartingPlace(tile)
def getStartingPlace(self):
return self.map.getStartingPlace()
def createCharacters(self):
charactersData = self.data.get("characters")
if not charactersData:
return
for characterData in charactersData:
if characterData.get("isPlayer") == True:
player = Player(characterData.get("name"))
for itemName in characterData.get("inventory"):
player.take(Item.items[itemName])
self.world.placeCharacterInScape(
player, self.getStartingPlace(), isPlayer=True)
def createLayers(self):
layersData = self.data.get("layers")
if not layersData:
return
for layerData in layersData:
pass
| mit | -3,348,018,576,291,918,000 | 35.663551 | 78 | 0.604384 | false |
xflows/clowdflows-backend | workflows/api/serializers.py | 1 | 14887 | import json
from django.contrib.auth.models import User
from django.db.models import Prefetch
from django.template.loader import render_to_string
from rest_framework import serializers
from rest_framework.reverse import reverse
from mothra.settings import STATIC_URL, MEDIA_URL
from streams.models import Stream
from workflows.models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('username',)
class AbstractOptionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = AbstractOption
fields = ('name', 'value')
read_only_fields = ('name', 'value')
class AbstractInputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
options = AbstractOptionSerializer(many=True, read_only=True)
class Meta:
model = AbstractInput
fields = (
'id', 'name', 'short_name', 'description', 'variable', 'required', 'parameter', 'multi', 'default',
'parameter_type',
'order', 'options')
read_only_fields = (
'id', 'name', 'short_name', 'description', 'variable', 'required', 'parameter', 'multi', 'default',
'parameter_type',
'order', 'options')
class AbstractOutputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
class Meta:
model = AbstractOutput
fields = ('id', 'name', 'short_name', 'description', 'variable', 'order')
read_only_fields = ('id', 'name', 'short_name', 'description', 'variable', 'order')
class AbstractWidgetSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField()
inputs = AbstractInputSerializer(many=True, read_only=True)
outputs = AbstractOutputSerializer(many=True, read_only=True)
cfpackage = serializers.SerializerMethodField()
visualization = serializers.SerializerMethodField()
def get_cfpackage(self, obj):
return obj.package
def get_visualization(self, obj):
return obj.visualization_view != ''
class Meta:
model = AbstractWidget
fields = ('id', 'name', 'interactive', 'visualization', 'static_image', 'order', 'outputs', 'inputs', 'cfpackage', 'description', 'always_save_results')
read_only_fields = ('id', 'name', 'interactive', 'visualization', 'static_image', 'order', 'outputs', 'inputs', 'cfpackage', 'always_save_results')
class CategorySerializer(serializers.HyperlinkedModelSerializer):
widgets = AbstractWidgetSerializer(many=True, read_only=True)
class Meta:
model = Category
fields = ('name', 'user', 'order', 'children', 'widgets')
read_only_fields = ('name', 'user', 'order', 'children', 'widgets')
CategorySerializer._declared_fields['children'] = CategorySerializer(many=True, read_only=True)
class ConnectionSerializer(serializers.HyperlinkedModelSerializer):
output_widget = serializers.SerializerMethodField()
input_widget = serializers.SerializerMethodField()
def get_output_widget(self, obj):
request = self.context['request']
return request.build_absolute_uri(reverse('widget-detail', kwargs={'pk': obj.output.widget_id}))
# return WidgetListSerializer(obj.output.widget, context=self.context).data["url"]
def get_input_widget(self, obj):
request = self.context['request']
return request.build_absolute_uri(reverse('widget-detail', kwargs={'pk': obj.input.widget_id}))
# return WidgetListSerializer(obj.input.widget, context=self.context).data["url"]
class Meta:
model = Connection
class OptionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Option
fields = ('name', 'value')
class InputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
deserialized_value = serializers.SerializerMethodField()
options = OptionSerializer(many=True, read_only=True)
abstract_input_id = serializers.SerializerMethodField()
def get_deserialized_value(self, obj):
if obj.parameter:
try:
json.dumps(obj.value)
except:
return repr(obj.value)
else:
return obj.value
else:
return ''
def get_abstract_input_id(self, obj):
return obj.abstract_input_id
class Meta:
model = Input
exclude = ('value', 'abstract_input')
read_only_fields = ('id', 'url', 'widget')
class OutputSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
abstract_output_id = serializers.SerializerMethodField()
def get_abstract_output_id(self, obj):
return obj.abstract_output_id
class Meta:
model = Output
exclude = ('value', 'abstract_output')
read_only_fields = ('id', 'url', 'widget')
def get_workflow_preview(request, obj):
min_x = 10000
min_y = 10000
max_x = 0
max_y = 0
max_width = 300
max_height = 200
normalized_values = {}
obj.normalized_widgets = obj.widgets.all()
obj.unique_connections = []
obj.pairs = []
for widget in obj.normalized_widgets:
if widget.x > max_x:
max_x = widget.x
if widget.x < min_x:
min_x = widget.x
if widget.y > max_y:
max_y = widget.y
if widget.y < min_y:
min_y = widget.y
for widget in obj.normalized_widgets:
x = (widget.x - min_x) * 1.0
y = (widget.y - min_y) * 1.0
normalized_max_x = max_x - min_x
if x == 0:
x = 1
if y == 0:
y = 1
if normalized_max_x == 0:
normalized_max_x = x * 2
normalized_max_y = max_y - min_y
if normalized_max_y == 0:
normalized_max_y = y * 2
widget.norm_x = (x / normalized_max_x) * max_width
widget.norm_y = (y / normalized_max_y) * max_height
normalized_values[widget.id] = (widget.norm_x, widget.norm_y)
for c in obj.connections.all():
if not (c.output.widget_id, c.input.widget_id) in obj.pairs:
obj.pairs.append((c.output.widget_id, c.input.widget_id))
for pair in obj.pairs:
conn = {}
conn['x1'] = normalized_values[pair[0]][0] + 40
conn['y1'] = normalized_values[pair[0]][1] + 15
conn['x2'] = normalized_values[pair[1]][0] - 10
conn['y2'] = normalized_values[pair[1]][1] + 15
obj.unique_connections.append(conn)
base_url = request.build_absolute_uri('/')[:-1]
images_url = '{}{}'.format(base_url, STATIC_URL)
preview_html = render_to_string('preview.html', {'w': obj, 'images_url': images_url})
return preview_html
class StreamSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
last_executed = serializers.DateTimeField(read_only=True)
period = serializers.IntegerField()
active = serializers.BooleanField(read_only=True)
class Meta:
model = Stream
class WorkflowListSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
user = UserSerializer(read_only=True)
stream = StreamSerializer()
is_subprocess = serializers.SerializerMethodField()
is_public = serializers.BooleanField(source='public')
can_be_streaming = serializers.SerializerMethodField()
def get_is_subprocess(self, obj):
if obj.widget == None:
return False
else:
return True
def get_can_be_streaming(self, obj):
return obj.can_be_streaming()
def get_stream_active(self, obj):
return None
class Meta:
model = Workflow
exclude = ('public',)
class WorkflowPreviewSerializer(WorkflowListSerializer):
preview = serializers.SerializerMethodField()
def get_preview(self, obj):
return get_workflow_preview(self.context['request'], obj)
class WidgetSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
inputs = InputSerializer(many=True, read_only=True)
outputs = OutputSerializer(many=True, read_only=True)
description = serializers.CharField(source='abstract_widget.description', read_only=True)
icon = serializers.SerializerMethodField()
must_save = serializers.SerializerMethodField()
can_interact = serializers.SerializerMethodField()
workflow_link = serializers.HyperlinkedRelatedField(
read_only=True,
view_name='workflow-detail'
)
abstract_widget = serializers.PrimaryKeyRelatedField(queryset=AbstractWidget.objects.all(), allow_null=True)
def create(self, validated_data):
'''
Overrides the default create method to support nested creates
'''
w = Widget.objects.create(**validated_data)
aw = w.abstract_widget
input_order, param_order = 0, 0
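        # Materialize concrete Input/Output rows (and their options) from the
        # widget's abstract definition, keeping separate ordering counters for
        # parameters and regular inputs.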
for i in aw.inputs.all():
j = Input()
j.name = i.name
j.short_name = i.short_name
j.description = i.description
j.variable = i.variable
j.widget = w
j.required = i.required
j.parameter = i.parameter
j.value = None
j.abstract_input = i
if (i.parameter):
param_order += 1
j.order = param_order
else:
input_order += 1
j.order = input_order
if not i.multi:
j.value = i.default
j.parameter_type = i.parameter_type
if i.multi:
j.multi_id = i.id
j.save()
for k in i.options.all():
o = Option()
o.name = k.name
o.value = k.value
o.input = j
o.save()
outputOrder = 0
for i in aw.outputs.all():
j = Output()
j.name = i.name
j.short_name = i.short_name
j.description = i.description
j.variable = i.variable
j.widget = w
j.abstract_output = i
outputOrder += 1
j.order = outputOrder
j.save()
w.defered_outputs = w.outputs.defer("value").all()
w.defered_inputs = w.inputs.defer("value").all()
return w
def update(self, widget, validated_data):
'''
Overrides the default update method to support nested creates
'''
# Ignore inputs and outputs on patch - we allow only nested creates
if 'inputs' in validated_data:
validated_data.pop('inputs')
if 'outputs' in validated_data:
validated_data.pop('outputs')
widget, _ = Widget.objects.update_or_create(pk=widget.pk, defaults=validated_data)
if widget.type == 'subprocess':
widget.workflow_link.name = widget.name
widget.workflow_link.save()
return widget
def get_must_save(self, widget):
'''
        Some widgets always require their inputs and outputs to be saved.
'''
must_save = False
if widget.abstract_widget:
must_save = widget.abstract_widget.interactive or widget.is_visualization() or widget.abstract_widget.always_save_results
return must_save
def get_can_interact(self, widget):
can_interact = False
if widget.abstract_widget:
can_interact = widget.abstract_widget.interactive
return can_interact
def get_icon(self, widget):
full_path_tokens = self.context['request'].build_absolute_uri().split('/')
protocol = full_path_tokens[0]
base_url = full_path_tokens[2]
icon_path = 'special_icons/question-mark.png'
static_or_media = STATIC_URL
if widget.abstract_widget:
if widget.abstract_widget.static_image:
icon_path = '{}/icons/widget/{}'.format(widget.abstract_widget.package,
widget.abstract_widget.static_image)
elif widget.abstract_widget.image:
static_or_media = MEDIA_URL
icon_path = widget.abstract_widget.image
elif widget.abstract_widget.wsdl:
icon_path = 'special_icons/ws.png'
elif hasattr(widget, 'workflow_link'):
icon_path = 'special_icons/subprocess.png'
        elif widget.type == 'input':
            icon_path = 'special_icons/forward-arrow.png'
        elif widget.type == 'output':
            icon_path = 'special_icons/forward-arrow.png'
        elif widget.type in ('for_input', 'for_output'):
            # The original code repeated the == 'output' check here, leaving this
            # branch unreachable; the loop icon presumably belongs to the for-loop
            # widget types (an assumption, not confirmed by this file).
            icon_path = 'special_icons/loop.png'
icon_url = '{}//{}{}{}'.format(protocol, base_url, static_or_media, icon_path)
return icon_url
class Meta:
model = Widget
fields = (
'id', 'url', 'workflow', 'x', 'y', 'name', 'save_results', 'must_save', 'can_interact', 'abstract_widget', 'finished',
'error', 'running', 'interaction_waiting', 'description', 'icon', 'type', 'progress', 'inputs', 'outputs',
'workflow_link')
class WidgetPositionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Widget
fields = ('x', 'y')
class WidgetListSerializer(serializers.HyperlinkedModelSerializer):
abstract_widget = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = Widget
# exclude = ('abstract_widget',)
class StreamWidgetSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
class Meta:
model = Widget
fields = ('id', 'url', 'name')
class WorkflowSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(read_only=True)
widgets = WidgetSerializer(many=True, read_only=True)
user = UserSerializer(read_only=True)
connections = ConnectionSerializer(many=True, read_only=True)
is_subprocess = serializers.SerializerMethodField()
is_public = serializers.BooleanField(source='public')
def get_is_subprocess(self, obj):
if obj.widget == None:
return False
else:
return True
class Meta:
model = Workflow
exclude = ('public',)
class StreamDetailSerializer(StreamSerializer):
workflow = WorkflowListSerializer(read_only=True)
stream_visualization_widgets = serializers.SerializerMethodField()
def get_stream_visualization_widgets(self, obj):
widgets = obj.stream_visualization_widgets()
data = StreamWidgetSerializer(widgets, many=True, read_only=True, context={'request': self.context['request']}).data
return data
| mit | 7,936,824,907,788,334,000 | 34.70024 | 160 | 0.623363 | false |
8l/beri | cheritest/trunk/tests/fpu/test_raw_fpu_mov_gpr.py | 1 | 2722 | #-
# Copyright (c) 2012 Ben Thorner
# Copyright (c) 2013 Colin Rothwell
# All rights reserved.
#
# This software was developed by Ben Thorner as part of his summer internship
# and Colin Rothwell as part of his final year undergraduate project.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_raw_fpu_mov_gpr(BaseBERITestCase):
@attr('floatcmove')
def test_mov_gpr(self):
        '''Test we can move conditionally on a GPR'''
self.assertRegisterEqual(self.MIPS.s0, 0x41000000, "Failed MOVN on condition true in single precision");
self.assertRegisterEqual(self.MIPS.s1, 0x4000000000000000, "Failed MOVN on condition true in double precision");
self.assertRegisterEqual(self.MIPS.s2, 0x4000000041000000, "Failed MOVN on condition true in paired single precision");
self.assertRegisterEqual(self.MIPS.s3, 0x0, "Failed MOVN on condition false in single precision");
self.assertRegisterEqual(self.MIPS.s4, 0x0, "Failed MOVN on condition false in double precision");
self.assertRegisterEqual(self.MIPS.s5, 0x0, "Failed MOVN on condition false in paired single precision");
self.assertRegisterEqual(self.MIPS.s6, 0x41000000, "Failed MOVZ on condition true in single precision");
self.assertRegisterEqual(self.MIPS.s7, 0x4000000000000000, "Failed MOVZ on condition true in double precision");
self.assertRegisterEqual(self.MIPS.a0, 0x4000000041000000, "Failed MOVZ on condition true in paired single precision");
self.assertRegisterEqual(self.MIPS.a1, 0x0, "Failed MOVZ on condition false in single precision");
self.assertRegisterEqual(self.MIPS.a2, 0x0, "Failed MOVZ on condition false in double precision");
self.assertRegisterEqual(self.MIPS.a3, 0x0, "Failed MOVZ on condition false in paired single precision");
| apache-2.0 | -1,509,914,363,786,523,100 | 56.914894 | 127 | 0.754225 | false |
ai-se/george | Models/usp05.py | 1 | 7552 | """
# The USP05 Data Set
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
@attribute ObjType {FT,PJ,RQ}
@attribute IntComplx {5.0,2.0,1.0,4.0,3.0,3.5,2.5,4.5,NULL}
@attribute DataFile {18.0,9.0,7.0,12.0,2.0,5.0,4.0,3.0,1.0,11.0,0.0,75.0,13.0,6.0,8.0,NULL,32.0}
@attribute DataEn {94.0,240.0,15.0,90.0,314.0,1.0,4.0,3.0,2.0,6.0,0.0,20.0,60.0,30.0,5.0,17.0,10.0,7.0,45.0,48.0,12.0,83.0,150.0,36.0,186.0,9.0,11.0,52.0,25.0,14.0,8.0,NULL,50.0,13.0}
@attribute DataOut {NULL,0.0,1.0,2.0,4.0,20.0,5.0,50.0,12.0,76.0,6.0,69.0,200.0,34.0,108.0,9.0,3.0,8.0,7.0,10.0,18.0,16.0,17.0,13.0,14.0,11.0}
@attribute UFP {NULL,0.0,2.0,3.0,4.0,50.0,46.0,66.0,48.0,36.0,44.0,14.0,8.0,10.0,20.0,25.0,35.0,1.0,6.0,49.0,19.0,64.0,55.0,30.0,180.0,190.0,250.0,1085.0,510.0,210.0,1714.0,11.0,5.0,7.0,17.0,27.0,34.0,154.0,18.0,321.0,90.0,75.0,60.0,40.0,95.0,29.0,23.0,15.0,32.0,31.0,26.0,37.0,12.0,16.0,224.0,22.0,235.0,59.0,147.0,153.0,166.0,137.0,33.0,56.0,57.0,76.0,104.0,105.0}
@attribute AppExpr numeric
@attribute Effort numeric
Data:
"""
def usp05(weighFeature = False,
split="median"):
vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0;
FT=0;PJ=1;RQ=2;NULL=0;
return data(indep= [
# 0..6
'ObjType','IntComplx','DataFile','DataEn','DataOut','UFP','AppExpr'],
less = ['effort'],
_rows=[
[FT,5,18,94,NULL,NULL,4,2.5],
[FT,5,9,240,NULL,NULL,4,2.5],
[FT,2,9,15,0,0,4,2],
[FT,2,9,15,0,0,4,2],
[FT,2,9,15,0,0,5,3.5],
[FT,1,7,90,0,0,4,2],
[FT,2,9,90,0,0,5,2],
[FT,2,9,90,0,0,5,2],
[FT,5,12,314,0,0,5,16],
[FT,2,2,1,1,2,2,1],
[FT,1,2,4,1,0,1,2],
[FT,1,2,4,1,0,1,1],
[FT,4,2,3,1,0,3,5],
[FT,1,2,1,1,0,2,2],
[FT,1,2,1,1,0,2,2],
[FT,1,2,1,1,0,3,3],
[FT,2,5,2,2,0,2,7],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,4,4,1,0,2,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,4,4,1,0,2,1],
[FT,1,2,2,1,0,2,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,4,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,4,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,2,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,3,1,3,1,1],
[FT,1,2,2,1,3,1,1],
[FT,5,3,1,1,0,1,1],
[FT,1,2,1,1,0,1,1],
[FT,1,2,3,1,3,1,1],
[FT,1,2,2,1,3,1,1],
[FT,5,4,1,1,0,5,1],
[FT,1,2,1,1,0,2,1],
[FT,1,4,1,1,0,2,1],
[FT,4,4,6,1,4,4,1],
[FT,1,4,4,1,0,3,1],
[FT,2,4,2,1,0,3,1],
[FT,3,3,2,1,50,2,40],
[FT,2,3,1,1,46,2,40],
[FT,3,1,2,4,66,2,20],
[FT,3,2,1,2,48,2,20],
[FT,2,2,1,1,36,2,10],
[FT,4,2,3,1,44,2,20],
[FT,2,7,3,2,14,2,8],
[FT,3,2,2,1,8,4,3],
[FT,2,2,3,1,10,1,3],
[FT,2,12,0,0,10,1,6],
[FT,4,1,20,20,20,1,10],
[FT,3,5,20,5,25,2,6],
[FT,4,11,60,50,35,1,12],
[FT,1,4,30,12,20,3,8],
[FT,1,0,0,0,1,5,0.5],
[FT,1,0,0,0,1,4,1],
[FT,2,3,2,1,6,1,24],
[FT,1,2,2,0,4,4,0.5],
[FT,1,2,2,0,4,4,0.5],
[FT,1,2,1,0,4,4,0.5],
[FT,1,2,0,2,6,4,0.5],
[FT,3,0,15,1,49,4,24],
[FT,2,0,5,1,19,4,8],
[FT,3,0,20,1,64,4,20],
[FT,2,0,17,1,55,4,4],
[FT,4,0,10,0,30,4,30],
[FT,3,0,7,1,25,4,8],
[FT,3,0,45,0,180,5,5],
[PJ,4,75,48,76,190,4,75],
[PJ,3,13,12,6,250,2,220],
[PJ,3,7,83,69,1085,3,400],
[PJ,3,12,150,200,510,2,100],
[PJ,2,5,36,34,210,4,70],
[PJ,3,12,186,108,1714,3,69],
[RQ,3,5,4,2,10,5,2.5],
[RQ,3,5,4,2,10,5,2.5],
[RQ,3,4,0,9,10,5,2],
[RQ,3,3,7,4,11,5,1.5],
[RQ,2,3,3,2,4,5,2],
[RQ,4,6,6,2,5,5,2.5],
[RQ,3,4,4,4,2,5,2.5],
[RQ,1,9,15,0,0,5,2],
[RQ,1,9,15,0,0,5,1],
[RQ,1,9,15,0,0,5,1],
[RQ,1,9,15,0,0,5,0.5],
[RQ,3,8,1,1,14,3,7],
[RQ,3,8,4,1,14,3,5],
[RQ,3,3,1,1,6,3,15],
[RQ,3,2,3,1,4,2,2],
[RQ,3,3,2,1,8,2,8],
[RQ,1,2,1,1,7,1,2],
[RQ,1,2,1,1,7,1,2],
[RQ,4,5,9,1,8,3,11],
[RQ,4,5,11,1,8,3,11],
[RQ,2,3,2,6,7,2,5],
[RQ,2,3,2,6,8,2,3],
[RQ,3,4,1,4,7,2,3],
[RQ,3,3,9,1,8,3,2],
[RQ,3,3,11,1,5,3,2],
[RQ,2,2,4,1,5,3,2],
[RQ,3,2,4,1,5,2,2],
[RQ,2,3,1,5,17,2,3],
[RQ,5,4,10,3,27,5,20],
[RQ,3,8,2,2,5,3,5],
[RQ,1,1,1,1,0,1,1],
[RQ,1,2,1,5,2,2,1],
[RQ,1,1,1,8,0,1,1],
[RQ,5,1,3,1,34,2,20],
[RQ,2,2,1,1,36,2,10],
[RQ,4,13,3,1,154,2,30],
[RQ,2,1,2,0,18,2,10],
[RQ,3.5,6,52,7,321,3.5,20],
[RQ,2.5,3,4,1,14,1,15],
[RQ,3.5,4,5,10,30,1,20],
[RQ,3.5,2,3,1,14,1,20],
[RQ,3.5,2,30,18,90,2,15],
[RQ,4,2,25,16,75,1,15],
[RQ,4.5,5,7,5,30,1,40],
[RQ,2,2,3,2,10,1,3],
[RQ,4,2,25,16,75,1,15],
[RQ,3,2,3,1,14,1,20],
[RQ,4,4,25,12,50,4,10],
[RQ,2,2,20,10,60,2,6],
[RQ,3,1,14,8,40,3,8],
[RQ,3,1,8,10,35,3,8],
[RQ,4,12,2,20,95,1,12],
[RQ,2,2,4,10,30,2,10],
[RQ,2,3,1,1,5,4,8],
[RQ,1,0,0,0,1,4,2],
[RQ,1,1,0,0,2,5,1],
[RQ,1,0,0,0,1,5,1.5],
[RQ,5,3,17,17,29,5,25],
[RQ,5,3,17,17,29,5,9],
[RQ,4,1,5,2,10,5,15],
[RQ,3,3,17,17,23,5,2],
[RQ,3,0,3,3,4,2,5],
[RQ,5,2,2,1,4,5,45],
[RQ,4,3,11,1,19,5,35],
[RQ,5,3,4,4,14,5,50],
[RQ,5,2,2,2,5,5,25],
[RQ,5,1,3,3,10,5,35],
[RQ,4,2,2,2,7,5,20],
[RQ,3,3,9,4,20,5,25],
[RQ,3,3,1,1,6,4,10],
[RQ,2,3,2,1,6,4,33],
[RQ,4,3,8,1,14,4,24],
[RQ,4,3,9,1,15,4,36],
[RQ,1,1,1,0,6,4,1],
[RQ,1,1,2,0,4,4,1],
[RQ,4,0,4,2,4,4,1],
[RQ,3,2,4,10,32,4,2],
[RQ,3,3,12,4,31,4,2],
[RQ,5,4,9,6,26,4,2],
[RQ,2,1,9,9,23,4,1],
[RQ,1,1,9,9,37,4,1],
[RQ,1,1,12,0,18,4,1],
[RQ,2,1,1,0,20,4,1],
[RQ,2,1,12,0,36,4,1],
[RQ,3,2,1,0,4,4,1],
[RQ,3,2,1,0,4,4,1],
[RQ,2,2,10,0,12,4,1],
[RQ,2,2,10,10,10,4,1],
[RQ,3,1,12,12,10,4,1],
[RQ,1,0,0,0,6,4,0.5],
[RQ,1,0,0,12,8,4,0.5],
[RQ,NULL,NULL,NULL,NULL,NULL,4,8],
[RQ,2,0,4,1,16,4,6],
[RQ,2,0,5,1,19,4,6],
[RQ,4,0,5,1,19,4,4],
[RQ,2,0,1,1,7,4,1],
[RQ,1,1,3,0,16,1,4],
[RQ,2,0,1,0,3,4,6],
[RQ,4,32,0,0,224,1,12],
[RQ,3,NULL,NULL,NULL,NULL,1,6],
[RQ,1,1,10,0,7,5,6],
[RQ,2,0,6,1,22,4,4],
[RQ,2,0,6,1,22,4,4],
[RQ,2,3,50,1,235,3,7],
[RQ,2,1,3,1,27,3,2],
[RQ,3,3,6,1,59,3,3],
[RQ,2,1,2,1,23,3,3],
[RQ,2,3,13,13,147,3,4],
[RQ,3,4,12,13,153,3,5],
[RQ,4,4,14,14,166,3,6],
[RQ,2,2,13,13,137,3,2],
[RQ,3,2,2,1,33,3,6],
[RQ,2,1,4,1,31,3,2],
[RQ,1,1,4,4,46,3,1],
[RQ,3,2,4,4,56,3,4],
[RQ,4,3,3,3,57,3,4],
[RQ,3,2,4,8,76,3,3],
[RQ,1,2,1,1,29,3,2],
[RQ,3,3,6,10,104,3,5],
[RQ,2,1,0,8,50,3,3],
[RQ,1,5,0,11,105,2,0.5]
],
_tunings =[[
# vlow low nom high vhigh xhigh
#scale factors:
'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[
'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[
'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[
'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[
'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]],
weighFeature = weighFeature,
_split = split,
_isCocomo = False,
ignores=[5]
)
"""
Demo code:
"""
def _usp05(): print(usp05())
#if __name__ == '__main__': eval(todo('_nasa93()')) | mit | -1,131,610,790,422,415,400 | 28.73622 | 366 | 0.431674 | false |
V-FEXrt/Pokemon-Spoof-Plus | CableClub/cable_club_trade_center.py | 1 | 1950 | from AI.team_manager import TeamManager
from cable_club_constants import TradeCenterState, Com
def reset():
global tradeCenterState, counter, eat_byte, ate_byte, choice_byte
tradeCenterState = TradeCenterState.CHOOSING_TRADE
counter = 416
eat_byte = False
ate_byte = 0x0
choice_byte = 0
reset()
def set_reset_callback(func):
global reset_to_init
reset_to_init = func
def choosing_trade_process(byte):
global counter, tradeCenterState, eat_byte, choice_byte
## Eat 'random' 96 byte
if byte == 96 and counter > 0:
counter = 0
return byte
if byte >= 96 and byte <= 101:
# TODO: 'seen first wait' solves this eating bytes problem better. Should use it instead
if not eat_byte:
choice_byte = TeamManager.trade_center.offerIndex(byte)
eat_byte = True
return choice_byte
if eat_byte:
tradeCenterState = TradeCenterState.CONFIRMING_TRADE
eat_byte = False
return byte
return byte
def confirming_trade_process(byte):
global tradeCenterState, eat_byte, ate_byte, counter
if byte == 97 or byte == 98:
eat_byte = True
ate_byte = byte
return byte
if eat_byte:
eat_byte = False
if ate_byte == 97:
# Cancelled by partner
tradeCenterState = TradeCenterState.CHOOSING_TRADE
print "Trade cancelled by Player"
if ate_byte == 98:
# Confirmed by partner
print "Trade confirmed by Player"
reset_to_init()
reset()
TeamManager.trade_center.trade_confirmed()
return byte
functionSwitch = [choosing_trade_process, confirming_trade_process]
def trade_center_process_byte(byte):
if (tradeCenterState >= len(functionSwitch)):
print "Warning: no function for Trade Center State"
return byte
return functionSwitch[tradeCenterState](byte)
| mit | -6,544,293,436,902,257,000 | 25.712329 | 96 | 0.638974 | false |
crs4/omero.biobank | test/kb/test_individual.py | 1 | 2096 | # BEGIN_COPYRIGHT
# END_COPYRIGHT
import os, unittest, logging
logging.basicConfig(level=logging.ERROR)
from bl.vl.kb import KnowledgeBase as KB
from kb_object_creator import KBObjectCreator
OME_HOST = os.getenv("OME_HOST", "localhost")
OME_USER = os.getenv("OME_USER", "root")
OME_PASS = os.getenv("OME_PASS", "romeo")
class TestKB(KBObjectCreator):
def __init__(self, name):
super(TestKB, self).__init__(name)
self.kill_list = []
def setUp(self):
self.kb = KB(driver='omero')(OME_HOST, OME_USER, OME_PASS)
def tearDown(self):
self.kill_list.reverse()
for x in self.kill_list:
self.kb.delete(x)
self.kill_list = []
def check_object(self, o, conf, otype):
try:
self.assertTrue(isinstance(o, otype))
for k in conf.keys():
v = conf[k]
# FIXME this is omero specific...
if hasattr(v, 'ome_obj'):
self.assertEqual(getattr(o, k).id, v.id)
self.assertEqual(type(getattr(o, k)), type(v))
elif hasattr(v, '_id'):
self.assertEqual(getattr(o, k)._id, v._id)
else:
self.assertEqual(getattr(o, k), v)
except:
pass
def test_individual(self):
conf, i = self.create_individual()
self.kill_list.append(i.save())
self.check_object(i, conf, self.kb.Individual)
def test_enrollment(self):
conf, e = self.create_enrollment()
self.kill_list.append(e.save())
self.check_object(e, conf, self.kb.Enrollment)
def test_enrollment_ops(self):
conf, e = self.create_enrollment()
e.save()
study = e.study
xe = self.kb.get_enrollment(study, conf['studyCode'])
self.assertTrue(not xe is None)
self.assertEqual(xe.id, e.id)
self.kb.delete(e)
self.assertEqual(self.kb.get_enrollment(study, conf['studyCode']), None)
def suite():
suite = unittest.TestSuite()
suite.addTest(TestKB('test_individual'))
suite.addTest(TestKB('test_enrollment'))
suite.addTest(TestKB('test_enrollment_ops'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
runner.run((suite()))
| gpl-2.0 | -3,798,257,073,794,676,700 | 25.871795 | 76 | 0.640744 | false |
wger-project/wger | wger/exercises/tests/test_categories.py | 1 | 3817 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Django
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
WgerAccessTestCase,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
WgerTestCase,
)
from wger.exercises.models import ExerciseCategory
class ExerciseCategoryRepresentationTestCase(WgerTestCase):
"""
Test the representation of a model
"""
def test_representation(self):
"""
Test that the representation of an object is correct
"""
self.assertEqual("{0}".format(ExerciseCategory.objects.get(pk=1)), 'Category')
class CategoryOverviewTestCase(WgerAccessTestCase):
"""
Test that only admins see the edit links
"""
url = 'exercise:category:list'
anonymous_fail = True
user_success = 'admin'
user_fail = (
'manager1',
        'manager2',
        'general_manager1',
'manager3',
'manager4',
'test',
'member1',
'member2',
'member3',
'member4',
'member5',
)
class DeleteExerciseCategoryTestCase(WgerDeleteTestCase):
"""
Exercise category delete test case
"""
object_class = ExerciseCategory
url = 'exercise:category:delete'
pk = 4
user_success = 'admin'
user_fail = 'test'
class EditExerciseCategoryTestCase(WgerEditTestCase):
"""
Tests editing an exercise category
"""
object_class = ExerciseCategory
url = 'exercise:category:edit'
pk = 3
data = {'name': 'A different name'}
class AddExerciseCategoryTestCase(WgerAddTestCase):
"""
Tests adding an exercise category
"""
object_class = ExerciseCategory
url = 'exercise:category:add'
data = {'name': 'A new category'}
class ExerciseCategoryCacheTestCase(WgerTestCase):
"""
Cache test case
"""
def test_overview_cache_update(self):
"""
        Test that the template cache for the overview is correctly reset when
        performing certain operations
"""
self.client.get(reverse('exercise:exercise:overview'))
self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
old_exercise_overview = cache.get(make_template_fragment_key('exercise-overview', [2]))
category = ExerciseCategory.objects.get(pk=2)
category.name = 'Cool category'
category.save()
self.assertFalse(cache.get(make_template_fragment_key('exercise-overview', [2])))
self.client.get(reverse('exercise:exercise:overview'))
self.client.get(reverse('exercise:muscle:overview'))
self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
new_exercise_overview = cache.get(make_template_fragment_key('exercise-overview', [2]))
self.assertNotEqual(old_exercise_overview, new_exercise_overview)
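# Editorial note (not part of the original wger tests): the cache keys asserted
# above are ordinary template-fragment keys, e.g.
#     key = make_template_fragment_key('exercise-overview', [2])
#     cache.get(key)  # filled by rendering, dropped when category 2 is saved
# so saving an ExerciseCategory must invalidate exactly that fragment.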
class ExerciseCategoryApiTestCase(api_base_test.ApiBaseResourceTestCase):
"""
Tests the exercise category overview resource
"""
pk = 2
resource = ExerciseCategory
private_resource = False
| agpl-3.0 | 3,603,420,777,804,712,400 | 27.066176 | 95 | 0.681163 | false |
rgayon/plaso | plaso/winnt/known_folder_ids.py | 1 | 17463 | # -*- coding: utf-8 -*-
"""This file contains the Windows NT Known Folder identifier definitions."""
from __future__ import unicode_literals
# For now ignore the line too long errors.
# pylint: disable=line-too-long
# For now copied from:
# https://code.google.com/p/libfwsi/wiki/KnownFolderIdentifiers
# TODO: store these in a database or equiv.
DESCRIPTIONS = {
'008ca0b1-55b4-4c56-b8a8-4de4b299d3be': 'Account Pictures',
'00bcfc5a-ed94-4e48-96a1-3f6217f21990': 'Roaming Tiles',
'0139d44e-6afe-49f2-8690-3dafcae6ffb8': '(Common) Programs',
'0482af6c-08f1-4c34-8c90-e17ec98b1e17': 'Public Account Pictures',
'054fae61-4dd8-4787-80b6-090220c4b700': 'Game Explorer (Game Tasks)',
'0762d272-c50a-4bb0-a382-697dcd729b80': 'Users (User Profiles)',
'0ac0837c-bbf8-452a-850d-79d08e667ca7': 'Computer (My Computer)',
'0d4c3db6-03a3-462f-a0e6-08924c41b5d4': 'History',
'0f214138-b1d3-4a90-bba9-27cbc0c5389a': 'Sync Setup',
'15ca69b3-30ee-49c1-ace1-6b5ec372afb5': 'Sample Playlists',
'1777f761-68ad-4d8a-87bd-30b759fa33dd': 'Favorites',
'18989b1d-99b5-455b-841c-ab7c74e4ddfc': 'Videos (My Video)',
'190337d1-b8ca-4121-a639-6d472d16972a': 'Search Results (Search Home)',
'1a6fdba2-f42d-4358-a798-b74d745926c5': 'Recorded TV',
'1ac14e77-02e7-4e5d-b744-2eb1ae5198b7': 'System32 (System)',
'1b3ea5dc-b587-4786-b4ef-bd1dc332aeae': 'Libraries',
'1e87508d-89c2-42f0-8a7e-645a0f50ca58': 'Applications',
'2112ab0a-c86a-4ffe-a368-0de96e47012e': 'Music',
'2400183a-6185-49fb-a2d8-4a392a602ba3': 'Public Videos (Common Video)',
'24d89e24-2f19-4534-9dde-6a6671fbb8fe': 'One Drive Documents',
'289a9a43-be44-4057-a41b-587a76d7e7f9': 'Sync Results',
'2a00375e-224c-49de-b8d1-440df7ef3ddc': 'Localized Resources (Directory)',
'2b0f765d-c0e9-4171-908e-08a611b84ff6': 'Cookies',
'2c36c0aa-5812-4b87-bfd0-4cd0dfb19b39': 'Original Images',
'3214fab5-9757-4298-bb61-92a9deaa44ff': 'Public Music (Common Music)',
'339719b5-8c47-4894-94c2-d8f77add44a6': 'One Drive Pictures',
'33e28130-4e1e-4676-835a-98395c3bc3bb': 'Pictures (My Pictures)',
'352481e8-33be-4251-ba85-6007caedcf9d': 'Internet Cache (Temporary Internet Files)',
'374de290-123f-4565-9164-39c4925e467b': 'Downloads',
'3d644c9b-1fb8-4f30-9b45-f670235f79c0': 'Public Downloads (Common Downloads)',
'3eb685db-65f9-4cf6-a03a-e3ef65729f3d': 'Roaming Application Data (Roaming)',
'43668bf8-c14e-49b2-97c9-747784d784b7': 'Sync Center (Sync Manager)',
'48daf80b-e6cf-4f4e-b800-0e69d84ee384': 'Libraries',
'491e922f-5643-4af4-a7eb-4e7a138d8174': 'Videos',
'4bd8d571-6d19-48d3-be97-422220080e43': 'Music (My Music)',
'4bfefb45-347d-4006-a5be-ac0cb0567192': 'Conflicts',
'4c5c32ff-bb9d-43b0-b5b4-2d72e54eaaa4': 'Saved Games',
'4d9f7874-4e0c-4904-967b-40b0d20c3e4b': 'Internet (The Internet)',
'52528a6b-b9e3-4add-b60d-588c2dba842d': 'Homegroup',
'52a4f021-7b75-48a9-9f6b-4b87a210bc8f': 'Quick Launch',
'56784854-c6cb-462b-8169-88e350acb882': 'Contacts',
'5b3749ad-b49f-49c1-83eb-15370fbd4882': 'Tree Properties',
'5cd7aee2-2219-4a67-b85d-6c9ce15660cb': 'Programs',
'5ce4a5e9-e4eb-479d-b89f-130c02886155': 'Device Metadata Store',
'5e6c858f-0e22-4760-9afe-ea3317b67173': 'Profile (User\'s name)',
'625b53c3-ab48-4ec1-ba1f-a1ef4146fc19': 'Start Menu',
'62ab5d82-fdc1-4dc3-a9dd-070d1d495d97': 'Program Data',
'6365d5a7-0f0d-45e5-87f6-0da56b6a4f7d': 'Common Files (x64)',
'69d2cf90-fc33-4fb7-9a0c-ebb0f0fcb43c': 'Slide Shows (Photo Albums)',
'6d809377-6af0-444b-8957-a3773f02200e': 'Program Files (x64)',
'6f0cd92b-2e97-45d1-88ff-b0d186b8dedd': 'Network Connections',
'724ef170-a42d-4fef-9f26-b60e846fba4f': 'Administrative Tools',
'767e6811-49cb-4273-87c2-20f355e1085b': 'One Drive Camera Roll',
'76fc4e2d-d6ad-4519-a663-37bd56068185': 'Printers',
'7b0db17d-9cd2-4a93-9733-46cc89022e7c': 'Documents',
'7b396e54-9ec5-4300-be0a-2482ebae1a26': 'Default Gadgets (Sidebar Default Parts)',
'7c5a40ef-a0fb-4bfc-874a-c0f2e0b9fa8e': 'Program Files (x86)',
'7d1d3a04-debb-4115-95cf-2f29da2920da': 'Saved Searches (Searches)',
'7e636bfe-dfa9-4d5e-b456-d7b39851d8a9': 'Templates',
'82a5ea35-d9cd-47c5-9629-e15d2f714e6e': '(Common) Startup',
'82a74aeb-aeb4-465c-a014-d097ee346d63': 'Control Panel',
'859ead94-2e85-48ad-a71a-0969cb56a6cd': 'Sample Videos',
'8983036c-27c0-404b-8f08-102d10dcfd74': 'Send To',
'8ad10c31-2adb-4296-a8f7-e4701232c972': 'Resources (Resources Directory)',
'905e63b6-c1bf-494e-b29c-65b732d3d21a': 'Program Files',
'9274bd8d-cfd1-41c3-b35e-b13f55a758f4': 'Printer Shortcuts (PrintHood)',
'98ec0e18-2098-4d44-8644-66979315a281': 'Microsoft Office Outlook (MAPI)',
'9b74b6a3-0dfd-4f11-9e78-5f7800f2e772': 'User\'s name',
'9e3995ab-1f9c-4f13-b827-48b24b6c7174': 'User Pinned',
'9e52ab10-f80d-49df-acb8-4330f5687855': 'Temporary Burn Folder (CD Burning)',
'a302545d-deff-464b-abe8-61c8648d939b': 'Libraries',
'a305ce99-f527-492b-8b1a-7e76fa98d6e4': 'Installed Updates (Application Updates)',
'a3918781-e5f2-4890-b3d9-a7e54332328c': 'Application Shortcuts',
'a4115719-d62e-491d-aa7c-e74b8be3b067': '(Common) Start Menu',
'a520a1a4-1780-4ff6-bd18-167343c5af16': 'Local Application Data Low (Local Low)',
'a52bba46-e9e1-435f-b3d9-28daa648c0f6': 'One Drive',
'a63293e8-664e-48db-a079-df759e0509f7': 'Templates',
'a75d362e-50fc-4fb7-ac2c-a8beaa314493': 'Gadgets (Sidebar Parts)',
'a77f5d77-2e2b-44c3-a6a2-aba601054a51': 'Programs',
'a990ae9f-a03b-4e80-94bc-9912d7504104': 'Pictures',
'aaa8d5a5-f1d6-4259-baa8-78e7ef60835e': 'Roamed Tile Images',
'ab5fb87b-7ce2-4f83-915d-550846c9537b': 'Camera Roll',
'ae50c081-ebd2-438a-8655-8a092e34987a': 'Recent (Recent Items)',
'b250c668-f57d-4ee1-a63c-290ee7d1aa1f': 'Sample Music',
'b4bfcc3a-db2c-424c-b029-7fe99a87c641': 'Desktop',
'b6ebfb86-6907-413c-9af7-4fc2abf07cc5': 'Public Pictures (Common Pictures)',
'b7534046-3ecb-4c18-be4e-64cd4cb7d6ac': 'Recycle Bin (Bit Bucket)',
'b7bede81-df94-4682-a7d8-57a52620b86f': 'Screenshots',
'b94237e7-57ac-4347-9151-b08c6c32d1f7': '(Common) Templates',
'b97d20bb-f46a-4c97-ba10-5e3608430854': 'Startup',
'bcb5256f-79f6-4cee-b725-dc34e402fd46': 'Implicit Application Shortcuts',
'bcbd3057-ca5c-4622-b42d-bc56db0ae516': 'Programs',
'bd85e001-112e-431e-983b-7b15ac09fff1': 'Recorded TV',
'bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968': 'Links',
'c1bae2d0-10df-4334-bedd-7aa20b227a9d': '(Common) OEM Links',
'c4900540-2379-4c75-844b-64e6faf8716b': 'Sample Pictures',
'c4aa340d-f20f-4863-afef-f87ef2e6ba25': 'Public Desktop (Common Desktop)',
'c5abbf53-e17f-4121-8900-86626fc2c973': 'Network Shortcuts (NetHood)',
'c870044b-f49e-4126-a9c3-b52a1ff411e8': 'Ringtones',
'cac52c1a-b53d-4edc-92d7-6b2e8ac19434': 'Games',
'd0384e7d-bac3-4797-8f14-cba229b392b5': '(Common) Administrative Tools',
'd20beec4-5ca8-4905-ae3b-bf251ea09b53': 'Network (Places)',
'd65231b0-b2f1-4857-a4ce-a8e7c6ea7d27': 'System32 (x86)',
'd9dc8a3b-b784-432e-a781-5a1130a75963': 'History',
'de61d971-5ebc-4f02-a3a9-6c82895e5c04': 'Add New Programs (Get Programs)',
'de92c1c7-837f-4f69-a3bb-86e631204a23': 'Playlists',
'de974d24-d9c6-4d3e-bf91-f4455120b917': 'Common Files (x86)',
'debf2536-e1a8-4c59-b6a2-414586476aea': 'Game Explorer (Public Game Tasks)',
'df7266ac-9274-4867-8d55-3bd661de872d': 'Programs and Features (Change and Remove Programs)',
'dfdf76a2-c82a-4d63-906a-5644ac457385': 'Public',
'e555ab60-153b-4d17-9f04-a5fe99fc15ec': 'Ringtones',
'ed4824af-dce4-45a8-81e2-fc7965083634': 'Public Documents (Common Documents)',
'ee32e446-31ca-4aba-814f-a5ebd2fd6d5e': 'Offline Files (CSC)',
'f1b32785-6fba-4fcf-9d55-7b8e7f157091': 'Local Application Data',
'f38bf404-1d43-42f2-9305-67de0b28fc23': 'Windows',
'f3ce0f7c-4901-4acc-8648-d5d44b04ef8f': 'User\'s Files',
'f7f1ed05-9f6d-47a2-aaae-29d317c6f066': 'Common Files',
'fd228cb7-ae11-4ae3-864c-16f3910ab8fe': 'Fonts',
'fdd39ad0-238f-46af-adb4-6c85480369c7': 'Documents (Personal)',
}
PATHS = {
'008ca0b1-55b4-4c56-b8a8-4de4b299d3be': '%APPDATA%\\Microsoft\\Windows\\AccountPictures',
'00bcfc5a-ed94-4e48-96a1-3f6217f21990': '%LOCALAPPDATA%\\Microsoft\\Windows\\RoamingTiles',
'0139d44e-6afe-49f2-8690-3dafcae6ffb8': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs',
'0482af6c-08f1-4c34-8c90-e17ec98b1e17': '%PUBLIC%\\AccountPictures',
'054fae61-4dd8-4787-80b6-090220c4b700': '%LOCALAPPDATA%\\Microsoft\\Windows\\GameExplorer',
'0762d272-c50a-4bb0-a382-697dcd729b80': '%SYSTEMDRIVE%\\Users',
'0ac0837c-bbf8-452a-850d-79d08e667ca7': '',
'0d4c3db6-03a3-462f-a0e6-08924c41b5d4': '%LOCALAPPDATA%\\Microsoft\\Windows\\ConnectedSearch\\History',
'0f214138-b1d3-4a90-bba9-27cbc0c5389a': '',
'15ca69b3-30ee-49c1-ace1-6b5ec372afb5': '%PUBLIC%\\Music\\Sample Playlists',
'1777f761-68ad-4d8a-87bd-30b759fa33dd': '%USERPROFILE%\\Favorites',
'18989b1d-99b5-455b-841c-ab7c74e4ddfc': '%USERPROFILE%\\Videos',
'190337d1-b8ca-4121-a639-6d472d16972a': '',
'1a6fdba2-f42d-4358-a798-b74d745926c5': '%PUBLIC%\\RecordedTV.library-ms',
'1ac14e77-02e7-4e5d-b744-2eb1ae5198b7': '%WINDIR%\\System32',
'1b3ea5dc-b587-4786-b4ef-bd1dc332aeae': '%APPDATA%\\Microsoft\\Windows\\Libraries',
'1e87508d-89c2-42f0-8a7e-645a0f50ca58': '',
'2112ab0a-c86a-4ffe-a368-0de96e47012e': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Music.library-ms',
'2400183a-6185-49fb-a2d8-4a392a602ba3': '%PUBLIC%\\Videos',
'24d89e24-2f19-4534-9dde-6a6671fbb8fe': '%USERPROFILE%\\OneDrive\\Documents',
'289a9a43-be44-4057-a41b-587a76d7e7f9': '',
'2a00375e-224c-49de-b8d1-440df7ef3ddc': '%WINDIR%\\resources\\%CODEPAGE%',
'2b0f765d-c0e9-4171-908e-08a611b84ff6': '%APPDATA%\\Microsoft\\Windows\\Cookies',
'2c36c0aa-5812-4b87-bfd0-4cd0dfb19b39': '%LOCALAPPDATA%\\Microsoft\\Windows Photo Gallery\\Original Images',
'3214fab5-9757-4298-bb61-92a9deaa44ff': '%PUBLIC%\\Music',
'339719b5-8c47-4894-94c2-d8f77add44a6': '%USERPROFILE%\\OneDrive\\Pictures',
'33e28130-4e1e-4676-835a-98395c3bc3bb': '%USERPROFILE%\\Pictures',
'352481e8-33be-4251-ba85-6007caedcf9d': '%LOCALAPPDATA%\\Microsoft\\Windows\\Temporary Internet Files',
'374de290-123f-4565-9164-39c4925e467b': '%USERPROFILE%\\Downloads',
'3d644c9b-1fb8-4f30-9b45-f670235f79c0': '%PUBLIC%\\Downloads',
'3eb685db-65f9-4cf6-a03a-e3ef65729f3d': '%USERPROFILE%\\AppData\\Roaming',
'43668bf8-c14e-49b2-97c9-747784d784b7': '',
'48daf80b-e6cf-4f4e-b800-0e69d84ee384': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Libraries',
'491e922f-5643-4af4-a7eb-4e7a138d8174': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Videos.library-ms',
'4bd8d571-6d19-48d3-be97-422220080e43': '%USERPROFILE%\\Music',
'4bfefb45-347d-4006-a5be-ac0cb0567192': '',
'4c5c32ff-bb9d-43b0-b5b4-2d72e54eaaa4': '%USERPROFILE%\\Saved Games',
'4d9f7874-4e0c-4904-967b-40b0d20c3e4b': '',
'52528a6b-b9e3-4add-b60d-588c2dba842d': '',
'52a4f021-7b75-48a9-9f6b-4b87a210bc8f': '%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch',
'56784854-c6cb-462b-8169-88e350acb882': '',
'5b3749ad-b49f-49c1-83eb-15370fbd4882': '',
'5cd7aee2-2219-4a67-b85d-6c9ce15660cb': '%LOCALAPPDATA%\\Programs',
'5ce4a5e9-e4eb-479d-b89f-130c02886155': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\DeviceMetadataStore',
'5e6c858f-0e22-4760-9afe-ea3317b67173': '%SYSTEMDRIVE%\\Users\\%USERNAME%',
'625b53c3-ab48-4ec1-ba1f-a1ef4146fc19': '%APPDATA%\\Microsoft\\Windows\\Start Menu',
'62ab5d82-fdc1-4dc3-a9dd-070d1d495d97': '%SYSTEMDRIVE%\\ProgramData',
'6365d5a7-0f0d-45e5-87f6-0da56b6a4f7d': '%PROGRAMFILES%\\Common Files',
'69d2cf90-fc33-4fb7-9a0c-ebb0f0fcb43c': '%USERPROFILE%\\Pictures\\Slide Shows',
'6d809377-6af0-444b-8957-a3773f02200e': '%SYSTEMDRIVE%\\Program Files',
'6f0cd92b-2e97-45d1-88ff-b0d186b8dedd': '',
'724ef170-a42d-4fef-9f26-b60e846fba4f': '%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs\\Administrative Tools',
'767e6811-49cb-4273-87c2-20f355e1085b': '%USERPROFILE%\\OneDrive\\Pictures\\Camera Roll',
'76fc4e2d-d6ad-4519-a663-37bd56068185': '',
'7b0db17d-9cd2-4a93-9733-46cc89022e7c': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Documents.library-ms',
'7b396e54-9ec5-4300-be0a-2482ebae1a26': '%PROGRAMFILES%\\Windows Sidebar\\Gadgets',
'7c5a40ef-a0fb-4bfc-874a-c0f2e0b9fa8e': '%PROGRAMFILES% (%SYSTEMDRIVE%\\Program Files)',
'7d1d3a04-debb-4115-95cf-2f29da2920da': '%USERPROFILE%\\Searches',
'7e636bfe-dfa9-4d5e-b456-d7b39851d8a9': '%LOCALAPPDATA%\\Microsoft\\Windows\\ConnectedSearch\\Templates',
'82a5ea35-d9cd-47c5-9629-e15d2f714e6e': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp',
'82a74aeb-aeb4-465c-a014-d097ee346d63': '',
'859ead94-2e85-48ad-a71a-0969cb56a6cd': '%PUBLIC%\\Videos\\Sample Videos',
'8983036c-27c0-404b-8f08-102d10dcfd74': '%APPDATA%\\Microsoft\\Windows\\SendTo',
'8ad10c31-2adb-4296-a8f7-e4701232c972': '%WINDIR%\\Resources',
'905e63b6-c1bf-494e-b29c-65b732d3d21a': '%SYSTEMDRIVE%\\Program Files',
'9274bd8d-cfd1-41c3-b35e-b13f55a758f4': '%APPDATA%\\Microsoft\\Windows\\Printer Shortcuts',
'98ec0e18-2098-4d44-8644-66979315a281': '',
'9b74b6a3-0dfd-4f11-9e78-5f7800f2e772': '',
'9e3995ab-1f9c-4f13-b827-48b24b6c7174': '%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch\\User Pinned',
'9e52ab10-f80d-49df-acb8-4330f5687855': '%LOCALAPPDATA%\\Microsoft\\Windows\\Burn\\Burn',
'a302545d-deff-464b-abe8-61c8648d939b': '',
'a305ce99-f527-492b-8b1a-7e76fa98d6e4': '',
'a3918781-e5f2-4890-b3d9-a7e54332328c': '%LOCALAPPDATA%\\Microsoft\\Windows\\Application Shortcuts',
'a4115719-d62e-491d-aa7c-e74b8be3b067': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu',
'a520a1a4-1780-4ff6-bd18-167343c5af16': '%USERPROFILE%\\AppData\\LocalLow',
'a52bba46-e9e1-435f-b3d9-28daa648c0f6': '%USERPROFILE%\\OneDrive',
'a63293e8-664e-48db-a079-df759e0509f7': '%APPDATA%\\Microsoft\\Windows\\Templates',
'a75d362e-50fc-4fb7-ac2c-a8beaa314493': '%LOCALAPPDATA%\\Microsoft\\Windows Sidebar\\Gadgets',
'a77f5d77-2e2b-44c3-a6a2-aba601054a51': '%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs',
'a990ae9f-a03b-4e80-94bc-9912d7504104': '%APPDATA%\\Microsoft\\Windows\\Libraries\\Pictures.library-ms',
'aaa8d5a5-f1d6-4259-baa8-78e7ef60835e': '%LOCALAPPDATA%\\Microsoft\\Windows\\RoamedTileImages',
'ab5fb87b-7ce2-4f83-915d-550846c9537b': '%USERPROFILE%\\Pictures\\Camera Roll',
'ae50c081-ebd2-438a-8655-8a092e34987a': '%APPDATA%\\Microsoft\\Windows\\Recent',
'b250c668-f57d-4ee1-a63c-290ee7d1aa1f': '%PUBLIC%\\Music\\Sample Music',
'b4bfcc3a-db2c-424c-b029-7fe99a87c641': '%USERPROFILE%\\Desktop',
'b6ebfb86-6907-413c-9af7-4fc2abf07cc5': '%PUBLIC%\\Pictures',
'b7534046-3ecb-4c18-be4e-64cd4cb7d6ac': '',
'b7bede81-df94-4682-a7d8-57a52620b86f': '%USERPROFILE%\\Pictures\\Screenshots',
'b94237e7-57ac-4347-9151-b08c6c32d1f7': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Templates',
'b97d20bb-f46a-4c97-ba10-5e3608430854': '%APPDATA%\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp',
'bcb5256f-79f6-4cee-b725-dc34e402fd46': '%APPDATA%\\Microsoft\\Internet Explorer\\Quick Launch\\User Pinned\\ImplicitAppShortcuts',
'bcbd3057-ca5c-4622-b42d-bc56db0ae516': '%LOCALAPPDATA%\\Programs\\Common',
'bd85e001-112e-431e-983b-7b15ac09fff1': '',
'bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968': '%USERPROFILE%\\Links',
'c1bae2d0-10df-4334-bedd-7aa20b227a9d': '%ALLUSERSPROFILE%\\OEM Links',
'c4900540-2379-4c75-844b-64e6faf8716b': '%PUBLIC%\\Pictures\\Sample Pictures',
'c4aa340d-f20f-4863-afef-f87ef2e6ba25': '%PUBLIC%\\Desktop',
'c5abbf53-e17f-4121-8900-86626fc2c973': '%APPDATA%\\Microsoft\\Windows\\Network Shortcuts',
'c870044b-f49e-4126-a9c3-b52a1ff411e8': '%LOCALAPPDATA%\\Microsoft\\Windows\\Ringtones',
'cac52c1a-b53d-4edc-92d7-6b2e8ac19434': '',
'd0384e7d-bac3-4797-8f14-cba229b392b5': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Start Menu\\Programs\\Administrative Tools',
'd20beec4-5ca8-4905-ae3b-bf251ea09b53': '',
'd65231b0-b2f1-4857-a4ce-a8e7c6ea7d27': '%WINDIR%\\system32',
'd9dc8a3b-b784-432e-a781-5a1130a75963': '%LOCALAPPDATA%\\Microsoft\\Windows\\History',
'de61d971-5ebc-4f02-a3a9-6c82895e5c04': '',
'de92c1c7-837f-4f69-a3bb-86e631204a23': '%USERPROFILE%\\Music\\Playlists',
'de974d24-d9c6-4d3e-bf91-f4455120b917': '%PROGRAMFILES%\\Common Files',
'debf2536-e1a8-4c59-b6a2-414586476aea': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\GameExplorer',
'df7266ac-9274-4867-8d55-3bd661de872d': '',
'dfdf76a2-c82a-4d63-906a-5644ac457385': '%SYSTEMDRIVE%\\Users\\Public',
'e555ab60-153b-4d17-9f04-a5fe99fc15ec': '%ALLUSERSPROFILE%\\Microsoft\\Windows\\Ringtones',
'ed4824af-dce4-45a8-81e2-fc7965083634': '%PUBLIC%\\Documents',
'ee32e446-31ca-4aba-814f-a5ebd2fd6d5e': '',
'f1b32785-6fba-4fcf-9d55-7b8e7f157091': '%USERPROFILE%\\AppData\\Local',
'f38bf404-1d43-42f2-9305-67de0b28fc23': '%WINDIR%',
'f3ce0f7c-4901-4acc-8648-d5d44b04ef8f': '',
'f7f1ed05-9f6d-47a2-aaae-29d317c6f066': '%PROGRAMFILES%\\Common Files',
'fd228cb7-ae11-4ae3-864c-16f3910ab8fe': '%WINDIR%\\Fonts',
'fdd39ad0-238f-46af-adb4-6c85480369c7': '%USERPROFILE%\\Documents',
}
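# Hedged helper sketch (editorial addition, not part of the original plaso
# module): resolve a known folder identifier to its description and default
# path. The example GUID in the docstring is taken from the tables above.
def _lookup_known_folder(known_folder_identifier):
  """Returns a (description, path) tuple for a known folder GUID string, e.g.
  'b4bfcc3a-db2c-424c-b029-7fe99a87c641' resolves to the Desktop entries in
  the tables above; unknown identifiers yield (None, None)."""
  identifier = known_folder_identifier.lower()
  return DESCRIPTIONS.get(identifier), PATHS.get(identifier)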
| apache-2.0 | -3,370,271,264,363,562,000 | 66.949416 | 135 | 0.721354 | false |
ke4roh/RPiNWR | tests/test_cache.py | 1 | 16156 | # -*- coding: utf-8 -*-
__author__ = 'ke4roh'
# Copyright © 2016 James E. Scarborough
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from RPiNWR.SAME import *
from RPiNWR.cache import *
from RPiNWR.VTEC import *
import pickle
import os
class TestCache(unittest.TestCase):
def test_buffer_for_radio_against_storm_system(self):
# Test to see that the correct events are reported in priority order as a storm progresses
# This test is a little long in this file, but it's somewhat readable.
alerts = [SAMEMessage("WXL58", x) for x in [
"-WXR-SVR-037183+0045-1232003-KRAH/NWS-",
"-WXR-SVR-037151+0030-1232003-KRAH/NWS-",
"-WXR-SVR-037037+0045-1232023-KRAH/NWS-",
"-WXR-SVR-037001-037151+0100-1232028-KRAH/NWS-",
"-WXR-SVR-037069-037077-037183+0045-1232045-KRAH/NWS-",
"-WXR-SVR-037001+0045-1232110-KRAH/NWS-",
"-WXR-SVR-037069-037181-037185+0045-1232116-KRAH/NWS-",
"-WXR-FFW-037125+0300-1232209-KRAH/NWS-",
"-WXR-SVA-037001-037037-037063-037069-037077-037085-037101-037105-037125-037135-037145-037151-037181-037183-037185+0600-1241854-KRAH/NWS-",
"-WXR-SVR-037001-037037-037151+0045-1242011-KRAH/NWS-",
"-WXR-SVR-037001-037037-037135+0100-1242044-KRAH/NWS-",
"-WXR-SVR-037037-037063-037135-037183+0045-1242120-KRAH/NWS-",
"-WXR-SVR-037183+0100-1242156-KRAH/NWS-",
"-WXR-TOR-037183+0015-1242204-KRAH/NWS-",
"-WXR-SVR-037101-037183+0100-1242235-KRAH/NWS-",
"-WXR-SVR-037151+0100-1242339-KRAH/NWS-",
"-WXR-SVR-037101+0100-1250011-KRAH/NWS-",
"-WXR-SVR-037125-037151+0100-1250029-KRAH/NWS-",
"-WXR-SVR-037085-037105-037183+0100-1250153-KRAH/NWS-",
"-WXR-SVR-037085-037101+0100-1250218-KRAH/NWS-"
]]
expected = """123 20:03 SVR --- SVR
123 20:08 SVR --- SVR
123 20:13 SVR --- SVR
123 20:18 SVR --- SVR
123 20:23 SVR --- SVR,SVR
123 20:28 SVR --- SVR,SVR,SVR
123 20:33 SVR --- SVR,SVR
123 20:38 SVR --- SVR,SVR
123 20:43 SVR --- SVR,SVR
123 20:48 SVR --- SVR,SVR
123 20:53 SVR --- SVR,SVR
123 20:58 SVR --- SVR,SVR
123 21:03 SVR --- SVR,SVR
123 21:08 SVR --- SVR
123 21:13 SVR --- SVR,SVR
123 21:18 SVR --- SVR,SVR,SVR
123 21:23 SVR --- SVR,SVR,SVR
123 21:28 SVR --- SVR,SVR
123 21:33 --- SVR,SVR
123 21:38 --- SVR,SVR
123 21:43 --- SVR,SVR
123 21:48 --- SVR,SVR
123 21:53 --- SVR,SVR
123 21:58 --- SVR
123 22:03 ---
123 22:08 ---
123 22:13 --- FFW
123 22:18 --- FFW
123 22:23 --- FFW
123 22:28 --- FFW
123 22:33 --- FFW
123 22:38 --- FFW
123 22:43 --- FFW
123 22:48 --- FFW
123 22:53 --- FFW
123 22:58 --- FFW
123 23:03 --- FFW
123 23:08 --- FFW
123 23:13 --- FFW
123 23:18 --- FFW
123 23:23 --- FFW
123 23:28 --- FFW
123 23:33 --- FFW
123 23:38 --- FFW
123 23:43 --- FFW
123 23:48 --- FFW
123 23:53 --- FFW
123 23:58 --- FFW
124 00:03 --- FFW
124 00:08 --- FFW
124 00:13 --- FFW
124 00:18 --- FFW
124 00:23 --- FFW
124 00:28 --- FFW
124 00:33 --- FFW
124 00:38 --- FFW
124 00:43 --- FFW
124 00:48 --- FFW
124 00:53 --- FFW
124 00:58 --- FFW
124 01:03 --- FFW
124 01:08 --- FFW
124 01:13 ---
124 01:18 ---
124 01:23 ---
124 01:28 ---
124 01:33 ---
124 01:38 ---
124 01:43 ---
124 01:48 ---
124 01:53 ---
124 01:58 ---
124 02:03 ---
124 02:08 ---
124 02:13 ---
124 02:18 ---
124 02:23 ---
124 02:28 ---
124 02:33 ---
124 02:38 ---
124 02:43 ---
124 02:48 ---
124 02:53 ---
124 02:58 ---
124 03:03 ---
124 03:08 ---
124 03:13 ---
124 03:18 ---
124 03:23 ---
124 03:28 ---
124 03:33 ---
124 03:38 ---
124 03:43 ---
124 03:48 ---
124 03:53 ---
124 03:58 ---
124 04:03 ---
124 04:08 ---
124 04:13 ---
124 04:18 ---
124 04:23 ---
124 04:28 ---
124 04:33 ---
124 04:38 ---
124 04:43 ---
124 04:48 ---
124 04:53 ---
124 04:58 ---
124 05:03 ---
124 05:08 ---
124 05:13 ---
124 05:18 ---
124 05:23 ---
124 05:28 ---
124 05:33 ---
124 05:38 ---
124 05:43 ---
124 05:48 ---
124 05:53 ---
124 05:58 ---
124 06:03 ---
124 06:08 ---
124 06:13 ---
124 06:18 ---
124 06:23 ---
124 06:28 ---
124 06:33 ---
124 06:38 ---
124 06:43 ---
124 06:48 ---
124 06:53 ---
124 06:58 ---
124 07:03 ---
124 07:08 ---
124 07:13 ---
124 07:18 ---
124 07:23 ---
124 07:28 ---
124 07:33 ---
124 07:38 ---
124 07:43 ---
124 07:48 ---
124 07:53 ---
124 07:58 ---
124 08:03 ---
124 08:08 ---
124 08:13 ---
124 08:18 ---
124 08:23 ---
124 08:28 ---
124 08:33 ---
124 08:38 ---
124 08:43 ---
124 08:48 ---
124 08:53 ---
124 08:58 ---
124 09:03 ---
124 09:08 ---
124 09:13 ---
124 09:18 ---
124 09:23 ---
124 09:28 ---
124 09:33 ---
124 09:38 ---
124 09:43 ---
124 09:48 ---
124 09:53 ---
124 09:58 ---
124 10:03 ---
124 10:08 ---
124 10:13 ---
124 10:18 ---
124 10:23 ---
124 10:28 ---
124 10:33 ---
124 10:38 ---
124 10:43 ---
124 10:48 ---
124 10:53 ---
124 10:58 ---
124 11:03 ---
124 11:08 ---
124 11:13 ---
124 11:18 ---
124 11:23 ---
124 11:28 ---
124 11:33 ---
124 11:38 ---
124 11:43 ---
124 11:48 ---
124 11:53 ---
124 11:58 ---
124 12:03 ---
124 12:08 ---
124 12:13 ---
124 12:18 ---
124 12:23 ---
124 12:28 ---
124 12:33 ---
124 12:38 ---
124 12:43 ---
124 12:48 ---
124 12:53 ---
124 12:58 ---
124 13:03 ---
124 13:08 ---
124 13:13 ---
124 13:18 ---
124 13:23 ---
124 13:28 ---
124 13:33 ---
124 13:38 ---
124 13:43 ---
124 13:48 ---
124 13:53 ---
124 13:58 ---
124 14:03 ---
124 14:08 ---
124 14:13 ---
124 14:18 ---
124 14:23 ---
124 14:28 ---
124 14:33 ---
124 14:38 ---
124 14:43 ---
124 14:48 ---
124 14:53 ---
124 14:58 ---
124 15:03 ---
124 15:08 ---
124 15:13 ---
124 15:18 ---
124 15:23 ---
124 15:28 ---
124 15:33 ---
124 15:38 ---
124 15:43 ---
124 15:48 ---
124 15:53 ---
124 15:58 ---
124 16:03 ---
124 16:08 ---
124 16:13 ---
124 16:18 ---
124 16:23 ---
124 16:28 ---
124 16:33 ---
124 16:38 ---
124 16:43 ---
124 16:48 ---
124 16:53 ---
124 16:58 ---
124 17:03 ---
124 17:08 ---
124 17:13 ---
124 17:18 ---
124 17:23 ---
124 17:28 ---
124 17:33 ---
124 17:38 ---
124 17:43 ---
124 17:48 ---
124 17:53 ---
124 17:58 ---
124 18:03 ---
124 18:08 ---
124 18:13 ---
124 18:18 ---
124 18:23 ---
124 18:28 ---
124 18:33 ---
124 18:38 ---
124 18:43 ---
124 18:48 ---
124 18:53 ---
124 18:58 SVA ---
124 19:03 SVA ---
124 19:08 SVA ---
124 19:13 SVA ---
124 19:18 SVA ---
124 19:23 SVA ---
124 19:28 SVA ---
124 19:33 SVA ---
124 19:38 SVA ---
124 19:43 SVA ---
124 19:48 SVA ---
124 19:53 SVA ---
124 19:58 SVA ---
124 20:03 SVA ---
124 20:08 SVA ---
124 20:13 SVA --- SVR
124 20:18 SVA --- SVR
124 20:23 SVA --- SVR
124 20:28 SVA --- SVR
124 20:33 SVA --- SVR
124 20:38 SVA --- SVR
124 20:43 SVA --- SVR
124 20:48 SVA --- SVR,SVR
124 20:53 SVA --- SVR,SVR
124 20:58 SVA --- SVR
124 21:03 SVA --- SVR
124 21:08 SVA --- SVR
124 21:13 SVA --- SVR
124 21:18 SVA --- SVR
124 21:23 SVR,SVA --- SVR
124 21:28 SVR,SVA --- SVR
124 21:33 SVR,SVA --- SVR
124 21:38 SVR,SVA --- SVR
124 21:43 SVR,SVA --- SVR
124 21:48 SVR,SVA ---
124 21:53 SVR,SVA ---
124 21:58 SVR,SVR,SVA ---
124 22:03 SVR,SVR,SVA ---
124 22:08 TOR,SVR,SVA ---
124 22:13 TOR,SVR,SVA ---
124 22:18 TOR,SVR,SVA ---
124 22:23 SVR,SVA ---
124 22:28 SVR,SVA ---
124 22:33 SVR,SVA ---
124 22:38 SVR,SVR,SVA ---
124 22:43 SVR,SVR,SVA ---
124 22:48 SVR,SVR,SVA ---
124 22:53 SVR,SVR,SVA ---
124 22:58 SVR,SVA ---
124 23:03 SVR,SVA ---
124 23:08 SVR,SVA ---
124 23:13 SVR,SVA ---
124 23:18 SVR,SVA ---
124 23:23 SVR,SVA ---
124 23:28 SVR,SVA ---
124 23:33 SVR,SVA ---
124 23:38 SVA ---
124 23:43 SVA --- SVR
124 23:48 SVA --- SVR
124 23:53 SVA --- SVR
124 23:58 SVA --- SVR
125 00:03 SVA --- SVR
125 00:08 SVA --- SVR
125 00:13 SVA --- SVR,SVR
125 00:18 SVA --- SVR,SVR
125 00:23 SVA --- SVR,SVR
125 00:28 SVA --- SVR,SVR
125 00:33 SVA --- SVR,SVR,SVR
125 00:38 SVA --- SVR,SVR,SVR
125 00:43 SVA --- SVR,SVR
125 00:48 SVA --- SVR,SVR
125 00:53 SVA --- SVR,SVR
125 00:58 --- SVR,SVR
125 01:03 --- SVR,SVR
125 01:08 --- SVR,SVR
125 01:13 --- SVR
125 01:18 --- SVR
125 01:23 --- SVR
125 01:28 --- SVR
125 01:33 ---
125 01:38 ---
125 01:43 ---
125 01:48 ---
125 01:53 SVR ---
125 01:58 SVR ---
125 02:03 SVR ---
125 02:08 SVR ---
125 02:13 SVR ---
125 02:18 SVR --- SVR
125 02:23 SVR --- SVR
125 02:28 SVR --- SVR
125 02:33 SVR --- SVR
125 02:38 SVR --- SVR
125 02:43 SVR --- SVR
125 02:48 SVR --- SVR
125 02:53 --- SVR
125 02:58 --- SVR
125 03:03 --- SVR
125 03:08 --- SVR
125 03:13 --- SVR
125 03:18 ---
125 03:23 ---
125 03:28 ---
125 03:33 ---
""".split("\n")
buf = MessageCache((35.73, -78.85), "037183", default_SAME_sort)
# Iterate through this storm system 5 minutes at a time
aix = 0
eix = 0
for t in range(int(alerts[0].get_start_time_sec()),
int(alerts[-1].get_start_time_sec() + alerts[-1].get_duration_sec() + 1000),
300):
while aix < len(alerts) and alerts[aix].get_start_time_sec() <= t:
buf.add_message(alerts[aix])
aix += 1
ptime = time.strftime("%j %H:%M ", time.gmtime(t))
here = buf.get_active_messages(when=t)
elsewhere = buf.get_active_messages(when=t, here=False)
stat = ptime + ",".join([x.get_event_type() for x in here]) \
+ " --- " + ",".join([x.get_event_type() for x in elsewhere])
self.assertEqual(expected[eix].strip(), stat.strip())
eix += 1
def test_net_alerts(self):
expected = """146 01:24 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:26 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:34 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:36 KGLD.TO.W.0028 --- KGLD.TO.A.0204
146 01:45 KGLD.TO.W.0029 --- KGLD.TO.A.0204
146 02:02 --- KGLD.TO.W.0029,KGLD.TO.A.0204
146 02:13 KGLD.TO.A.0206 --- KGLD.TO.W.0029,KGLD.TO.A.0204
146 02:17 KGLD.TO.A.0206 --- KGLD.TO.W.0030,KGLD.TO.A.0204
146 02:33 KGLD.TO.A.0206 --- KGLD.TO.W.0030,KGLD.TO.A.0204
146 02:33 KGLD.TO.A.0206 --- KGLD.TO.W.0030,KGLD.TO.A.0204
146 02:46 KGLD.TO.A.0206 --- KGLD.TO.W.0031,KGLD.TO.A.0204
146 03:04 KGLD.TO.A.0206 --- KGLD.TO.W.0031,KGLD.TO.A.0204
146 03:13 KGLD.TO.A.0206 --- KGLD.TO.W.0031,KGLD.TO.A.0204
146 03:16 KGLD.TO.A.0206 --- KGLD.TO.W.0032,KGLD.TO.A.0204
146 03:39 KGLD.TO.A.0206 --- KGLD.TO.W.0032,KGLD.TO.A.0204
146 03:50 KGLD.TO.A.0206 --- KGLD.TO.W.0032,KGLD.TO.A.0204
146 04:05 KGLD.TO.A.0206 --- KGLD.TO.A.0204
146 04:33 KGLD.TO.A.0206 --- KGLD.SV.W.0094,KGLD.TO.A.0204
146 04:55 KGLD.TO.A.0206 --- KGLD.SV.W.0094,KGLD.TO.A.0204
146 04:56 KGLD.TO.A.0206 --- KGLD.SV.W.0094,KGLD.TO.A.0204
146 05:09 KGLD.TO.A.0206 --- KGLD.SV.W.0094
146 05:10 KGLD.TO.A.0206 --- KGLD.SV.W.0094""".split("\n")
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "kgld.cap.p"), "rb") as f:
alerts = pickle.load(f)
# https://mesonet.agron.iastate.edu/vtec/#2016-O-NEW-KGLD-TO-W-0029/USCOMP-N0Q-201605250145
buf = MessageCache((40.321909, -102.718192), "008125", default_VTEC_sort)
aix = eix = 0
for t in range(alerts[0][0], alerts[-1][0] + 2):
delta = False
while aix < len(alerts) and alerts[aix][0] <= t:
for v in alerts[aix][1].vtec:
buf.add_message(v)
aix += 1
delta = True
if delta:
here = buf.get_active_messages(when=t)
display_time = time.strftime("%j %H:%M ", time.gmtime(t))
try:
elsewhere = buf.get_active_messages(when=t, here=False)
except TypeError:
# TODO fix the comparator to handle null times
print([str(x) for x in filter(lambda m: m.is_effective(t), buf._MessageCache__messages.values())])
raise
line = display_time + ",".join([x.get_event_id() for x in here]) \
+ " --- " + ",".join([x.get_event_id() for x in elsewhere])
# print(line)
self.assertEqual(expected[eix], line)
eix += 1
self.assertIsNot(0, eix, 'need assertions')
def test_not_here_with_polygon(self):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "kgld.cap.p"), "rb") as f:
alerts = pickle.load(f)
valerts = list(filter(lambda v: v.event_id == "KGLD.TO.W.0028", [item for sublist in [c.vtec for a, c in alerts]
for item in sublist]))
buf = EventMessageGroup()
buf.add_message(valerts[0])
self.assertTrue(buf.is_effective((40.321909, -102.718192), "008125", True, valerts[0].published))
self.assertFalse(buf.is_effective((40.321909, -102.718192), "008125", False, valerts[0].published))
def test_not_here_sans_polygon(self):
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "kgld.cap.p"), "rb") as f:
alerts = pickle.load(f)
valerts = list(filter(lambda v: v.event_id == "KGLD.TO.A.0206", [item for sublist in [c.vtec for a, c in alerts]
for item in sublist]))
buf = EventMessageGroup()
buf.add_message(valerts[0])
self.assertTrue(buf.is_effective((40.321909, -102.718192), "008125", True, valerts[0].published))
self.assertFalse(buf.is_effective((40.321909, -102.718192), "008125", False, valerts[0].published))
| gpl-3.0 | -4,261,380,227,254,868,500 | 29.538752 | 151 | 0.492603 | false |
philanthropy-u/edx-platform | pavelib/prereqs.py | 1 | 11951 | """
Install Python and Node prerequisites.
"""
from __future__ import print_function
import hashlib
import os
import re
import sys
import subprocess
import io
from distutils import sysconfig
from paver.easy import BuildFailure, sh, task
from .utils.envs import Env
from .utils.timer import timed
PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache')
NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs"
NO_PYTHON_UNINSTALL_MESSAGE = 'NO_PYTHON_UNINSTALL is set. No attempts will be made to uninstall old Python libs.'
COVERAGE_REQ_FILE = 'requirements/edx/coverage.txt'
# If you make any changes to this list you also need to make
# a corresponding change to circle.yml, which is how the python
# prerequisites are installed for builds on circleci.com
if 'TOXENV' in os.environ:
PYTHON_REQ_FILES = ['requirements/edx/testing.txt']
else:
PYTHON_REQ_FILES = ['requirements/edx/development.txt']
# Developers can have private requirements, for local copies of github repos,
# or favorite debugging tools, etc.
if 'TOXENV' in os.environ:
PRIVATE_REQS = 'requirements/philu/testing.txt'
else:
PRIVATE_REQS = 'requirements/philu/base.txt'
if os.path.exists(PRIVATE_REQS):
PYTHON_REQ_FILES.append(PRIVATE_REQS)
def str2bool(s):
s = str(s)
return s.lower() in ('yes', 'true', 't', '1')
def no_prereq_install():
"""
Determine if NO_PREREQ_INSTALL should be truthy or falsy.
"""
return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False'))
def no_python_uninstall():
""" Determine if we should run the uninstall_python_packages task. """
return str2bool(os.environ.get('NO_PYTHON_UNINSTALL', 'False'))
def create_prereqs_cache_dir():
"""Create the directory for storing the hashes, if it doesn't exist already."""
try:
os.makedirs(PREREQS_STATE_DIR)
except OSError:
if not os.path.isdir(PREREQS_STATE_DIR):
raise
def compute_fingerprint(path_list):
"""
Hash the contents of all the files and directories in `path_list`.
Returns the hex digest.
"""
hasher = hashlib.sha1()
for path_item in path_list:
# For directories, create a hash based on the modification times
# of first-level subdirectories
if os.path.isdir(path_item):
for dirname in sorted(os.listdir(path_item)):
path_name = os.path.join(path_item, dirname)
if os.path.isdir(path_name):
hasher.update(str(os.stat(path_name).st_mtime))
# For files, hash the contents of the file
if os.path.isfile(path_item):
with io.open(path_item, "rb") as file_handle:
hasher.update(file_handle.read())
return hasher.hexdigest()
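# Hedged usage sketch (editorial addition): compute_fingerprint folds file
# contents and first-level subdirectory mtimes into a single SHA-1 digest.
# The paths below are hypothetical examples, not files this module requires;
# nonexistent paths simply contribute nothing to the hash.
def _example_compute_fingerprint():
    """Return the combined fingerprint of a file and a directory."""
    return compute_fingerprint(["package.json", "requirements/edx"])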
def prereq_cache(cache_name, paths, install_func):
"""
Conditionally execute `install_func()` only if the files/directories
specified by `paths` have changed.
If the code executes successfully (no exceptions are thrown), the cache
is updated with the new hash.
"""
# Retrieve the old hash
cache_filename = cache_name.replace(" ", "_")
cache_file_path = os.path.join(PREREQS_STATE_DIR, "{}.sha1".format(cache_filename))
old_hash = None
if os.path.isfile(cache_file_path):
with io.open(cache_file_path, "rb") as cache_file:
old_hash = cache_file.read()
# Compare the old hash to the new hash
# If they do not match (either the cache hasn't been created, or the files have changed),
# then execute the code within the block.
new_hash = compute_fingerprint(paths)
if new_hash != old_hash:
install_func()
# Update the cache with the new hash
# If the code executed within the context fails (throws an exception),
# then this step won't get executed.
create_prereqs_cache_dir()
with io.open(cache_file_path, "wb") as cache_file:
# Since the pip requirement files are modified during the install
# process, we need to store the hash generated AFTER the installation
post_install_hash = compute_fingerprint(paths)
cache_file.write(post_install_hash)
else:
print('{cache} unchanged, skipping...'.format(cache=cache_name))
def node_prereqs_installation():
"""
Configures npm and installs Node prerequisites
"""
# NPM installs hang sporadically. Log the installation process so that we
# determine if any packages are chronic offenders.
shard_str = os.getenv('SHARD', None)
if shard_str:
npm_log_file_path = '{}/npm-install.{}.log'.format(Env.GEN_LOG_DIR, shard_str)
else:
npm_log_file_path = '{}/npm-install.log'.format(Env.GEN_LOG_DIR)
npm_log_file = io.open(npm_log_file_path, 'wb')
npm_command = 'npm install --verbose'.split()
cb_error_text = "Subprocess return code: 1"
# Error handling around a race condition that produces "cb() never called" error. This
# evinces itself as `cb_error_text` and it ought to disappear when we upgrade
# npm to 3 or higher. TODO: clean this up when we do that.
try:
        # The implementation of Paver's `sh` function returns before the forked
        # process actually returns. Using a Popen object so that we can ensure that
# the forked process has returned
proc = subprocess.Popen(npm_command, stderr=npm_log_file)
proc.wait()
except BuildFailure, error_text:
if cb_error_text in error_text:
print("npm install error detected. Retrying...")
proc = subprocess.Popen(npm_command, stderr=npm_log_file)
proc.wait()
else:
raise BuildFailure(error_text)
print("Successfully installed NPM packages. Log found at {}".format(
npm_log_file_path
))
def python_prereqs_installation():
"""
Installs Python prerequisites
"""
for req_file in PYTHON_REQ_FILES:
pip_install_req_file(req_file)
def pip_install_req_file(req_file):
"""Pip install the requirements file."""
pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'
sh("{pip_cmd} -r {req_file}".format(pip_cmd=pip_cmd, req_file=req_file))
@task
@timed
def install_node_prereqs():
"""
Installs Node prerequisites
"""
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
prereq_cache("Node prereqs", ["package.json"], node_prereqs_installation)
# To add a package to the uninstall list, just add it to this list! No need
# to touch any other part of this file.
PACKAGES_TO_UNINSTALL = [
"South", # Because it interferes with Django 1.8 migrations.
"edxval", # Because it was bork-installed somehow.
"django-storages",
"django-oauth2-provider", # Because now it's called edx-django-oauth2-provider.
"edx-oauth2-provider", # Because it moved from github to pypi
"i18n-tools", # Because now it's called edx-i18n-tools
]
@task
@timed
def uninstall_python_packages():
"""
Uninstall Python packages that need explicit uninstallation.
Some Python packages that we no longer want need to be explicitly
uninstalled, notably, South. Some other packages were once installed in
ways that were resistant to being upgraded, like edxval. Also uninstall
them.
"""
if no_python_uninstall():
print(NO_PYTHON_UNINSTALL_MESSAGE)
return
# So that we don't constantly uninstall things, use a hash of the packages
# to be uninstalled. Check it, and skip this if we're up to date.
hasher = hashlib.sha1()
hasher.update(repr(PACKAGES_TO_UNINSTALL))
expected_version = hasher.hexdigest()
state_file_path = os.path.join(PREREQS_STATE_DIR, "Python_uninstall.sha1")
create_prereqs_cache_dir()
if os.path.isfile(state_file_path):
with io.open(state_file_path) as state_file:
version = state_file.read()
if version == expected_version:
print('Python uninstalls unchanged, skipping...')
return
# Run pip to find the packages we need to get rid of. Believe it or not,
# edx-val is installed in a way that it is present twice, so we have a loop
# to really really get rid of it.
for _ in range(3):
uninstalled = False
frozen = sh("pip freeze", capture=True)
for package_name in PACKAGES_TO_UNINSTALL:
if package_in_frozen(package_name, frozen):
                # Uninstall the package
sh("pip uninstall --disable-pip-version-check -y {}".format(package_name))
uninstalled = True
if not uninstalled:
break
else:
# We tried three times and didn't manage to get rid of the pests.
print("Couldn't uninstall unwanted Python packages!")
return
# Write our version.
with io.open(state_file_path, "wb") as state_file:
state_file.write(expected_version)
def package_in_frozen(package_name, frozen_output):
"""Is this package in the output of 'pip freeze'?"""
# Look for either:
#
# PACKAGE-NAME==
#
# or:
#
# blah_blah#egg=package_name-version
#
pattern = r"(?mi)^{pkg}==|#egg={pkg_under}-".format(
pkg=re.escape(package_name),
pkg_under=re.escape(package_name.replace("-", "_")),
)
return bool(re.search(pattern, frozen_output))
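# Hedged illustration (editorial addition): package_in_frozen matches either a
# "NAME==" line or an "#egg=name_with_underscores-" fragment of `pip freeze`
# output. The frozen text below is invented for the example; both calls
# return True.
def _example_package_in_frozen():
    frozen = "South==1.0.2\n-e git+https://example.invalid/x#egg=i18n_tools-0.1"
    return (package_in_frozen("South", frozen),
            package_in_frozen("i18n-tools", frozen))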
@task
@timed
def install_coverage_prereqs():
""" Install python prereqs for measuring coverage. """
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
pip_install_req_file(COVERAGE_REQ_FILE)
@task
@timed
def install_python_prereqs():
"""
Installs Python prerequisites.
"""
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
uninstall_python_packages()
# Include all of the requirements files in the fingerprint.
files_to_fingerprint = list(PYTHON_REQ_FILES)
# Also fingerprint the directories where packages get installed:
# ("/edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages")
files_to_fingerprint.append(sysconfig.get_python_lib())
# In a virtualenv, "-e installs" get put in a src directory.
src_dir = os.path.join(sys.prefix, "src")
if os.path.isdir(src_dir):
files_to_fingerprint.append(src_dir)
# Also fingerprint this source file, so that if the logic for installations
# changes, we will redo the installation.
this_file = __file__
if this_file.endswith(".pyc"):
this_file = this_file[:-1] # use the .py file instead of the .pyc
files_to_fingerprint.append(this_file)
prereq_cache("Python prereqs", files_to_fingerprint, python_prereqs_installation)
@task
@timed
def install_prereqs():
"""
Installs Node and Python prerequisites
"""
if no_prereq_install():
print(NO_PREREQ_MESSAGE)
return
if not str2bool(os.environ.get('SKIP_NPM_INSTALL', 'False')):
install_node_prereqs()
install_python_prereqs()
log_installed_python_prereqs()
print_devstack_warning()
def log_installed_python_prereqs():
""" Logs output of pip freeze for debugging. """
sh("pip freeze > {}".format(Env.GEN_LOG_DIR + "/pip_freeze.log"))
return
def print_devstack_warning():
if Env.USING_DOCKER: # pragma: no cover
print("********************************************************************************")
print("* WARNING: Mac users should run this from both the lms and studio shells")
print("* in docker devstack to avoid startup errors that kill your CPU.")
print("* For more details, see:")
print("* https://github.com/edx/devstack#docker-is-using-lots-of-cpu-time-when-it-should-be-idle")
print("********************************************************************************")
| agpl-3.0 | 451,403,621,817,372,500 | 32.759887 | 114 | 0.643293 | false |
shirishgoyal/rational_crowd | config/urls.py | 1 | 1607 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from rational_crowd.api.router import router
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
# url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
# url(r'^users/', include('rational_crowd.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/', include(router.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
| mit | -2,009,427,574,063,761,700 | 43.638889 | 110 | 0.63659 | false |
RedHatQE/cfme_tests | cfme/automate/dialogs/dialog_tab.py | 1 | 2309 | import attr
from navmazing import NavigateToAttribute
from widgetastic.widget import Text
from . import AddTabView
from . import TabForm
from .dialog_box import BoxCollection
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.modeling.base import parent_of_type
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
class EditTabView(TabForm):
@property
def is_displayed(self):
return (
self.in_customization and
self.title.text == "Editing Dialog {} [Tab Information]".format(self.tab_label)
)
class DetailsTabView(TabForm):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_customization and self.service_dialogs.is_opened and
self.title.text == 'Dialog "{}"'.format(self.context['object'].tab_label)
)
@attr.s
class Tab(BaseEntity):
"""A class representing one Tab in the UI."""
tab_label = attr.ib()
tab_desc = attr.ib(default=None)
_collections = {'boxes': BoxCollection}
@property
def boxes(self):
return self.collections.boxes
@property
def tree_path(self):
return self.parent.tree_path + [self.tab_label]
@property
def dialog(self):
""" Returns parent object - Dialog"""
from .service_dialogs import Dialog
return parent_of_type(self, Dialog)
@attr.s
class TabCollection(BaseCollection):
ENTITY = Tab
@property
def tree_path(self):
return self.parent.tree_path
def create(self, tab_label=None, tab_desc=None):
""" Create tab method"""
view = navigate_to(self, "Add")
view.new_tab.click()
view.edit_tab.click()
view.fill({'tab_label': tab_label, 'tab_desc': tab_desc})
view.save_button.click()
return self.instantiate(tab_label=tab_label, tab_desc=tab_desc)
@navigator.register(TabCollection)
class Add(CFMENavigateStep):
VIEW = AddTabView
prerequisite = NavigateToAttribute('parent.parent', 'Add')
def step(self, *args, **kwargs):
self.prerequisite_view.create_tab.click()
| gpl-2.0 | -5,686,583,473,684,868,000 | 25.848837 | 91 | 0.673452 | false |
pyspace/test | pySPACE/missions/nodes/meta/same_input_layer.py | 1 | 22715 | """ Combine several other nodes together in parallel
This is useful to be combined with the
:class:`~pySPACE.missions.nodes.meta.flow_node.FlowNode`.
"""
import numpy
from pySPACE.environments.chains.node_chain import NodeChainFactory
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.prediction_vector import PredictionVector
# ensemble imports
import os
import fcntl
import fnmatch
import cPickle
import logging
from collections import defaultdict
from pySPACE.missions.nodes.meta.flow_node import FlowNode
from pySPACE.tools.filesystem import locate
class SameInputLayerNode(BaseNode):
""" Encapsulates a set of other nodes that are executed in parallel in the flow.
This node was a thin wrapper around MDP's SameInputLayer node
    but now has its own implementation.
**Parameters**
:enforce_unique_names:
When combining time series channels or feature vectors,
the node adds the index of the current node to the channel names or
feature names as a prefix to enforce unique names.
(*optional, default: True*)
**Exemplary Call**
.. code-block:: yaml
-
node : Same_Input_Layer
parameters :
enforce_unique_names : True
nodes :
-
node : Time_Domain_Features
parameters :
moving_window_length : 1
-
node : STFT_Features
parameters :
frequency_band : [2.0, 8.0]
frequency_resolution : 1.0
"""
def __init__(self, nodes,enforce_unique_names=True,
store = False, **kwargs):
self.nodes = nodes # needed to find out dimensions and trainability, ...
super(SameInputLayerNode, self).__init__(**kwargs)
self.permanent_state.pop("nodes")
self.set_permanent_attributes(output_type = None,
names = None,
unique = enforce_unique_names)
@staticmethod
def node_from_yaml(layer_spec):
""" Load the specs and initialize the layer nodes """
# This node requires one parameters, namely a list of nodes
assert("parameters" in layer_spec
and "nodes" in layer_spec["parameters"]),\
"SameInputLayerNode requires specification of a list of nodes!"
# Create all nodes that are packed together in this layer
layer_nodes = []
for node_spec in layer_spec["parameters"]["nodes"]:
node_obj = BaseNode.node_from_yaml(node_spec)
layer_nodes.append(node_obj)
layer_spec["parameters"].pop("nodes")
# Create the node object
node_obj = SameInputLayerNode(nodes = layer_nodes,**layer_spec["parameters"])
return node_obj
def reset(self):
""" Also reset internal nodes """
nodes = self.nodes
for node in nodes:
node.reset()
super(SameInputLayerNode, self).reset()
self.nodes = nodes
def register_input_node(self, input_node):
""" All sub-nodes have the same input node """
super(SameInputLayerNode, self).register_input_node(input_node)
# Register the node as the input for all internal nodes
for node in self.nodes:
node.register_input_node(input_node)
def _execute(self, data):
""" Process the data through the internal nodes """
names = []
result_array = None
result_label = []
result_predictor = []
result_prediction = []
# For all node-layers
for node_index, node in enumerate(self.nodes):
# Compute node's result
node_result = node.execute(data)
# Determine the output type of the node
if self.output_type is None:
self.output_type = type(node_result)
else:
assert (self.output_type == type(node_result)), \
"SameInputLayerNode requires that all of its layers return "\
"the same type. Types found: %s %s" \
% (self.output_type, type(node_result))
# Merge the nodes' outputs depending on the type
if self.output_type == FeatureVector:
result_array = \
self.add_feature_vector(node_result, node_index,
result_array, names)
elif self.output_type == PredictionVector:
if type(node_result.label) == list:
result_label.extend(node_result.label)
else:
# a single classification is expected here
result_label.append(node_result.label)
if type(node_result.prediction) == list:
result_prediction.extend(node_result.prediction)
else:
result_prediction.append(node_result.prediction)
if type(node_result.predictor) == list:
result_predictor.extend(node_result.predictor)
else:
result_predictor.append(node_result.predictor)
else:
assert (self.output_type == TimeSeries), \
"SameInputLayerNode can not merge data of type %s." \
% self.output_type
if self.names is None and not self.unique:
names.extend(node_result.channel_names)
elif self.names is None and self.unique:
for name in node_result.channel_names:
names.append("%i_%s" % (node_index, name))
                if result_array is None:
                    result_array = node_result
                    if self.dtype is None:
                        self.dtype = node_result.dtype
                else:
result_array = numpy.concatenate((result_array,
node_result), axis=1)
# Construct output with correct type and names
if self.names is None:
self.names = names
if self.output_type == FeatureVector:
return FeatureVector(result_array, self.names)
elif self.output_type == PredictionVector:
return PredictionVector(label=result_label,
prediction=result_prediction,
predictor=result_predictor)
else:
return TimeSeries(result_array, self.names,
node_result.sampling_frequency,
node_result.start_time, node_result.end_time,
node_result.name, node_result.marker_name)
def add_feature_vector(self, data, index, result_array, names):
""" Concatenate feature vectors, ensuring unique names """
if self.names is None and self.unique:
for name in data.feature_names:
names.append("%i_%s" % (index,name))
elif self.names is None and not self.unique:
names.extend(data.feature_names)
        if result_array is None:
result_array = data
else:
result_array = numpy.concatenate((result_array,data), axis=1)
return result_array
def is_trainable(self):
""" Trainable if one subnode is trainable """
for node in self.nodes:
if node.is_trainable():
return True
return False
def is_supervised(self):
""" Supervised if one subnode requires supervised training """
for node in self.nodes:
if node.is_supervised():
return True
return False
#
# def train_sweep(self, use_test_data):
# """ Train all internal nodes """
# for node in self.nodes:
# node.train_sweep(use_test_data)
def _train(self, x, *args, **kwargs):
""" Perform single training step by training the internal nodes """
for node in self.nodes:
if node.is_training():
node.train(x, *args, **kwargs)
def _stop_training(self):
""" Perform single training step by training the internal nodes """
for node in self.nodes:
if node.is_training():
node.stop_training()
def store_state(self, result_dir, index=None):
""" Stores all nodes in subdirectories of *result_dir* """
for i, node in enumerate(self.nodes):
node_dir = os.path.join(result_dir, (self.__class__.__name__+str(index).split("None")[0]+str(i)))
node.store_state(node_dir, index=i)
def _inc_train(self,data,label):
""" Forward data to retrainable nodes
        So the individual nodes do not need to buffer data and *present_labels*
        does not have to be reimplemented.
"""
for node in self.nodes:
if node.is_retrainable():
node._inc_train(data, label)
def set_run_number(self, run_number):
""" Informs all subnodes about the number of the current run """
for node in self.nodes:
node.set_run_number(run_number)
super(SameInputLayerNode, self).set_run_number(run_number)
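# A minimal usage sketch for SameInputLayerNode (not part of the original module;
# `feature_node_a`, `feature_node_b` and the upstream `source_node` are placeholders
# for arbitrary pySPACE nodes):
#
#   layer = SameInputLayerNode(nodes=[feature_node_a, feature_node_b],
#                              enforce_unique_names=True)
#   layer.register_input_node(source_node)
#   merged = layer.execute(sample)   # one FeatureVector/TimeSeries with "0_"/"1_" prefixed names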
class EnsembleNotFoundException(Exception): pass
class ClassificationFlowsLoaderNode(BaseNode):
""" Combine an ensemble of pretrained node chains
This node loads all "pickled" flows whose file names match
*ensemble_pattern* and are contained in the directory tree rooted at
*ensemble_base_dir*. If the *flow_select_list* is not empty, only the
flows with indices contained in flow_select_list are used. The index "-1"
corresponds to "all flows".
**Parameters**
:ensemble_base_dir:
The root directory under which the stored flow objects which constitute
the ensemble are stored.
:ensemble_pattern:
Pickled flows must match the given pattern to be included into the
ensemble.
:flow_select_list:
This optional parameter allows to select only a subset of the flows
that are found in ensemble_base_dir. It must be a list of indices.
Only the flows with the given index are included into the ensemble.
If -1 is contained in the list, all flows are automatically added to
the ensemble.
.. note::
The order of the flows in the ensemble is potentially random or at
least hard to predict. Thus, this parameter should not be used
to select a specific flow. In contrast, this parameter can be used
to select a certain number of flows from the available flows
(where it doesn't matter which ones). This can be useful for instance
in benchmarking experiments when one is interested in
the average performance of an ensemble of a certain size.
(*optional, default: [-1]*)
:cache_dir:
If this argument is given, all results of all ensembles are remembered
and stored in a persistent cache file in the given cache_dir. These
cached results can be later reused without actually loading and
executing the ensemble.
(*optional, default: None*)
**Exemplary Call**
.. code-block:: yaml
-
node : Ensemble_Node
parameters :
ensemble_base_dir : "/tmp/" # <- insert suitable directory here
ensemble_pattern : "flow*.pickle"
flow_select_list : "eval(range(10))"
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2010/05/20
"""
def __init__(self, ensemble_base_dir, ensemble_pattern,
flow_select_list=[-1], cache_dir=None, **kwargs):
super(ClassificationFlowsLoaderNode, self).__init__(**kwargs)
# Load all flow-pickle files that match the given ensemble_pattern
# in the directory tree rooted in ensemble_base_dir
flow_pathes = tuple(locate(ensemble_pattern, ensemble_base_dir))
if -1 not in flow_select_list:
# Select only flows for ensemble whose index is contained in
# flow_select_list
flow_pathes = tuple(flow_pathes[index] for index in flow_select_list)
if len(flow_pathes) == 0:
raise EnsembleNotFoundException("No ensemble found in %s for pattern %s" %
(ensemble_base_dir, ensemble_pattern))
self.feature_names = \
map(lambda s: "_".join(s.split(os.sep)[-1].split('_')[0:2]),
flow_pathes)
self.set_permanent_attributes(ensemble = None,
flow_pathes = flow_pathes,
cache_dir = cache_dir,
cache = None,
cache_updated = False,
store = True) # always store cache
def _load_cache(self):
self.cache = defaultdict(dict)
# Check if there are cached results for this ensemble
for flow_path in self.flow_pathes:
file_path = self.cache_dir + os.sep + "ensemble_cache" + os.sep \
+ "cache_%s" % hash(flow_path)
if os.path.exists(file_path):
# Load ensemble cache
self._log("Loading flow cache from %s" % file_path)
lock_file = open(file_path + ".lock", 'w')
fcntl.flock(lock_file, fcntl.LOCK_EX)
self._log("Got exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
cache_file = open(file_path, 'r')
self.cache[flow_path] = cPickle.load(cache_file)
cache_file.close()
self._log("Release exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
fcntl.flock(lock_file, fcntl.LOCK_UN)
def _load_ensemble(self):
self._log("Loading ensemble")
# Create a flow node for each flow pickle
flow_nodes = [FlowNode(subflow_path = flow_path)
for flow_path in self.flow_pathes]
# Create an SameInputLayer node that executes all flows independently
# with the same input
ensemble = SameInputLayerNode(flow_nodes, enforce_unique_names=True)
# We can now set the input dim and output dim
self.input_dim = ensemble.input_dim
self.output_dim = ensemble.output_dim
self.set_permanent_attributes(ensemble = ensemble)
def _train(self, data, label):
""" Trains the ensemble on the given data vector *data* """
        if self.ensemble is None:
# Load ensemble since data is not cached
self._load_ensemble()
return self.ensemble.train(data, label)
def _execute(self, data):
# Compute data's hash
data_hash = hash(tuple(data.flatten()))
# Load ensemble's cache
        if self.cache is None:
if self.cache_dir:
self._load_cache()
else: # Caching disabled
self.cache = defaultdict(dict)
# Try to lookup the result of this ensemble for the given data in the cache
labels = []
predictions = []
for i, flow_path in enumerate(self.flow_pathes):
if data_hash in self.cache[flow_path]:
label, prediction = self.cache[flow_path][data_hash]
else:
self.cache_updated = True
                if self.ensemble is None:
# Load ensemble since data is not cached
self._load_ensemble()
node_result = self.ensemble.nodes[i].execute(data)
label = node_result.label
prediction = node_result.prediction
self.cache[flow_path][data_hash] = (label, prediction)
labels.append(label)
predictions.append(prediction)
result = PredictionVector(label=labels,
prediction=predictions,
predictor=self)
result.dim_names = self.feature_names
return result
def store_state(self, result_dir, index=None):
""" Stores this node in the given directory *result_dir* """
# Store cache if caching is enabled and cache has changed
if self.cache_dir and self.cache_updated:
if not os.path.exists(self.cache_dir + os.sep + "ensemble_cache"):
os.makedirs(self.cache_dir + os.sep + "ensemble_cache")
for flow_path in self.flow_pathes:
file_path = self.cache_dir + os.sep + "ensemble_cache" + os.sep \
+ "cache_%s" % hash(flow_path)
if os.path.exists(file_path):
self._log("Updating flow cache %s" % file_path)
# Update existing cache persistency file
lock_file = open(file_path + ".lock", 'w')
fcntl.flock(lock_file, fcntl.LOCK_EX)
self._log("Got exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
cache_file = open(file_path, 'r')
self.cache[flow_path].update(cPickle.load(cache_file))
cache_file.close()
cache_file = open(file_path, 'w')
cPickle.dump(self.cache[flow_path], cache_file)
cache_file.close()
self._log("Release exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
fcntl.flock(lock_file, fcntl.LOCK_UN)
else:
self._log("Writing flow cache %s" % file_path)
# Create new cache persistency file
lock_file = open(file_path + ".lock", 'w')
fcntl.flock(lock_file, fcntl.LOCK_EX)
self._log("Got exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
cache_file = open(file_path, 'w')
cPickle.dump(self.cache[flow_path], cache_file)
cache_file.close()
self._log("Release exclusive lock on %s" % (file_path + ".lock"),
logging.INFO)
fcntl.flock(lock_file, fcntl.LOCK_UN)
class MultiClassLayerNode(SameInputLayerNode):
""" Wrap the one vs. rest or one vs. one scheme around the given node
The given class labels are forwarded to the internal nodes.
During training, data is relabeled.
Everything else is the same as in the base node.
    Though this scheme is most important for classification, it also permits
    other trainable algorithms to use it.
**Parameters**
:class_labels:
This is the complete list of expected class labels.
It is needed to construct the necessary flows in the
initialization stage.
:node:
Specification of the wrapped node for the used scheme
        As class labels for the *1vR* scheme,
this node has to use *REST* and *LABEL*.
*LABEL* is replaced with the different `class_labels`.
The other label should be *REST*.
For the *1vs1* scheme *LABEL1* and *LABEL2* have to be used.
:scheme:
One of *1v1* (One vs. One) or *1vR* (One vs. Rest)
.. note:: The one class approach is included by simply not giving
'REST' label to the classifier, but filtering it out.
(*optional, default:'1v1'*)
**Exemplary Call**
.. code-block:: yaml
-
node : MultiClassLayer
parameters :
class_labels : ["Target", "Standard","Artifact"]
scheme : 1vR
node :
-
node : 1SVM
parameters :
class_labels : ["LABEL","REST"]
complexity : 1
"""
@staticmethod
def node_from_yaml(layer_spec):
""" Load the specs and initialize the layer nodes """
assert("parameters" in layer_spec
and "class_labels" in layer_spec["parameters"]
and "node" in layer_spec["parameters"]),\
"Node requires specification of a node and classification labels!"
scheme = layer_spec["parameters"].pop("scheme","1vs1")
# Create all nodes that are packed together in this layer
layer_nodes = []
node_spec = layer_spec["parameters"]["node"][0]
classes = layer_spec["parameters"]["class_labels"]
if scheme=='1vR':
for label in layer_spec["parameters"]["class_labels"]:
node_obj = BaseNode.node_from_yaml(NodeChainFactory.instantiate(node_spec,{"LABEL":label}))
layer_nodes.append(node_obj)
else:
n=len(classes)
for i in range(n-1):
for j in range(i+1,n):
replace_dict = {"LABEL1":classes[i],"LABEL2":classes[j]}
node_obj = BaseNode.node_from_yaml(NodeChainFactory.instantiate(node_spec,replace_dict))
layer_nodes.append(node_obj)
layer_spec["parameters"].pop("node")
layer_spec["parameters"].pop("class_labels")
# Create the node object
node_obj = MultiClassLayerNode(nodes = layer_nodes,**layer_spec["parameters"])
return node_obj
_NODE_MAPPING = {"Ensemble_Node": ClassificationFlowsLoaderNode,
"Same_Input_Layer": SameInputLayerNode,
}
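# Sketch of how the schemes above expand (illustrative): for
# class_labels = ["A", "B", "C"], MultiClassLayerNode.node_from_yaml with the 1v1
# scheme instantiates one wrapped node per unordered pair, replacing the placeholders
#
#   ("LABEL1", "LABEL2") -> ("A", "B"), ("A", "C"), ("B", "C")
#
# while the 1vR scheme creates one wrapped node per class with LABEL -> "A", "B", "C",
# the remaining classes being handled via the wrapped node's "REST" label.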
| gpl-3.0 | 4,007,869,671,616,826,000 | 41.22119 | 109 | 0.547215 | false |
googleapis/python-game-servers | google/cloud/gaming_v1/services/realms_service/async_client.py | 1 | 28046 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.gaming_v1.services.realms_service import pagers
from google.cloud.gaming_v1.types import common
from google.cloud.gaming_v1.types import realms
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import RealmsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import RealmsServiceGrpcAsyncIOTransport
from .client import RealmsServiceClient
class RealmsServiceAsyncClient:
"""A realm is a grouping of game server clusters that are
considered interchangeable.
"""
_client: RealmsServiceClient
DEFAULT_ENDPOINT = RealmsServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = RealmsServiceClient.DEFAULT_MTLS_ENDPOINT
realm_path = staticmethod(RealmsServiceClient.realm_path)
parse_realm_path = staticmethod(RealmsServiceClient.parse_realm_path)
common_billing_account_path = staticmethod(
RealmsServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
RealmsServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(RealmsServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
RealmsServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
RealmsServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
RealmsServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(RealmsServiceClient.common_project_path)
parse_common_project_path = staticmethod(
RealmsServiceClient.parse_common_project_path
)
common_location_path = staticmethod(RealmsServiceClient.common_location_path)
parse_common_location_path = staticmethod(
RealmsServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RealmsServiceAsyncClient: The constructed client.
"""
return RealmsServiceClient.from_service_account_info.__func__(RealmsServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RealmsServiceAsyncClient: The constructed client.
"""
return RealmsServiceClient.from_service_account_file.__func__(RealmsServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> RealmsServiceTransport:
"""Returns the transport used by the client instance.
Returns:
RealmsServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(RealmsServiceClient).get_transport_class, type(RealmsServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, RealmsServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the realms service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.RealmsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = RealmsServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_realms(
self,
request: realms.ListRealmsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListRealmsAsyncPager:
r"""Lists realms in a given project and location.
Args:
request (:class:`google.cloud.gaming_v1.types.ListRealmsRequest`):
The request object. Request message for
RealmsService.ListRealms.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.services.realms_service.pagers.ListRealmsAsyncPager:
Response message for
RealmsService.ListRealms.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.ListRealmsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_realms,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListRealmsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_realm(
self,
request: realms.GetRealmRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.Realm:
r"""Gets details of a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.GetRealmRequest`):
The request object. Request message for
RealmsService.GetRealm.
name (:class:`str`):
Required. The name of the realm to retrieve. Uses the
form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.Realm:
A realm resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.GetRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_realm,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_realm(
self,
request: realms.CreateRealmRequest = None,
*,
parent: str = None,
realm: realms.Realm = None,
realm_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new realm in a given project and location.
Args:
request (:class:`google.cloud.gaming_v1.types.CreateRealmRequest`):
The request object. Request message for
RealmsService.CreateRealm.
parent (:class:`str`):
Required. The parent resource name. Uses the form:
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm (:class:`google.cloud.gaming_v1.types.Realm`):
Required. The realm resource to be
created.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
realm_id (:class:`str`):
Required. The ID of the realm
resource to be created.
This corresponds to the ``realm_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, realm, realm_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.CreateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if realm is not None:
request.realm = realm
if realm_id is not None:
request.realm_id = realm_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def delete_realm(
self,
request: realms.DeleteRealmRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.DeleteRealmRequest`):
The request object. Request message for
RealmsService.DeleteRealm.
name (:class:`str`):
Required. The name of the realm to delete. Uses the
form:
``projects/{project}/locations/{location}/realms/{realm}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.DeleteRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def update_realm(
self,
request: realms.UpdateRealmRequest = None,
*,
realm: realms.Realm = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Patches a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.UpdateRealmRequest`):
The request object. Request message for
RealmsService.UpdateRealm.
realm (:class:`google.cloud.gaming_v1.types.Realm`):
Required. The realm to be updated. Only fields specified
in update_mask are updated.
This corresponds to the ``realm`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
                https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.gaming_v1.types.Realm` A realm
resource.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([realm, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = realms.UpdateRealmRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if realm is not None:
request.realm = realm
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_realm,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
realms.Realm,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
async def preview_realm_update(
self,
request: realms.PreviewRealmUpdateRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> realms.PreviewRealmUpdateResponse:
r"""Previews patches to a single realm.
Args:
request (:class:`google.cloud.gaming_v1.types.PreviewRealmUpdateRequest`):
The request object. Request message for
RealmsService.PreviewRealmUpdate.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.gaming_v1.types.PreviewRealmUpdateResponse:
Response message for
RealmsService.PreviewRealmUpdate.
"""
# Create or coerce a protobuf request object.
request = realms.PreviewRealmUpdateRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.preview_realm_update,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("realm.name", request.realm.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-game-servers",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("RealmsServiceAsyncClient",)
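# A minimal usage sketch (illustrative only; the project and location below are
# placeholders, and the client is assumed to pick up default credentials):
#
#   import asyncio
#   from google.cloud import gaming_v1
#
#   async def main():
#       client = gaming_v1.RealmsServiceAsyncClient()
#       pager = await client.list_realms(parent="projects/my-project/locations/global")
#       async for realm in pager:
#           print(realm.name)
#
#   asyncio.run(main())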
| apache-2.0 | -6,299,150,042,904,482,000 | 38.894737 | 171 | 0.606111 | false |
dgzzhb/GAOthello | board.py | 1 | 10425 | #!/usr/bin/env python
""" game.py Humberto Henrique Campos Pinheiro
Game logic.
"""
from config import WHITE, BLACK, EMPTY
from copy import deepcopy
class Board:
""" Rules of the game """
def __init__ ( self ):
self.board = [ [0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0], \
[0,0,0,0,0,0,0,0] ]
self.board[3][4] = BLACK
self.board[4][3] = BLACK
self.board[3][3] = WHITE
self.board[4][4] = WHITE
self.valid_moves = []
def __getitem__ ( self, i, j):
return self.board[i][j]
def lookup ( self, row, column, color ):
""" Returns the possible positions that there exists at least one straight
(horizontal, vertical, or diagonal) line between the piece specified by (row,
column, color) and another piece of the same color.
"""
if color == BLACK:
other = WHITE
else:
other = BLACK
places = []
if ( row < 0 or row > 7 or column < 0 or column > 7 ):
return places
# For each direction search for possible positions to put a piece.
# north
i = row - 1
if ( i >= 0 and self.board[i][column] == other ):
i = i - 1
while ( i >= 0 and self.board[i][column] == other ):
i = i - 1
if ( i >= 0 and self.board[i][column] == 0 ):
places = places + [( i, column)]
# northeast
i = row - 1
j = column + 1
if ( i >= 0 and j < 8 and self.board[i][j] == other ) :
i = i - 1
j = j + 1
while ( i >= 0 and j < 8 and self.board[i][j] == other ):
i = i - 1
j = j + 1
if ( i >= 0 and j < 8 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# east
j = column + 1
if ( j < 8 and self.board[row][j] == other ) :
j = j + 1
while ( j < 8 and self.board[row][j] == other ):
j = j + 1
if ( j < 8 and self.board[row][j] == 0 ):
places = places + [(row, j)]
# southeast
i = row + 1
j = column + 1
if ( i < 8 and j < 8 and self.board[i][j] == other ) :
i = i + 1
j = j + 1
while ( i < 8 and j < 8 and self.board[i][j] == other ):
i = i + 1
j = j + 1
if ( i < 8 and j < 8 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# south
i = row + 1
if ( i < 8 and self.board[i][column] == other ):
i = i + 1
while ( i < 8 and self.board[i][column] == other ):
i = i + 1
if ( i < 8 and self.board[i][column] == 0 ):
places = places + [(i, column)]
# southwest
i = row + 1
j = column - 1
if ( i < 8 and j >= 0 and self.board[i][j] == other ):
i = i + 1
j = j - 1
while ( i < 8 and j >= 0 and self.board[i][j] == other ):
i = i + 1
j = j - 1
if ( i < 8 and j >= 0 and self.board[i][j] == 0 ):
places = places + [(i, j)]
# west
j = column - 1
if ( j >= 0 and self.board[row][j] == other ):
j = j - 1
while ( j >= 0 and self.board[row][j] == other ):
j = j - 1
if ( j >= 0 and self.board[row][j] == 0 ):
places = places + [(row, j)]
# northwest
i = row - 1
j = column - 1
if ( i >= 0 and j >= 0 and self.board[i][j] == other):
i = i - 1
j = j - 1
while ( i >= 0 and j >= 0 and self.board[i][j] == other):
i = i - 1
j = j - 1
if ( i >= 0 and j >= 0 and self.board[i][j] == 0 ):
places = places + [(i, j)]
return places
def get_valid_moves ( self, color ):
""" Get the avaiable positions to put a piece of the given color. For each
piece of the given color we search its neighbours, searching for pieces of the
other color to determine if is possible to make a move. This method must be
called before apply_move."""
if color == BLACK:
other = WHITE
else:
other = BLACK
places = []
for i in range ( 8 ) :
for j in range ( 8 ) :
if self.board[i][j] == color :
places = places + self.lookup ( i, j, color )
places = list( set ( places ))
self.valid_moves = places
return places
def apply_move ( self, move, color ):
""" Determine if the move is correct and apply the changes in the game.
"""
if move in self.valid_moves:
self.board[move[0]][move[1]] = color
for i in range ( 1, 9 ):
self.flip ( i, move, color )
def flip ( self, direction, position, color ):
""" Flips (capturates) the pieces of the given color in the given direction
(1=North,2=Northeast...) from position. """
if direction == 1:
# north
row_inc = -1
col_inc = 0
elif direction == 2:
# northeast
row_inc = -1
col_inc = 1
elif direction == 3:
# east
row_inc = 0
col_inc = 1
elif direction == 4:
# southeast
row_inc = 1
col_inc = 1
elif direction == 5:
# south
row_inc = 1
col_inc = 0
elif direction == 6:
# southwest
row_inc = 1
col_inc = -1
elif direction == 7:
# west
row_inc = 0
col_inc = -1
elif direction == 8:
# northwest
row_inc = -1
col_inc = -1
places = [] # pieces to flip
i = position[0] + row_inc
j = position[1] + col_inc
if color == WHITE:
other = BLACK
else:
other = WHITE
if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:
# assures there is at least one piece to flip
places = places + [(i,j)]
i = i + row_inc
j = j + col_inc
while i in range( 8 ) and j in range( 8 ) and self.board[i][j] == other:
# search for more pieces to flip
places = places + [(i,j)]
i = i + row_inc
j = j + col_inc
if i in range( 8 ) and j in range( 8 ) and self.board[i][j] == color:
# found a piece of the right color to flip the pieces between
for pos in places:
# flips
self.board[pos[0]][pos[1]] = color
def get_changes ( self ):
""" Return black and white counters. """
whites, blacks, empty = self.count_stones()
return ( self.board, blacks, whites )
def game_ended ( self ):
""" Is the game ended? """
# board full or wipeout
whites, blacks, empty = self.count_stones()
if whites == 0 or blacks == 0 or empty == 0:
return True
# no valid moves for both players
if self.get_valid_moves( BLACK ) == [] and self.get_valid_moves( WHITE ) == []:
return True
return False
def print_board ( self ):
for i in range ( 8 ):
print i, ' |',
for j in range ( 8 ):
if self.board[i][j] == BLACK:
print 'B',
elif self.board[i][j] == WHITE:
print 'W',
else:
print ' ',
print '|',
print
def count_stones( self ):
""" Returns the number of white pieces, black pieces and empty squares, in
this order.
"""
whites = 0
blacks = 0
empty = 0
for i in range( 8 ):
for j in range( 8 ):
if self.board[i][j] == WHITE:
whites += 1
elif self.board[i][j] == BLACK:
blacks += 1
else:
empty += 1
return whites, blacks, empty
def compare( self, otherBoard ):
""" Return a board containing only the squares that are empty in one of the boards
and not empty on the other.
"""
diffBoard = Board()
diffBoard.board[3][4] = 0
diffBoard.board[3][3] = 0
diffBoard.board[4][3] = 0
diffBoard.board[4][4] = 0
for i in range( 8 ):
for j in range( 8 ):
if otherBoard.board[i][j] != self.board[i][j]:
diffBoard.board[i][j] = otherBoard.board[i][j]
        return diffBoard
def get_adjacent_count( self, color ):
""" Return how many empty squares there are on the board adjacent to the specified color."""
adjCount = 0
for x,y in [(a,b) for a in range( 8 ) for b in range( 8 ) if self.board[a][b] == color]:
for i,j in [(a,b) for a in [-1,0,1] for b in [-1,0,1]]:
if 0 <= x+i <= 7 and 0 <= y+j <= 7:
if self.board[x+i][y+j] == EMPTY:
adjCount += 1
return adjCount
def next_states( self, color ):
""" Given a player's color return all the boards resulting from moves that this player
        can do. It's implemented as an iterator.
"""
valid_moves = self.get_valid_moves( color )
for move in valid_moves:
newBoard = deepcopy( self )
newBoard.apply_move( move, color )
yield newBoard
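# A minimal self-test sketch (not part of the original module): play the first
# legal move for BLACK and show the resulting position and stone counts.
if __name__ == '__main__':
    board = Board()
    moves = board.get_valid_moves(BLACK)
    print 'valid opening moves for BLACK:', moves
    if moves:
        board.apply_move(moves[0], BLACK)
    board.print_board()
    print 'whites, blacks, empty:', board.count_stones()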
| mit | -6,171,274,448,251,786,000 | 31.578125 | 100 | 0.424077 | false |
sbg/Mitty | mitty/simulation/sequencing/syntheticsequencer.py | 1 | 1955 | """A fully synthetic read model that allows us to produce single end or paired end reads with arbitrary
read and template lengths. Its read model format is as follows:
{
'model_class': 'illumina',
'model_description': '',
'paired': True/False,
'read_length': 100,
'mean_template_length': 300,
'std_template_length': 100,
'bq_mat': [],
'cum_bq_mat': []
}
"""
import pickle
import numpy as np
def create_model(
pkl,
read_length=100, mean_template_length=500, std_template_length=100, max_tlen=1000,
bq0=30, k=200, sigma=10,
comment=''):
description = """This is a synthetic read model that generates reads
with a length of {} bp, a template length of {} +/- {} bp.
The mean base quality follows the equation:
  {} * (1 - exp(- {} * x ** 2))
  where x decreases linearly from 1 at the first base of the read to 0 at the last base.
The base quality for a given base in a given read is drawn from a gaussian with standard deviation {}
{}""".format(
read_length,
mean_template_length,
std_template_length,
bq0, k, sigma,
comment)
bq = bq0 * (1 - np.exp(- k * np.linspace(1, 0, read_length) ** 2))
one_bq_mat = np.zeros((read_length, 94), dtype=float)
for n in range(read_length):
one_bq_mat[n, :] = np.exp(- 0.5 * ((np.arange(94) - bq[n]) / sigma) ** 2)
one_cum_bq_mat = one_bq_mat.cumsum(axis=1) / one_bq_mat.sum(axis=1).clip(1)[:, None]
tlen_mat = np.exp(- 0.5 * ((np.arange(max_tlen) - mean_template_length) / std_template_length) ** 2)
tlen_mat /= tlen_mat.sum()
cum_tlen = tlen_mat.cumsum() / tlen_mat.sum()
pickle.dump({
'model_class': 'illumina',
'model_description': description,
'min_mq': 0,
'bq_mat': np.array((one_bq_mat, one_bq_mat)),
'cum_bq_mat': np.array((one_cum_bq_mat, one_cum_bq_mat)),
'tlen': tlen_mat,
'cum_tlen': cum_tlen,
'mean_rlen': read_length,
'min_rlen': read_length,
'max_rlen': read_length,
'r_cnt': 1
}, open(pkl, 'wb'))
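# A minimal usage sketch (hypothetical output path): write a model with 100 bp reads
# and 500 +/- 100 bp templates, ready to be loaded by the read simulator.
#
#   create_model('synthetic-read-model.pkl',
#                read_length=100, mean_template_length=500, std_template_length=100)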
| apache-2.0 | 9,079,642,919,832,840,000 | 31.04918 | 103 | 0.623018 | false |
CNS-OIST/STEPS_Example | publication_models/API_1/Anwar_J Neurosci_2013/extra/constants_hh.py | 1 | 2809 | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# constants_hh.py : provides a set of parameters and other constants for the
# Hodgkin-Huxley model in the above study.
# It is intended that this file is not altered.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import math
# # # # # # # # # # # # # # # # SIMULATION CONTROLS # # # # # # # # # # # # #
EF_DT = 1.0e-5 # The EField dt
NTIMEPOINTS = 5000
TIMECONVERTER = 1.0e-5
NITER = 1
############################ PARAMETERS ################################
init_pot = -65e-3
TEMPERATURE = 20.0
Q10 = 3
Qt = math.pow(Q10, ((TEMPERATURE-6.3)/10))
########## BULK RESISTIVITY ##########
Ra = 1.0
########## MEMBRANE CAPACITANCE ##########
memb_capac = 1.0e-2
# # # # # # # # # # # # # # # # # # CHANNELS # # # # # # # # # # # # # # # #
# Voltage range for gating kinetics in Volts
Vrange = [-100.0e-3, 50e-3, 1e-4]
# Hodgkin-Huxley gating kinetics
def a_n(V):
return ((0.01*(10-(V+65.))/(math.exp((10-(V+65.))/10.)-1)))
def b_n(V):
return ((0.125*math.exp(-(V+65.)/80.)))
def a_m(V):
return ((0.1*(25-(V+65.))/(math.exp((25-(V+65.))/10.)-1)))
def b_m(V):
return ((4.*math.exp(-(V+65.)/18.)))
def a_h(V):
return ((0.07*math.exp(-(V+65.)/20.)))
def b_h(V):
return ((1./(math.exp((30-(V+65.))/10.)+1)))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Potassium conductance = 0.036 S/cm2
# Sodium conductance = 0.120 S/cm2
# Potassium single-channel conductance
K_G = 20.0e-12 # Siemens
# Potassium channel density
K_ro = 18.0e12 # per square meter
# Potassium reversal potential
K_rev = -77e-3 # volts
# Sodium single-channel conductance
Na_G = 20.0e-12 # Siemens
# Sodium channel density
Na_ro = 60.0e12 # per square meter
# Sodium reversal potential
Na_rev = 50e-3 # volts
# Leak single-channel conductance
L_G = 1.0e-12 # Siemens
# Leak density
L_ro = 10.0e12 # per square meter
# Leak reveral potential
leak_rev = -50.0e-3 # volts
# A table of potassium channel initial population factors:
# n0, n1, n2, n3, n4
K_facs = [ 0.21768, 0.40513, 0.28093, 0.08647, 0.00979 ]
# A table of sodium channel initial population factors
# m0h0, m1h0, m2h0, m3h0, m0h1, m1h1, m2h1, m3h1:
Na_facs = [ 0.34412, 0.05733, 0.00327, 6.0e-05, \
0.50558, 0.08504, 0.00449, 0.00010 ]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
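# Example (sketch, using the definitions above): the steady-state activation of the
# potassium gate and one common convention for its Q10-corrected time constant at a
# membrane potential V (in mV):
#
#   n_inf = a_n(V) / (a_n(V) + b_n(V))
#   tau_n = 1.0 / (Qt * (a_n(V) + b_n(V)))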
| gpl-2.0 | 7,726,932,273,181,009,000 | 22.805085 | 77 | 0.533286 | false |
ContinuumIO/ashiba | enaml/enaml/widgets/datetime_selector.py | 1 | 2100 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Bool, Str, Typed, ForwardTyped, observe, set_default
from enaml.core.declarative import d_
from .bounded_datetime import BoundedDatetime, ProxyBoundedDatetime
class ProxyDatetimeSelector(ProxyBoundedDatetime):
""" The abstract defintion of a proxy DatetimeSelector object.
"""
#: A reference to the DatetimeSelector declaration.
declaration = ForwardTyped(lambda: DatetimeSelector)
def set_datetime_format(self, format):
raise NotImplementedError
def set_calendar_popup(self, popup):
raise NotImplementedError
class DatetimeSelector(BoundedDatetime):
""" A widget to edit a Python datetime.datetime object.
This is a geometrically smaller control than what is provided by
Calendar.
"""
#: A python date format string to format the datetime. If None is
#: supplied (or is invalid) the system locale setting is used.
#: This may not be supported by all backends.
datetime_format = d_(Str())
#: Whether to use a calendar popup for selecting the date.
calendar_popup = d_(Bool(False))
#: A datetime selector expands freely in width by default
hug_width = set_default('ignore')
#: A reference to the ProxyDateSelector object.
proxy = Typed(ProxyDatetimeSelector)
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe(('datetime_format', 'calendar_popup'))
def _update_proxy(self, change):
""" An observer which updates the proxy with state change.
"""
# The superclass implementation is sufficient.
super(DatetimeSelector, self)._update_proxy(change)
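# A hypothetical .enaml snippet using this widget (the right-hand side names are
# placeholders for an enclosing model object):
#
#   DatetimeSelector:
#       calendar_popup = True
#       datetime_format = 'dd/MM/yyyy HH:mm'
#       datetime := model.start_time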
| bsd-3-clause | -5,191,797,719,013,015,000 | 34.59322 | 79 | 0.605714 | false |
CuteLemon/Learn | NewsAPI_Scraper/db_operation.py | 1 | 1050 | import pymongo as Mongo
DB_NAME = 'localhost'
DB_PORT = 27017
TEST_JSON = {'url':'http://hello.com','content':'Lemon Tree'}
class DB():
def __init__(self,db,port):
self.client = Mongo.MongoClient(db,port)
self.db = self.client.test
self.collect = self.db.test_collect
def insert(self,c):
self.collect.insert_one(c)
def find(self,k):
return self.collect.find(k)
def delete(self,k):
return self.collect.delete_many(k)
def close(self):
self.client.close()
if __name__ == '__main__':
# Client = Mongo.MongoClient(DB,PORT)
# db = Client.test
# collect = db.test_collect
# collect.insert(TEST_JSON)
# for x in collect.find({'content':'Lemon Tree'}):
# print x
# Client.close()
print 'mongodb test start:'
db = DB(DB_NAME,DB_PORT)
db.insert(TEST_JSON)
result = db.find({'content':'Lemon Tree'})
for x in result:
print x
db.delete({'content':'Lemon Tree'})
db.close()
print 'mongodb test complete!'
| gpl-3.0 | -5,302,895,376,558,269,000 | 22.333333 | 61 | 0.592381 | false |
bbondy/brianbondy.gae | libs/werkzeug/testsuite/contrib/cache.py | 1 | 5814 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
try:
import redis
try:
from redis.exceptions import ConnectionError as RedisConnectionError
cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
except RedisConnectionError:
redis = None
except ImportError:
redis = None
try:
import pylibmc as memcache
except ImportError:
try:
from google.appengine.api import memcache
except ImportError:
try:
import memcache
except ImportError:
memcache = None
class CacheTestCase(WerkzeugTestCase):
make_cache = None
def test_generic_get_dict(self):
c = self.make_cache()
assert c.set('a', 'a')
assert c.set('b', 'b')
d = c.get_dict('a', 'b')
assert 'a' in d
assert 'a' == d['a']
assert 'b' in d
assert 'b' == d['b']
    def test_generic_set_many_iterable(self):
c = self.make_cache()
assert c.set_many({0: 0, 1: 1, 2: 4})
assert c.get(2) == 4
assert c.set_many((i, i*i) for i in range(3))
assert c.get(2) == 4
def test_generic_set_get(self):
c = self.make_cache()
for i in range(3):
assert c.set(str(i), i * i)
for i in range(3):
result = c.get(str(i))
assert result == i * i, result
def test_generic_get_set(self):
c = self.make_cache()
assert c.set('foo', ['bar'])
assert c.get('foo') == ['bar']
def test_generic_get_many(self):
c = self.make_cache()
assert c.set('foo', ['bar'])
assert c.set('spam', 'eggs')
self.assert_equal(list(c.get_many('foo', 'spam')), [['bar'], 'eggs'])
def test_generic_set_many(self):
c = self.make_cache()
assert c.set_many({'foo': 'bar', 'spam': ['eggs']})
assert c.get('foo') == 'bar'
assert c.get('spam') == ['eggs']
def test_generic_expire(self):
c = self.make_cache()
assert c.set('foo', 'bar', 1)
time.sleep(2)
assert c.get('foo') is None
def test_generic_add(self):
c = self.make_cache()
# sanity check that add() works like set()
assert c.add('foo', 'bar')
assert c.get('foo') == 'bar'
assert not c.add('foo', 'qux')
assert c.get('foo') == 'bar'
def test_generic_delete(self):
c = self.make_cache()
assert c.add('foo', 'bar')
assert c.get('foo') == 'bar'
assert c.delete('foo')
assert c.get('foo') is None
def test_generic_delete_many(self):
c = self.make_cache()
assert c.add('foo', 'bar')
assert c.add('spam', 'eggs')
assert c.delete_many('foo', 'spam')
assert c.get('foo') is None
assert c.get('spam') is None
def test_generic_inc_dec(self):
c = self.make_cache()
assert c.set('foo', 1)
assert c.inc('foo') == c.get('foo') == 2
assert c.dec('foo') == c.get('foo') == 1
assert c.delete('foo')
def test_generic_true_false(self):
c = self.make_cache()
assert c.set('foo', True)
assert c.get('foo') == True
assert c.set('bar', False)
assert c.get('bar') == False
class SimpleCacheTestCase(CacheTestCase):
make_cache = cache.SimpleCache
class FileSystemCacheTestCase(CacheTestCase):
tmp_dir = None
def make_cache(self, **kwargs):
if self.tmp_dir is None:
self.tmp_dir = tempfile.mkdtemp()
return cache.FileSystemCache(cache_dir=self.tmp_dir, **kwargs)
def teardown(self):
if self.tmp_dir is not None:
shutil.rmtree(self.tmp_dir)
def test_filesystemcache_prune(self):
THRESHOLD = 13
c = self.make_cache(threshold=THRESHOLD)
for i in range(2 * THRESHOLD):
assert c.set(str(i), i)
cache_files = os.listdir(self.tmp_dir)
assert len(cache_files) <= THRESHOLD
def test_filesystemcache_clear(self):
c = self.make_cache()
assert c.set('foo', 'bar')
cache_files = os.listdir(self.tmp_dir)
assert len(cache_files) == 1
assert c.clear()
cache_files = os.listdir(self.tmp_dir)
assert len(cache_files) == 0
class RedisCacheTestCase(CacheTestCase):
def make_cache(self):
return cache.RedisCache(key_prefix='werkzeug-test-case:')
def teardown(self):
self.make_cache().clear()
def test_compat(self):
c = self.make_cache()
assert c._client.set(c.key_prefix + 'foo', 'Awesome')
self.assert_equal(c.get('foo'), b'Awesome')
assert c._client.set(c.key_prefix + 'foo', '42')
self.assert_equal(c.get('foo'), 42)
class MemcachedCacheTestCase(CacheTestCase):
def make_cache(self):
return cache.MemcachedCache(key_prefix='werkzeug-test-case:')
def teardown(self):
self.make_cache().clear()
def test_compat(self):
c = self.make_cache()
assert c._client.set(c.key_prefix + b'foo', 'bar')
self.assert_equal(c.get('foo'), 'bar')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SimpleCacheTestCase))
suite.addTest(unittest.makeSuite(FileSystemCacheTestCase))
if redis is not None:
suite.addTest(unittest.makeSuite(RedisCacheTestCase))
if memcache is not None:
suite.addTest(unittest.makeSuite(MemcachedCacheTestCase))
return suite
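# The suite can be executed directly with the standard unittest runner, e.g.:
#
#   if __name__ == '__main__':
#       unittest.main(defaultTest='suite')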
| mit | 1,778,556,580,544,685,600 | 28.21608 | 91 | 0.576367 | false |
jimgong92/allezViens | connect.py | 1 | 8842 | from models import *
from run import db
import sys
import math
import hashlib
import time
from communication import sendPickNotificationEmail
'''DATABASE INSERTION/UPDATE'''
#Adds driver to database
def addDriver(id, alias, oLat, oLon, dLat, dLon, date):
url = makeURL(id)
driver = Driver(id, alias, oLat, oLon, dLat, dLon, date, url)
db.session.add(driver)
save()
return driver
#Adds passenger to database
def addPassenger(id, alias, oLat, oLon, dLat, dLon, date):
url = makeURL(id)
passenger = Passenger(id, alias, oLat, oLon, dLat, dLon, date, url)
db.session.add(passenger)
save()
return passenger
#Adds a driver to a passenger's picks
def pickDriver(driverID, passengerID, add):
driver = getDriver(driverID)
passenger = getPassenger(passengerID)
#Toggle pick based on whether driver is already in passenger's picks
#currentPicks = findPassengerPicks(passengerID)
# if (driver in currentPicks):
# passenger.unpick(driver)
# else:
# passenger.pick(driver)
if(add):
passenger.pick(driver)
else:
passenger.unpick(driver)
save()
#Adds a passenger to a driver's picks
def pickPassenger(passengerID, driverID, add):
passenger = getPassenger(passengerID)
driver = getDriver(driverID)
# currentPicks = findDriverPicks(driverID)
# if (passenger in currentPicks):
# driver.unpick(passenger)
# else:
# driver.pick(passenger)
if(add):
driver.pick(passenger)
else:
driver.unpick(passenger)
save()
#Validates driver
def validateDriver(driverID):
driver = getDriver(driverID)
driver.validateDriver()
save()
#Validates passenger
def validatePassenger(passengerID):
passenger = getPassenger(passengerID)
passenger.validatePassenger()
save()
def updatePassenger(passengerDict):
passenger = getPassenger(passengerDict['email'])
return update(passenger,passengerDict)
def updateDriver(driverDict):
driver = getDriver(driverDict['email'])
return update(driver,driverDict)
#Updates given model
def update(model, dictionary):
if(model != ''):
model.oLat = dictionary['oLat']
model.oLon = dictionary['oLon']
model.dLat = dictionary['dLat']
model.dLon = dictionary['dLon']
model.date = dictionary['date']
model.alias = dictionary['alias']
db.session.add(model)
save()
return True
else:
return False
'''DATABASE GET'''
#Retrieve driver instance by ID
def getDriver(driverID):
try:
result = Driver.query.filter_by(email=driverID).first()
except:
result = ''
finally:
return result
#Retrieve passenger instance by ID
def getPassenger(passengerID):
try:
result = Passenger.query.filter_by(email=passengerID).first()
except:
result = ''
finally:
return result
#Returns all drivers that contain passenger route and same date
#Identifies drivers whose boundary box contains the passenger's route
#PARAMS: Passenger's origin and destination coordinates
def findMatchableDrivers(oLat, oLon, dLat, dLon, date):
drivers = Driver.query.filter(Driver.date == date).all()
res = []
for i in range(len(drivers)):
minLat, maxLat = min(drivers[i].oLat, drivers[i].dLat), max(drivers[i].oLat, drivers[i].dLat)
minLon, maxLon = min(drivers[i].oLon, drivers[i].dLon), max(drivers[i].oLon, drivers[i].dLon)
if (minLat <= oLat <= maxLat and minLat <= dLat <= maxLat):
if (minLon <= oLon <= maxLon and minLon <= dLon <= maxLon):
res.append(drivers[i])
return formatResults(res)
#Returns all passengers within given bound box and same date
#Returns passengers whose coordinates are in the driver's boundary box
#PARAMS: Driver's origin and destination coordinates
def findMatchablePassengers(oLat, oLon, dLat, dLon, date):
minLat, maxLat = min(oLat, dLat), max(oLat, dLat)
minLon, maxLon = min(oLon, dLon), max(oLon, dLon)
maxLat, minLon = makeBuffer(maxLat,minLon, 5, "NW")
minLat, maxLon = makeBuffer(minLat,maxLon, 5, "SE")
passengers = Passenger.query.filter(Passenger.date == date,
Passenger.oLat >= minLat, Passenger.oLat <= maxLat,
Passenger.dLat >= minLat, Passenger.dLat <= maxLat,
Passenger.oLon >= minLon, Passenger.oLon <= maxLon,
Passenger.dLon >= minLon, Passenger.dLon <= maxLon).all()
return formatResults(passengers)
#Returns all picks by given driver
def findDriverPicks(driverID):
return getDriver(driverID).picks
#Returns all picks by given driver
def findPassengerPicks(passengerID):
return getPassenger(passengerID).picks
#Returns object with user's email, origin, destination, and pick information
def getInfoByUrl(url):
match = Driver.query.filter_by(editURL=url).all()
if(len(match)>0):
driver = match[0]
picks = findDriverPicks(driver.email)
return 'D', objectifyWithPickInfo(driver, picks)
match = Passenger.query.filter_by(editURL=url).all()
if(len(match)>0):
passenger = match[0]
picks = findPassengerPicks(passenger.email)
return 'P', objectifyWithPickInfo(passenger, picks)
return 'NA', False
#Retrieves driver's info by email
def getDriverInfo(email):
driver = getDriver(email)
picks = findDriverPicks(driver.email)
return objectifyWithPickInfo(driver,picks)
#Retrieves passenger's info by email
def getPassengerInfo(email):
passenger = getPassenger(email)
picks = findPassengerPicks(passenger.email)
return objectifyWithPickInfo(passenger,picks)
#Validates existing urls
def urlExists(url, validate):
urlType, info = getInfoByUrl(url)
if(urlType == 'P'):
if(validate):
validatePassenger(info['email'])
return True
elif(urlType == 'D'):
if(validate):
validateDriver(info['email'])
return True
else:
return False
def sendMessage(to, sender, message, fromType):
sent = True
try:
if(fromType[0].upper()=='D'):
passenger = getPassenger(to)
url = passenger.editURL
else:
driver = getDriver(to)
url = driver.editURL
sendPickNotificationEmail(to, sender, url)
except:
sent = False
finally:
return sent
'''DATABASE DELETION'''
#Deletes driver + route from database
def deleteDriver(id):
driver = getDriver(id)
db.session.delete(driver)
save()
return ''
#Deletes passenger + route from database
def deletePassenger(id):
passenger = getPassenger(id)
db.session.delete(passenger)
save()
return ''
'''HELPER FUNCTIONS'''
#Commits db session changes
def save():
print 'save function'
for obj in db.session:
print obj
try:
db.session.commit()
except:
e = sys.exc_info()[0]
print e
print 'Error in session D:'
finally:
print 'after db.session.commit()'
#Returns JSON-friendly data from a model array
def formatResults(modelArray):
res = []
for i in range(len(modelArray)):
print 'in for loop'
res.append(objectify(modelArray[i]))
return res
#Pulls model data into JSON format
def objectify(model):
obj = {
"email": model.email,
"alias": model.alias,
"origin": [float(model.oLat), float(model.oLon)],
"destination": [float(model.dLat), float(model.dLon)],
"date": model.date
}
return obj
#Extends objectify with pick information
def objectifyWithPickInfo(model, picks):
obj = objectify(model)
obj["picks"] = parseUserPicks(model, picks)
return obj
#Takes users pick information and returns array of each pick denoting either CONFIRMED or PENDING status
def parseUserPicks(user, picks):
res = []
for pick in picks:
if (user in pick.picks):
res.append({"id": pick.email, "status": "CONFIRMED"})
else:
res.append({"id": pick.email, "status": "PENDING"})
return res
#Adds buffer around location
def makeBuffer(lat,lon,miles,direction):
#This earth radius in miles may not be entirely accurate - there are various numbers and the earth is not a perfect sphere
#for the case of a buffer though, probably doesn't really matter
earthRadiusMiles = 3959
northwest = math.radians(315)
southeast = math.radians(135)
lat = math.radians(lat)
lon = math.radians(lon)
#cast as float or this breaks, because angular direction is a tiny tiny number
angularDirection = float(miles)/float(earthRadiusMiles)
if(direction=="NW"):
bearing = northwest
if(direction=="SE"):
bearing = southeast
newLat = math.asin(math.sin(lat)*math.cos(angularDirection)) + math.cos(lat)*math.sin(angularDirection)*math.cos(bearing)
newLon = lon + math.atan2(math.sin(bearing)*math.sin(angularDirection)*math.cos(lat), math.cos(angularDirection)-math.sin(lat)*math.sin(newLat))
return math.degrees(newLat), math.degrees(newLon)
#Generates unique hash for trip route urls
def makeURL(id):
id = id + time.strftime("%M%S")
hash = hashlib.md5(id).hexdigest()
url = hash[0:8]
while(urlExists(url,False)):
id = id + time.strftime("%M%S")
hash = hashlib.md5(id).hexdigest()
url = hash[0:8]
return url
| mit | 5,327,332,219,861,663,000 | 27.990164 | 146 | 0.708776 | false |
aldebaran/qibuild | python/qibuild/test/projects/usefoopymodule/test.py | 1 | 1167 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
"""
This is an equivalent of a C++ program trying to load a
Python module using libqi, but written in Python.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
def main():
""" Main Entry Point """
from_env = os.environ.get("QI_ADDITIONAL_SDK_PREFIXES")
if not from_env:
sys.exit("QI_ADDITIONAL_SDK_PREFIXES not set")
prefixes = from_env.split(os.path.pathsep)
found = False
for prefix in prefixes:
candidate = os.path.join(prefix, "share", "qi", "module", "foo.mod")
if os.path.exists(candidate):
found = True
with open(candidate, "r") as fp:
contents = fp.read()
if contents != "python\n":
sys.exit("Expected python\\n, got: " + contents)
if not found:
sys.exit("foo.mod not found")
import foo
if __name__ == "__main__":
main()
| bsd-3-clause | -454,405,756,292,281,000 | 29.710526 | 84 | 0.615253 | false |
MikeDacre/fyrd | fyrd/job.py | 1 | 56470 | # -*- coding: utf-8 -*-
"""
Class and methods to handle Job submission.
This module only defines a single object: the Job class.
"""
import os as _os
import sys as _sys
from uuid import uuid4 as _uuid
from time import sleep as _sleep
from datetime import datetime as _dt
from traceback import print_tb as _tb
# Try to use dill, revert to pickle if not found
import dill as _pickle
from six import reraise as _reraise
from six import text_type as _txt
from six import string_types as _str
from six import integer_types as _int
###############################################################################
# Our functions #
###############################################################################
from . import run as _run
from . import conf as _conf
from . import queue as _queue
from . import logme as _logme
from . import script_runners as _scrpts
from . import batch_systems as _batch
from . import ClusterError as _ClusterError
from .submission_scripts import Function as _Function
_options = _batch.options
__all__ = ['Job']
###############################################################################
# The Job Class #
###############################################################################
class Job(object):
"""Information about a single job on the cluster.
Holds information about submit time, number of cores, the job script,
and more.
Below are the core attributes and methods required to use this class,
note that this is an incomplete list.
Attributes
----------
id : str
The ID number for the job, only set once the job has been submitted
name : str
The name of the job
command : str or callable
The function or shell script that will be submitted
args : list
A list of arguments to the shell script or function in command
kwargs : dict
A dictionary of keyword arguments to the function (not shell script) in
command
state : str
A slurm-style one word description of the state of the job, one of:
- Not_Submitted
- queued
- running
- completed
- failed
submitted : bool
written : bool
done : bool
running : bool
dependencies : list
A list of dependencies associated with this job
out : str
The output of the function or a copy of stdout for a script
stdout : str
Any output to STDOUT
stderr : str
Any output to STDERR
exitcode : int
The exitcode of the running processes (the script runner if the Job is
a function).
submit_time : datetime
A datetime object for the time of submission
start : datetime
A datetime object for time execution started on the remote node.
end : datetime
A datetime object for time execution ended on the remote node.
runtime : timedelta
A timedelta object containing runtime.
files : list
A list of script files associated with this job
nodes : list
A list of nodes associated with this job
modules : list
A list of modules associated with this job
clean_files : bool
If True, auto-delete script and function files on job completion
clean_outputs : bool
If True, auto-delete script outputs and error files on job completion
kwds : dict
Keyword arguments to the batch system (e.g. mem, cores, walltime), this
is initialized by taking every additional keyword argument to the Job.
e.g. Job('echo hi', profile=large, walltime='00:20:00', mem='2GB') will
result in kwds containing {walltime: '00:20:00', mem: '2GB'}. There is
**no need to alter this manually**.
submit_args : list
List of parsed submit arguments that will be passed at runtime to the
submit function. **Generated within the Job object**, no need to set
manually, use the `kwds` attribute instead.
Methods
-------
initialize()
Use attributes to prep job for running
gen_scripts()
Create script files (but do not write them)
write(overwrite=True)
Write scripts to files
submit(wait_on_max_queue=True)
Submit the job if it is ready and the queue is sufficiently open.
resubmit(wait_on_max_queue=True)
Clean all internal states with `scrub()` and then resubmit
kill(confirm=True)
Immediately kill the currently running job
clean(delete_outputs=True, get_outputs=True)
Delete any files created by this object
scrub(confirm=True)
Clean everything and reset to an unrun state.
update(fetch_info=True)
Update our status from the queue
wait()
Block until the job is done
get()
Block until the job is done and then return the output (stdout if job
is a script), by default saves all outputs to self (i.e. .out, .stdout,
.stderr) and deletes all intermediate files before returning. If `save`
argument is `False`, does not delete the output files by default.
Notes
-----
Printing or reproducing the class will display detailed job information.
Both `wait()` and `get()` will update the queue every few seconds
(defined by the queue_update item in the config) and add queue information
to the job as they go.
If the job disappears from the queue with no information, it will be listed
as 'completed'.
All jobs have a .submission attribute, which is a Script object containing
the submission script for the job and the file name, plus a 'written' bool
that checks if the file exists.
In addition, some batch systems (e.g. SLURM) have an .exec_script
attribute, which is a Script object containing the shell command to run.
This difference is due to the fact that some SLURM systems execute multiple
lines of the submission file at the same time.
Finally, if the job command is a function, this object will also contain a
`.function` attribute, which contains the script to run the function.
"""
id = None
name = None
suffix = None
submitted = False
written = False
found = False
disappeared = False
submit_time = None
state = None
kind = None
# Arguments
kwds = None
kwargs = None
submit_args = None
# Runtime
nodes = None
cores = None
modules = None
# Files
outfile = None
errfile = None
# Scripts
submission = None
exec_script = None
function = None
imports = None
# Dependencies
dependencies = None
# Pickled output file for functions
poutfile = None
# Holds queue information in torque and slurm
queue_info = None
# Output tracking
_got_out = False
_got_stdout = False
_got_stderr = False
_got_exitcode = False
_found_files = False
_out = None
_stdout = None
_stderr = None
_exitcode = None
# Time tracking
_got_times = False
start = None
end = None
# Track update status
_updating = False
# Track preparations
initialized = False
scripts_ready = False
_kwargs = None
# Auto Cleaning
clean_files = _conf.get_option('jobs', 'clean_files')
clean_outputs = _conf.get_option('jobs', 'clean_outputs')
def __init__(self, command, args=None, kwargs=None, name=None, qtype=None,
profile=None, queue=None, **kwds):
"""Initialization function arguments.
Parameters
----------
command : function/str
The command or function to execute.
args : tuple/dict, optional
Optional arguments to add to command, particularly useful for
functions.
kwargs : dict, optional
Optional keyword arguments to pass to the command, only used for
functions.
name : str, optional
Optional name of the job. If not defined, guessed. If a job of the
same name is already queued, an integer job number (not the queue
number) will be added, ie. <name>.1
qtype : str, optional
Override the default queue type
profile : str, optional
The name of a profile saved in the conf
queue : fyrd.queue.Queue, optional
An already initiated Queue class to use.
kwds
*All other keywords are parsed into cluster keywords by the options
system.* For available keywords see `fyrd.option_help()`
"""
########################
# Sanitize arguments #
########################
_logme.log('Args pre-check: {}'.format(kwds), 'debug')
kwds = _options.check_arguments(kwds)
_logme.log('Args post-check: {}'.format(kwds), 'debug')
# Create a unique short UUID for this job
self.uuid = str(_uuid()).split('-')[0]
# Path handling
[
kwds, self.runpath, self.outpath, self.scriptpath
] = _conf.get_job_paths(kwds)
# Save command
self.command = command
self.args = args
self.kwargs = kwargs
self.profile = profile
# Get environment
if not _batch.MODE:
_batch.get_cluster_environment()
if not qtype:
qtype = _batch.MODE
if queue:
if not isinstance(queue, _queue.Queue):
raise TypeError(
'queue must be fyrd.queue.Queue is {0}'.format(type(queue))
)
self.queue = queue
else:
self.queue = _queue.default_queue(qtype)
self.batch = _batch.get_batch_system(qtype)
self.qtype = qtype
self.state = 'Not_Submitted'
# Save keywords for posterity and parsing
self.kwds = kwds
self.name = self._update_name(name)
##########################################################################
# Public Methods #
##########################################################################
################
# Properties #
################
@property
def files(self):
"""Build a list of files associated with this class."""
files = [self.submission]
if self.kind == 'script':
files.append(self.exec_script)
if self.kind == 'function':
files.append(self.function)
return files
@property
def runtime(self):
"""Return the runtime."""
if not self.done:
_logme.log('Cannot get runtime as not yet complete.' 'warn')
return None
if not self.start:
self.get_times()
return self.end-self.start
@property
def done(self):
"""Check if completed or not.
Updates the Job and Queue.
Returns
-------
done : bool
"""
# We have the same statement twice to try and avoid updating.
if self.state in _batch.DONE_STATES:
return True
if not self._updating:
self.update()
if self.state in _batch.DONE_STATES:
return True
return False
@property
def running(self):
"""Check if running or not.
Updates the Job and Queue.
Returns
-------
running : bool
"""
# We have the same statement twice to try to avoid updating.
if self.state in _batch.ACTIVE_STATES:
return True
if not self._updating:
self.update()
if self.state in _batch.ACTIVE_STATES:
return True
return False
@property
def outfiles(self):
"""A list of all outfiles associated with this Job."""
outfiles = [self.outfile, self.errfile]
if self.poutfile:
outfiles.append(self.poutfile)
return outfiles
@property
def incomplete_outfiles(self):
"""A list of all outfiles that haven't already been fetched."""
outfiles = []
if self.outfile and not self._got_stdout:
outfiles.append(self.outfile)
if self.errfile and not self._got_stderr:
outfiles.append(self.errfile)
if self.poutfile and not self._got_out:
outfiles.append(self.poutfile)
return outfiles
@property
def exitcode(self):
"""Return exitcode."""
return self.get_exitcode()
@property
def code(self):
"""Return exitcode."""
return self.get_exitcode()
@property
def out(self):
"""Return output."""
return self.get_output()
@property
def stdout(self):
"""Return output."""
return self.get_stdout()
@property
def stderr(self):
"""Return stderr."""
return self.get_stderr()
@property
def err(self):
"""Return stderr."""
return self.get_stderr()
###############################
# Core Job Handling Methods #
###############################
def initialize(self):
"""Make self runnable using set attributes."""
kwds = self.kwds
# Override autoclean state (set in config file)
if 'clean_files' in kwds:
self.clean_files = kwds.pop('clean_files')
if 'clean_outputs' in kwds:
self.clean_outputs = kwds.pop('clean_outputs')
# Set suffix
self.suffix = kwds.pop('suffix') if 'suffix' in kwds \
else _conf.get_option('jobs', 'suffix')
# Merge in profile, this includes all args from the DEFAULT profile
# as well, ensuring that those are always set at a minumum.
profile = self.profile if self.profile else 'DEFAULT'
prof = _conf.get_profile(profile)
if not prof:
raise _ClusterError('No profile found for {}'.format(profile))
for k,v in prof.args.items():
if k not in kwds:
kwds[k] = v
# Use the default profile as a backup if any arguments missing
default_args = _conf.DEFAULT_PROFILES['DEFAULT']
default_args.update(_conf.get_profile('DEFAULT').args)
for opt, arg in default_args.items():
if opt not in kwds:
_logme.log('{} not in kwds, adding from default: {}:{}'
.format(opt, opt, arg), 'debug')
kwds[opt] = arg
# Set modules
self.modules = kwds.pop('modules') if 'modules' in kwds else None
if self.modules:
self.modules = _run.opt_split(self.modules, (',', ';'))
# Make sure args are a tuple
if self.args:
self.args = tuple(_run.listify(self.args))
# In case cores are passed as None
if 'nodes' not in kwds:
kwds['nodes'] = default_args['nodes']
if 'cores' not in kwds:
kwds['cores'] = default_args['cores']
self.nodes = kwds['nodes']
self.cores = kwds['cores']
# Set output files
if 'outfile' in kwds:
pth, fle = _os.path.split(kwds['outfile'])
if not pth:
pth = self.outpath
kwds['outfile'] = _os.path.join(pth, fle)
else:
kwds['outfile'] = _os.path.join(
self.outpath, '.'.join([self.name, self.suffix, 'out']))
if 'errfile' in kwds:
pth, fle = _os.path.split(kwds['errfile'])
if not pth:
pth = self.outpath
kwds['errfile'] = _os.path.join(pth, fle)
else:
kwds['errfile'] = _os.path.join(
self.outpath, '.'.join([self.name, self.suffix, 'err']))
self.outfile = kwds['outfile']
self.errfile = kwds['errfile']
# Check and set dependencies
if 'depends' in kwds:
dependencies = _run.listify(kwds.pop('depends'))
self.dependencies = []
errmsg = 'Dependencies must be number, numeric string or Job'
for dependency in dependencies:
if not isinstance(dependency, (_str, _txt, Job)):
raise _ClusterError(errmsg)
self.dependencies.append(dependency)
# Save parsed keywords as _kwargs
self._kwargs = kwds
self.initialized = True
return self
def gen_scripts(self):
"""Create the script objects from the set parameters."""
if not self.initialized:
self.initialize()
######################################
# Command and Function Preparation #
######################################
command = self.command
args = self.args
kwargs = self.kwargs # Not self._kwargs
name = self._update_name()
kwds = self._kwargs
# Get imports
imports = kwds.pop('imports') if 'imports' in kwds else None
# Get syspaths
syspaths = kwds.pop('syspaths') if 'syspaths' in kwds else None
# Split out sys.paths from imports and set imports in self
if imports:
self.imports = []
syspaths = syspaths if syspaths else []
for i in imports:
if i.startswith('sys.path.append')\
or i.startswith('sys.path.insert'):
syspaths.append(i)
else:
self.imports.append(i)
# Function specific initialization
if callable(command):
self.kind = 'function'
script_file = _os.path.join(
self.scriptpath, '{}_func.{}.py'.format(name, self.suffix)
)
self.poutfile = self.outfile + '.func.pickle'
self.function = _Function(
file_name=script_file, function=command, args=args,
kwargs=kwargs, imports=self.imports, syspaths=syspaths,
outfile=self.poutfile
)
# Collapse the _command into a python call to the function script
executable = '#!/usr/bin/env python{}'.format(
_sys.version_info.major) if _conf.get_option(
'jobs', 'generic_python') else _sys.executable
command = '{} {}'.format(executable, self.function.file_name)
args = None
else:
self.kind = 'script'
self.poutfile = None
# Collapse args into command
command = command + ' '.join(args) if args else command
#####################
# Script Creation #
#####################
# Build execution wrapper with modules
modstr = ''
if self.modules:
for module in self.modules:
modstr += 'module load {}\n'.format(module)
# Add all of the keyword arguments at once
opt_string, submit_args = _options.options_to_string(kwds, self.qtype)
precmd = opt_string + '\n\n' + modstr
self.submit_args = submit_args
# Create queue-dependent scripts
self.submission, self.exec_script = self.batch.gen_scripts(
self, command, args, precmd, modstr
)
self.scripts_ready = True
return self
def write(self, overwrite=True):
"""Write all scripts.
Parameters
----------
overwrite : bool, optional
Overwrite existing files, defaults to True.
Returns
-------
self : Job
"""
if not self.scripts_ready:
self.gen_scripts()
_logme.log('Writing files, overwrite={}'.format(overwrite), 'debug')
self.submission.write(overwrite)
if self.exec_script:
self.exec_script.write(overwrite)
if self.function:
self.function.write(overwrite)
self.written = True
return self
def submit(self, wait_on_max_queue=True, additional_keywords=None,
max_jobs=None):
"""Submit this job.
To disable max_queue_len, set it to 0. None will allow override by
the default settings in the config file, and any positive integer will
be interpretted to be the maximum queue length.
Parameters
----------
wait_on_max_queue : bool, optional
Block until queue limit is below the maximum before submitting.
additional_keywords : dict, optional
Pass this dictionary to the batch system submission function,
not necessary.
max_jobs : int, optional
Override the maximum number of jobs to wait for
Returns
-------
self : Job
"""
if self.submitted:
_logme.log('Not submitting, already submitted.', 'warn')
return self
if not self.written:
self.write()
# Check dependencies
dependencies = []
if self.dependencies:
for depend in self.dependencies:
if isinstance(depend, Job):
if not depend.id:
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'has not been submitted',
'error'
)
return self
dependencies.append(str(depend.id))
else:
dependencies.append(str(depend))
# Wait on the queue if necessary
if wait_on_max_queue:
if not self._updating:
self.update()
self.queue.wait_to_submit(max_jobs)
# Only include queued or running dependencies
self.queue._update() # Force update
depends = []
for depend in dependencies:
dep_check = self.queue.check_dependencies(depend)
if dep_check == 'absent':
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'is not in the queue',
'error'
)
return self
elif dep_check == 'good':
_logme.log(
'Dependency {} is complete, skipping'
.format(depend), 'debug'
)
elif dep_check == 'bad':
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'has failed',
'error'
)
return self
elif dep_check == 'active':
if self.queue.jobs[depend].state == 'completeing':
continue
_logme.log('Dependency {} is {}, adding to deps'
.format(depend, self.queue.jobs[depend].state),
'debug')
depends.append(depend)
else:
# This shouldn't happen ever
raise _ClusterError('fyrd.queue.Queue.check_dependencies() ' +
'returned an unrecognized value {0}'
.format(dep_check))
self.id = self.batch.submit(
self.submission.file_name,
dependencies=depends,
job=self, args=self.submit_args,
kwds=additional_keywords
)
self.submitted = True
self.submit_time = _dt.now()
self.state = 'submitted'
if not self.submitted:
raise _ClusterError('Submission appears to have failed, this '
"shouldn't happen")
return self
def resubmit(self, wait_on_max_queue=True, cancel_running=None):
"""Attempt to auto resubmit, deletes prior files.
Parameters
----------
wait_on_max_queue : bool, optional
Block until queue limit is below the maximum before submitting.
cancel_running : bool or None, optional
If the job is currently running, cancel it before resubmitting.
If None (default), will ask the user.
To disable max_queue_len, set it to 0. None will allow override by
the default settings in the config file, and any positive integer will
be interpretted to be the maximum queue length.
Returns
-------
self : Job
"""
if self.running:
if cancel_running is None:
cancel_running = _run.get_yesno(
'Job currently running, cancel before resubmitting?', 'y'
)
if cancel_running:
self.kill(confirm=False)
self.scrub(confirm=False)
# Rerun
self.initialize()
self.gen_scripts()
self.write()
return self.submit(wait_on_max_queue)
def kill(self, confirm=True):
"""Kill the running job.
Parameters
----------
confirm : bool, optional
Returns
-------
self : Job
"""
if not self.submitted:
_logme.log('Job not submitted, cannot kill', 'warn')
return self
if self.done:
_logme.log('Job completed, cannot kill', 'warn')
return self
if confirm:
if not _run.get_yesno(
'This will terminate the running job, continue?', 'n'
):
return self
self.batch.kill(self.id)
return self
def clean(self, delete_outputs=None, get_outputs=True):
"""Delete all scripts created by this module, if they were written.
Parameters
----------
delete_outputs : bool, optional
also delete all output and err files, but get their contents first.
get_outputs : bool, optional
if delete_outputs, save outputs before deleting.
Returns
-------
self : Job
"""
_logme.log('Cleaning outputs, delete_outputs={}'
.format(delete_outputs), 'debug')
if not isinstance(delete_outputs, bool):
delete_outputs = self.clean_outputs
assert isinstance(delete_outputs, bool)
for jobfile in [self.submission, self.exec_script, self.function]:
if jobfile:
jobfile.clean()
if delete_outputs:
_logme.log('Deleting output files.', 'debug')
if get_outputs:
self.fetch_outputs(delete_files=True)
for f in self.outfiles:
if _os.path.isfile(f):
_logme.log('Deleteing {}'.format(f), 'debug')
_os.remove(f)
return self
def scrub(self, confirm=True):
"""Clean everything and reset to an unrun state.
Parameters
----------
confirm : bool, optional
Get user input before proceeding
Returns
-------
self : Job
"""
msg = ("This will delete all outputs stored in this job, as well "
"as all output files, job files, and scripts. Are you sure "
"you want to do this?")
if confirm:
_run.get_yesno(msg, default='n')
# Clean old set
self.clean(delete_outputs=True)
# Reset runtime attributes
self.initialized = False
self.scripts_ready = False
self.written = False
self.submitted = False
self.id = None
self.found = False
self.queue_info = None
self.state = 'Not_Submitted'
self._got_out = False
self._got_stdout = False
self._got_stderr = False
self._got_exitcode = False
self._out = None
self._stdout = None
self._stderr = None
self._exitcode = None
self._got_times = False
self._updating = False
self._found_files = False
self.start = None
self.end = None
return self.update()
######################
# Queue Management #
######################
def update(self, fetch_info=True):
"""Update status from the queue.
Parameters
----------
fetch_info : bool, optional
Fetch basic job info if complete.
Returns
-------
self : Job
"""
if not self._updating:
self._update(fetch_info)
else:
_logme.log('Already updating, aborting.', 'debug')
return self
def update_queue_info(self):
"""Set (and return) queue_info from the queue even if done."""
_logme.log('Updating queue_info', 'debug')
queue_info1 = self.queue[self.id]
self.queue.update()
queue_info2 = self.queue[self.id]
if queue_info2:
self.queue_info = queue_info2
elif queue_info1:
self.queue_info = queue_info1
elif self.queue_info is None and self.submitted:
_logme.log('Cannot find self in the queue and queue_info is empty',
'warn')
return self.queue_info
#################################
# Output Handling and Waiting #
#################################
def wait(self):
"""Block until job completes.
Returns
-------
success : bool or str
True if exitcode == 0, False if not, 'disappeared' if job lost from
queue.
"""
if not self.submitted:
if _conf.get_option('jobs', 'auto_submit'):
_logme.log('Auto-submitting as not submitted yet', 'debug')
self.submit()
else:
_logme.log('Cannot wait for result as job has not been ' +
'submitted', 'warn')
return False
self.update(fetch_info=False)
if not self.done:
_logme.log('Waiting for self {}'.format(self.name), 'debug')
status = self.queue.wait(self.id, return_disp=True)
if status == 'disappeared':
self.state = status
elif status is not True:
return False
else:
if not self._updating:
self.update()
if self.get_exitcode(update=False) != 0:
_logme.log('Job failed with exitcode {}'
.format(self.exitcode), 'debug')
return False
if self._wait_for_files(caution_message=False):
if not self._updating:
self.update()
if self.state == 'disappeared':
_logme.log('Job files found for disappered job, assuming '
'success', 'info')
return 'disappeared'
return True
else:
if self.state == 'disappeared':
_logme.log('Disappeared job has no output files, assuming '
'failure', 'error')
return False
def get(self, save=True, cleanup=None, delete_outfiles=None,
del_no_save=None, raise_on_error=True):
"""Block until job completed and return output of script/function.
By default saves all outputs to this class and deletes all intermediate
files.
Parameters
----------
save : bool, optional
Save all outputs to the class also (advised)
cleanup : bool, optional
Clean all intermediate files after job completes.
delete_outfiles : bool, optional
Clean output files after job completes.
del_no_save : bool, optional
Delete output files even if `save` is `False`
raise_on_error : bool, optional
If the returned output is an Exception, raise it.
Returns
-------
str
Function output if Function, else STDOUT
"""
_logme.log(('Getting outputs, cleanup={}, autoclean={}, '
'delete_outfiles={}').format(
cleanup, self.clean_files, delete_outfiles
), 'debug')
# Wait for queue
status = self.wait()
if status is not True:
if status == 'disappeared':
msg = 'Job disappeared from queue'
_logme.log(msg + ', attempting to get '
'outputs', 'debug')
else:
msg = 'Wait failed'
_logme.log(msg + ', attempting to get outputs anyway',
'debug')
try:
self.fetch_outputs(save=save, delete_files=False,
get_stats=False)
except IOError:
_logme.log(msg + ' and files could not be found, job must '
'have failed', 'error')
if raise_on_error:
raise
return
if status != 'disappeared':
return
else:
# Get output
_logme.log('Wait complete, fetching outputs', 'debug')
self.fetch_outputs(save=save, delete_files=False)
out = self.out if save else self.get_output(save=save, update=False)
if isinstance(out, tuple) and issubclass(out[0], Exception):
if raise_on_error:
_reraise(*out)
else:
_logme.log('Job failed with exception {}'.format(out))
print(_tb(out[2]))
return out
# Cleanup
if cleanup is None:
cleanup = self.clean_files
else:
assert isinstance(cleanup, bool)
if delete_outfiles is None:
delete_outfiles = self.clean_outputs
if save is False:
delete_outfiles = del_no_save if del_no_save is not None else False
if cleanup:
self.clean(delete_outputs=delete_outfiles)
return out
def get_output(self, save=True, delete_file=None, update=True,
raise_on_error=True):
"""Get output of function or script.
This is the same as stdout for a script, or the function output for
a function.
By default, output file is kept unless delete_file is True or
self.clean_files is True.
Parameters
----------
save : bool, optional
Save the output to self.out, default True. Would be a good idea to
set to False if the output is huge.
delete_file : bool, optional
Delete the output file when getting
update : bool, optional
Update job info from queue first.
raise_on_error : bool, optional
If the returned output is an Exception, raise it.
Returns
-------
output : anything
The output of the script or function. Always a string if script.
"""
_logme.log(('Getting output, save={}, clean_files={}, '
'delete_file={}').format(
save, self.clean_files, delete_file
), 'debug')
if delete_file is None:
delete_file = self.clean_outputs
if self.kind == 'script':
return self.get_stdout(save=save, delete_file=delete_file,
update=update)
if self.done and self._got_out:
_logme.log('Getting output from _out', 'debug')
return self._out
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Cannot get pickled output before job completes',
'warn')
return None
_logme.log('Getting output from {}'.format(self.poutfile), 'debug')
if _os.path.isfile(self.poutfile):
with open(self.poutfile, 'rb') as fin:
out = _pickle.load(fin)
if delete_file is True or self.clean_files is True:
_logme.log('Deleting {}'.format(self.poutfile),
'debug')
_os.remove(self.poutfile)
if save:
self._out = out
self._got_out = True
if _run.is_exc(out):
_logme.log('{} failed with exception {}'.format(self, out[1]),
'error')
if raise_on_error:
_reraise(*out)
return out
else:
_logme.log('No file at {} even though job has completed!'
.format(self.poutfile), 'critical')
raise IOError('File not found: {}'.format(self.poutfile))
def get_stdout(self, save=True, delete_file=None, update=True):
"""Get stdout of function or script, same for both.
By default, output file is kept unless delete_file is True or
self.clean_files is True.
Also sets self.start and self.end from the contents of STDOUT if
possible.
Returns
-------
save : bool, optional
Save the output to self.stdout, default True. Would be a good idea
to set to False if the output is huge.
delete_file : bool, optional
Delete the stdout file when getting
update : bool, optional
Update job info from queue first.
Returns
-------
str
The contents of STDOUT, with runtime info and trailing newline
removed.
"""
if delete_file is None:
delete_file = self.clean_outputs
_logme.log(('Getting stdout, save={}, clean_files={}, '
'delete_file={}').format(
save, self.clean_files, delete_file
), 'debug')
if self.done and self._got_stdout:
_logme.log('Getting stdout from _stdout', 'debug')
return self._stdout
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Job not done, attempting to get current STDOUT ' +
'anyway', 'info')
_logme.log('Getting stdout from {}'.format(self._kwargs['outfile']),
'debug')
if _os.path.isfile(self._kwargs['outfile']):
with open(self._kwargs['outfile']) as fin:
stdout = fin.read()
if stdout:
stdouts = stdout.strip().split('\n')
if len(stdouts) < 3 or stdouts[-3] != 'Done':
_logme.log('STDOUT incomplete, returning as is', 'info')
return stdout
if self.done:
self.get_times(update=False, stdout=stdout)
self.get_exitcode(update=False, stdout=stdout)
stdout = '\n'.join(stdouts[2:-3]) + '\n'
if delete_file is True or self.clean_files is True:
_logme.log('Deleting {}'.format(self._kwargs['outfile']),
'debug')
_os.remove(self._kwargs['outfile'])
if save:
self._stdout = stdout
if self.done:
self._got_stdout = True
return stdout
else:
_logme.log('No file at {}, cannot get stdout'
.format(self._kwargs['outfile']), 'warn')
return None
def get_stderr(self, save=True, delete_file=None, update=True):
"""Get stderr of function or script, same for both.
By default, output file is kept unless delete_file is True or
self.clean_files is True.
Parameters
----------
save : bool, optional
Save the output to self.stdout, default True. Would be a good idea
to set to False if the output is huge.
delete_file : bool, optional
Delete the stdout file when getting
update : bool, optional
Update job info from queue first.
Returns
-------
str
The contents of STDERR, with trailing newline removed.
"""
if delete_file is None:
delete_file = self.clean_outputs
_logme.log(('Getting stderr, save={}, clean_files={}, '
'delete_file={}').format(
save, self.clean_files, delete_file
), 'debug')
if self.done and self._got_stderr:
_logme.log('Getting stderr from _stderr', 'debug')
return self._stderr
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Job not done, attempting to get current STDERR ' +
'anyway', 'info')
_logme.log('Getting stderr from {}'.format(self._kwargs['errfile']),
'debug')
if _os.path.isfile(self._kwargs['errfile']):
with open(self._kwargs['errfile']) as fin:
stderr = fin.read()
if delete_file is True or self.clean_files is True:
_logme.log('Deleting {}'.format(self._kwargs['errfile']),
'debug')
_os.remove(self._kwargs['errfile'])
if save:
self._stderr = stderr
if self.done:
self._got_stderr = True
return stderr
else:
_logme.log('No file at {}, cannot get stderr'
.format(self._kwargs['errfile']), 'warn')
return None
def get_times(self, update=True, stdout=None):
"""Get stdout of function or script, same for both.
Sets self.start and self.end from the contents of STDOUT if
possible.
Parameters
----------
update : bool, optional
Update job info from queue first.
stdout : str, optional
Pass existing stdout for use
Returns
-------
start : datetime.datetime
end : datetime.datetime
"""
_logme.log('Getting times', 'debug')
if self.done and self._got_times:
_logme.log('Getting times from self.start, self.end', 'debug')
return self.start, self.end
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self._wait_for_files()
else:
_logme.log('Cannot get times until job is complete.', 'warn')
return None, None
_logme.log('Getting times from {}'.format(self._kwargs['outfile']),
'debug')
if not stdout:
if _os.path.isfile(self._kwargs['outfile']):
with open(self._kwargs['outfile']) as fin:
stdout = fin.read()
else:
_logme.log('No file at {}, cannot get times'
.format(self._kwargs['outfile']), 'warn')
return None
stdouts = stdout.strip().split('\n')
if len(stdouts) < 3 or stdouts[-3] != 'Done':
_logme.log('STDOUT incomplete, cannot get times', 'warn')
return None
# Get times
timefmt = '%y-%m-%d-%H:%M:%S'
try:
self.start = _dt.strptime(stdouts[0], timefmt)
self.end = _dt.strptime(stdouts[-1], timefmt)
except ValueError as err:
_logme.log('Time parsing failed with value error; ' +
'{}. '.format(err) + 'This may be because you ' +
'are using the script running that does not ' +
'include time tracking', 'debug')
self._got_times = True
return self.start, self.end
def get_exitcode(self, update=True, stdout=None):
"""Try to get the exitcode.
Parameters
----------
update : bool, optional
Update job info from queue first.
stdout : str, optional
Pass existing stdout for use
Returns
-------
exitcode : int
"""
_logme.log('Getting exitcode', 'debug')
if self.done and self._got_exitcode:
_logme.log('Getting exitcode from _exitcode', 'debug')
return self._exitcode
if update and not self._updating and not self.done:
self.update()
if not self.done:
_logme.log('Job is not complete, no exit code yet', 'info')
return None
if self.state == 'disappeared':
_logme.log('Cannot get exitcode for disappeared job', 'debug')
return 0
code = None
if not stdout and _os.path.isfile(self._kwargs['outfile']):
with open(self._kwargs['outfile']) as fin:
stdout = fin.read()
if stdout:
stdouts = stdout.strip().split('\n')
if len(stdouts) > 3 and stdouts[-3] == 'Done':
if stdouts[-2].startswith('Code: '):
code = int(stdouts[-2].split(':')[-1].strip())
if code is None:
_logme.log('Getting exitcode from queue', 'debug')
if not self.queue_info:
self.queue_info = self.queue[self.id]
if hasattr(self.queue_info, 'exitcode'):
code = self.queue_info.exitcode
if code is None:
_logme.log('Failed to get exitcode for job', 'warn')
return None
self._exitcode = code
self._got_exitcode = True
if code != 0:
self.state = 'failed'
_logme.log('Job {} failed with exitcode {}'
.format(self.name, code), 'error')
return code
def fetch_outputs(self, save=True, delete_files=None, get_stats=True):
"""Save all outputs in their current state. No return value.
This method does not wait for job completion, but merely gets the
outputs. To wait for job completion, use `get()` instead.
Parameters
----------
save : bool, optional
Save all outputs to the class also (advised)
delete_files : bool, optional
Delete the output files when getting, only used if save is True
get_stats : bool, optional
Try to get exitcode.
"""
_logme.log('Saving outputs to self, delete_files={}'
.format(delete_files), 'debug')
if not self._updating:
self.update()
if delete_files is None:
delete_files = self.clean_outputs
if not self._got_exitcode and get_stats:
self.get_exitcode(update=False)
if not self._got_times:
self.get_times(update=False)
if save:
self.get_output(save=True, delete_file=delete_files, update=False)
self.get_stdout(save=True, delete_file=delete_files, update=False)
self.get_stderr(save=True, delete_file=delete_files, update=False)
##############################
# Minor management methods #
##############################
def get_keywords(self):
"""Return a list of the keyword arguments used to make the job."""
return self.kwds
def set_keywords(self, kwds, replace=False):
"""Set the job keywords, just updates self.kwds.
Parameters
----------
kwds : dict
Set of valid arguments.
replace : bool, optional
Overwrite the keword arguments instead of updating.
"""
kwds = _options.check_arguments(kwds)
if replace:
self.kwds = kwds
else:
for key, value in kwds.items():
self.kwds[key] = value
###############
# Internals #
###############
def _update(self, fetch_info=True):
"""Update status from the queue.
Parameters
----------
fetch_info : bool, optional
Fetch basic job info if complete.
"""
_logme.log('Updating job.', 'debug')
self._updating = True
if self.done or not self.submitted:
self._updating = False
return
self.queue.update()
if self.submitted and self.id:
queue_info = self.queue[self.id]
if queue_info:
assert self.id == queue_info.id
self.found = True
self.queue_info = queue_info
self.state = self.queue_info.state
elif self.found:
_logme.log('Job appears to have disappeared, waiting for '
'reappearance, this may take a while', 'warn')
status = self.wait()
if status == 'disappeared':
_logme.log('Job disappeared, but the output files are '
'present assuming completion', 'info')
self.state = 'completed'
self.disappeared = True
elif not status:
_logme.log('Job appears to have failed and disappeared',
'error')
# If job not found after 30 seconds, assume trouble, check for
# completion
elif self.submitted and (_dt.now()-self.submit_time).seconds > 360:
if self._wait_for_files(btme=4, caution_message=False):
self.state = 'completed'
self.disappeared = True
_logme.log('Job never appeared in the queue, but '
'outfiles still exist, assuming completion.',
'warn')
else:
self.state = 'failed'
self.disappeared = True
s = (_dt.now()-self.submit_time).seconds
_logme.log('Job not in queue after {} seconds '.format(s) +
'of searching and no outputs found, assuming '
'failure.', 'error')
elif self.submitted and (_dt.now()-self.submit_time).seconds > 30:
if self._wait_for_files(btme=1, caution_message=False):
self.state = 'completed'
self.disappeared = True
_logme.log('Job never appeared in the queue, but '
'outfiles still exist, assuming completion.',
'warn')
if self.done and fetch_info:
if self._wait_for_files(btme=1, caution_message=False):
if not self._got_exitcode:
self.get_exitcode(update=False)
if not self._got_times:
self.get_times(update=False)
self._updating = False
def _wait_for_files(self, btme=None, caution_message=False):
"""Block until files appear up to 'file_block_time' in config file.
Aborts after 2 seconds if job exit code is not 0.
Parameters
----------
btme : int, optional
Number of seconds to try for before giving up, default set in
config file.
caution_message : bool, optional
Display a message if this is taking a while.
Returns
-------
bool
True if files found
"""
if self._found_files:
_logme.log('Already found files, not waiting again', 'debug')
return True
wait_time = 0.1 # seconds
if btme:
lvl = 'debug'
else:
lvl = 'warn'
btme = _conf.get_option('jobs', 'file_block_time', 30)
start = _dt.now()
dsp = False
_logme.log('Checking for output files', 'debug')
while True:
runtime = (_dt.now() - start).seconds
if caution_message and runtime > 1:
_logme.log('Job complete.', 'info')
_logme.log('Waiting for output files to appear.', 'info')
caution_message = False
if not dsp and runtime > 20:
_logme.log('Still waiting for output files to appear',
'info')
dsp = True
count = 0
outfiles = self.incomplete_outfiles
tlen = len(outfiles)
if not outfiles:
_logme.log('No incomplete outfiles, assuming all found in ' +
'{} seconds'.format(runtime), 'debug')
break
for i in outfiles:
if _os.path.isfile(i):
count += 1
if count == tlen:
_logme.log('All output files found in {} seconds'
.format(runtime), 'debug')
break
_sleep(wait_time)
if runtime > btme:
_logme.log('Job files have not appeared for ' +
'>{} seconds'.format(btme), lvl)
return False
if not self._updating:
self.update()
if runtime > 2 and self.get_exitcode(update=False) != 0:
_logme.log('Job failed with exit code {}.'
.format(self.exitcode) + ' Cannot find files.',
'error')
return False
self._found_files = True
return True
def _update_name(self, name=None):
"""Make sure the job name is unique.
Sets
----
self.name
Parameters
----------
name : str, optional
A name override, if no provided self.name used
Returns
-------
name : str
"""
# Set name
name = name if name else self.name
if not name:
if callable(self.command):
strcmd = str(self.command).strip('<>')
parts = strcmd.split(' ')
if parts[0] == 'bound':
name = '_'.join(parts[2:3])
else:
parts.remove('function')
try:
parts.remove('built-in')
except ValueError:
pass
name = parts[0]
else:
name = self.command.split(' ')[0].split('/')[-1]
# Make sure name not in queue
if '.' not in name or not name.split('.')[-1] == self.uuid:
name = '{}.{}'.format(name, self.uuid)
self.name = name
return name
def __repr__(self):
"""Return simple job information."""
if not self.initialized:
self.initialize()
self.update()
outstr = "Job:{name}<{mode}:{qtype}".format(
name=self.name, mode=self.kind, qtype=self.qtype)
if self.submitted:
outstr += ':{}'.format(self.id)
outstr += "(command:{cmnd})".format(cmnd=self.command)
if self.submitted or self.done:
outstr += self.state.upper()
elif self.written:
outstr += "WRITTEN"
else:
outstr += "NOT_SUBMITTED"
outstr += ">"
return outstr
def __str__(self):
"""Print job name and ID + status."""
if not self._updating:
self.update()
return "Job: {name} ID: {id}, state: {state}".format(
name=self.name, id=self.id, state=self.state)
def __int__(self):
"""Return integer of ID."""
if self.id:
if str(self.id.isdigit()):
return int(id)
_logme.log('No ID yet.', 'error')
return 0
| mit | 1,620,173,737,660,842,800 | 34.249688 | 79 | 0.518399 | false |
hagabbar/pycbc_copy | pycbc/io/hdf.py | 1 | 31952 | # convenience classes for accessing hdf5 trigger files
# The 'get_column()' method is implemented in parallel to
# the existing pylal.SnglInspiralUtils functions
import h5py
import numpy as np
import logging
import inspect
from lal import LIGOTimeGPS, YRJUL_SI
from pycbc_glue.ligolw import ligolw
from pycbc_glue.ligolw import table
from pycbc_glue.ligolw import lsctables
from pycbc_glue.ligolw import ilwd
from pycbc_glue.ligolw import utils as ligolw_utils
from pycbc_glue.ligolw.utils import process as ligolw_process
from pycbc import version as pycbc_version
from pycbc.tmpltbank import return_search_summary
from pycbc.tmpltbank import return_empty_sngl
from pycbc import events, conversions, pnutils
class HFile(h5py.File):
""" Low level extensions to the capabilities of reading an hdf5 File
"""
def select(self, fcn, *args, **kwds):
""" Return arrays from an hdf5 file that satisfy the given function
Parameters
----------
fcn : a function
            A function that accepts the same number of arguments as keys given
and returns a boolean array of the same length.
args : strings
A variable number of strings that are keys into the hdf5. These must
refer to arrays of equal length.
chunksize : {1e6, int}, optional
Number of elements to read and process at a time.
return_indices : bool, optional
If True, also return the indices of elements passing the function.
Returns
-------
values : np.ndarrays
A variable number of arrays depending on the number of keys into
the hdf5 file that are given. If return_indices is True, the first
element is an array of indices of elements passing the function.
>>> f = HFile(filename)
>>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
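        The indices of the passing elements can also be requested; the
        group name 'H1/snr' above is only illustrative:

        >>> idx, snr = f.select(lambda snr: snr > 6, 'H1/snr',
        ...                     return_indices=True)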
"""
# get references to each array
refs = {}
data = {}
for arg in args:
refs[arg] = self[arg]
data[arg] = []
return_indices = kwds.get('return_indices', False)
indices = np.array([], dtype=np.uint64)
# To conserve memory read the array in chunks
chunksize = kwds.get('chunksize', int(1e6))
        # All requested datasets must have equal length, so use the first one
        size = len(refs[args[0]])
i = 0
while i < size:
r = i + chunksize if i + chunksize < size else size
            # Read each chunk's worth of data and find where it passes the function
partial = [refs[arg][i:r] for arg in args]
keep = fcn(*partial)
if return_indices:
indices = np.concatenate([indices, np.flatnonzero(keep) + i])
#store only the results that pass the function
for arg, part in zip(args, partial):
data[arg].append(part[keep])
i += chunksize
# Combine the partial results into full arrays
if len(args) == 1:
res = np.concatenate(data[args[0]])
if return_indices:
return indices, res
else:
return res
else:
res = tuple(np.concatenate(data[arg]) for arg in args)
if return_indices:
return (indices,) + res
else:
return res
class DictArray(object):
""" Utility for organizing sets of arrays of equal length.
Manages a dictionary of arrays of equal length. This can also
be instantiated with a set of hdf5 files and the key values. The full
data is always in memory and all operations create new instances of the
DictArray.
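
    Examples
    --------
    A minimal sketch; the file name and group names below are hypothetical
    and assume 'STAT.hdf' contains equal-length datasets 'stat' and 'time1':

    >>> d = DictArray(files=['STAT.hdf'], groups=['stat', 'time1'])
    >>> loud = d.select(d.stat > 8)   # new DictArray with only loud entries
    >>> trimmed = d.remove([0, 1])    # drop the first two entries
    >>> combined = d + d              # concatenate the underlying arrays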
"""
def __init__(self, data=None, files=None, groups=None):
""" Create a DictArray
Parameters
----------
data: dict, optional
Dictionary of equal length numpy arrays
files: list of filenames, optional
            List of hdf5 file names. Incompatible with the `data` option.
groups: list of strings
            List of keys into each file. Required when the `files` option is used.
"""
self.data = data
if files:
self.data = {}
for g in groups:
self.data[g] = []
for f in files:
d = HFile(f)
for g in groups:
if g in d:
self.data[g].append(d[g][:])
d.close()
for k in self.data:
if not len(self.data[k]) == 0:
self.data[k] = np.concatenate(self.data[k])
for k in self.data:
setattr(self, k, self.data[k])
def _return(self, data):
return self.__class__(data=data)
def __len__(self):
return len(self.data[self.data.keys()[0]])
def __add__(self, other):
data = {}
for k in self.data:
data[k] = np.concatenate([self.data[k], other.data[k]])
return self._return(data=data)
def select(self, idx):
""" Return a new DictArray containing only the indexed values
"""
data = {}
for k in self.data:
data[k] = self.data[k][idx]
return self._return(data=data)
def remove(self, idx):
""" Return a new DictArray that does not contain the indexed values
"""
data = {}
for k in self.data:
data[k] = np.delete(self.data[k], idx)
return self._return(data=data)
class StatmapData(DictArray):
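    """DictArray holding the columns of a coincident (statmap) trigger file.

    Examples
    --------
    A hedged sketch; 'STATMAP.hdf' stands in for a hypothetical coincidence
    output file providing the expected columns, segments and attributes:

    >>> s = StatmapData(files=['STATMAP.hdf'])
    >>> clustered = s.cluster(window=10)
    >>> clustered.save('STATMAP-CLUSTERED.hdf')
    """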
def __init__(self, data=None, seg=None, attrs=None,
files=None):
groups = ['stat', 'time1', 'time2', 'trigger_id1', 'trigger_id2',
'template_id', 'decimation_factor', 'timeslide_id']
super(StatmapData, self).__init__(data=data, files=files, groups=groups)
if data:
            self.seg = seg
            self.attrs = attrs
elif files:
f = HFile(files[0], "r")
self.seg = f['segments']
self.attrs = f.attrs
def _return(self, data):
return self.__class__(data=data, attrs=self.attrs, seg=self.seg)
def cluster(self, window):
""" Cluster the dict array, assuming it has the relevant Coinc colums,
time1, time2, stat, and timeslide_id
"""
# If no events, do nothing
if len(self.time1) == 0 or len(self.time2) == 0:
return self
from pycbc.events import cluster_coincs
interval = self.attrs['timeslide_interval']
cid = cluster_coincs(self.stat, self.time1, self.time2,
self.timeslide_id, interval, window)
return self.select(cid)
def save(self, outname):
f = HFile(outname, "w")
for k in self.attrs:
f.attrs[k] = self.attrs[k]
for k in self.data:
f.create_dataset(k, data=self.data[k],
compression='gzip',
compression_opts=9,
shuffle=True)
for key in self.seg.keys():
f['segments/%s/start' % key] = self.seg[key]['start'][:]
f['segments/%s/end' % key] = self.seg[key]['end'][:]
f.close()
class FileData(object):
def __init__(self, fname, group=None, columnlist=None, filter_func=None):
"""
Parameters
----------
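        fname : string
            Path of the hdf5 file to be read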
group : string
Name of group to be read from the file
columnlist : list of strings
Names of columns to be read; if None, use all existing columns
filter_func : string
String should evaluate to a Boolean expression using attributes
of the class instance derived from columns: ex. 'self.snr < 6.5'
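
        Examples
        --------
        An illustrative sketch; the file name, group and column below are
        hypothetical:

        >>> d = FileData('TRIGGERS.hdf', group='H1', filter_func='self.snr > 6')
        >>> snr = d.get_column('snr')
        >>> d.close()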
"""
if not fname: raise RuntimeError("Didn't get a file!")
self.fname = fname
self.h5file = HFile(fname, "r")
if group is None:
if len(self.h5file.keys()) == 1:
group = self.h5file.keys()[0]
else:
raise RuntimeError("Didn't get a group!")
self.group_key = group
self.group = self.h5file[group]
self.columns = columnlist if columnlist is not None \
else self.group.keys()
self.filter_func = filter_func
self._mask = None
def close(self):
self.h5file.close()
@property
def mask(self):
"""
Create a mask implementing the requested filter on the datasets
Returns
-------
array of Boolean
True for dataset indices to be returned by the get_column method
"""
if self.filter_func is None:
raise RuntimeError("Can't get a mask without a filter function!")
else:
# only evaluate if no previous calculation was done
if self._mask is None:
# get required columns into the namespace as numpy arrays
for column in self.columns:
if column in self.filter_func:
setattr(self, column, self.group[column][:])
self._mask = eval(self.filter_func)
return self._mask
def get_column(self, col):
"""
Parameters
----------
col : string
Name of the dataset to be returned
Returns
-------
numpy array
Values from the dataset, filtered if requested
"""
# catch corner case with an empty file (group with no datasets)
if not len(self.group.keys()):
return np.array([])
vals = self.group[col]
if self.filter_func:
return vals[self.mask]
else:
return vals[:]
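# Illustrative usage sketch for FileData: the file name, group and threshold
# are hypothetical; only columns named in filter_func are loaded for masking.
def _example_filedata(fname='H1-TRIGGERS.hdf'):
    d = FileData(fname, group='H1', columnlist=['snr', 'end_time'],
                 filter_func='self.snr > 8')
    end_times = d.get_column('end_time')   # filtered by the snr cut
    d.close()
    return end_times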
class DataFromFiles(object):
def __init__(self, filelist, group=None, columnlist=None, filter_func=None):
self.files = filelist
self.group = group
self.columns = columnlist
self.filter_func = filter_func
def get_column(self, col):
"""
Loop over files getting the requested dataset values from each
Parameters
----------
col : string
Name of the dataset to be returned
Returns
-------
numpy array
Values from the dataset, filtered if requested and
concatenated in order of file list
"""
logging.info('getting %s' % col)
vals = []
for f in self.files:
d = FileData(f, group=self.group, columnlist=self.columns,
filter_func=self.filter_func)
vals.append(d.get_column(col))
# Close each file since h5py has an upper limit on the number of
# open file objects (approx. 1000)
d.close()
logging.info('- got %i values' % sum(len(v) for v in vals))
return np.concatenate(vals)
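# Illustrative usage sketch for DataFromFiles: concatenating one column over
# several files of the same format; file list, group and cut are hypothetical.
def _example_datafromfiles(filelist):
    d = DataFromFiles(filelist, group='H1', columnlist=['snr', 'end_time'],
                      filter_func='self.snr > 8')
    return d.get_column('end_time')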
class SingleDetTriggers(object):
"""
Provides easy access to the parameters of single-detector CBC triggers.
"""
# FIXME: Some of these are optional and should be kwargs.
def __init__(self, trig_file, bank_file, veto_file, segment_name, filter_func, detector):
logging.info('Loading triggers')
self.trigs_f = HFile(trig_file, 'r')
self.trigs = self.trigs_f[detector]
if bank_file:
logging.info('Loading bank')
self.bank = HFile(bank_file, 'r')
else:
logging.info('No bank file given')
# empty dict in place of non-existent hdf file
self.bank = {}
if veto_file:
logging.info('Applying veto segments')
# veto_mask is an array of indices into the trigger arrays
# giving the surviving triggers
logging.info('%i triggers before vetoes',
len(self.trigs['end_time'][:]))
self.veto_mask, _ = events.veto.indices_outside_segments(
self.trigs['end_time'][:], [veto_file],
ifo=detector, segment_name=segment_name)
logging.info('%i triggers remain after vetoes',
len(self.veto_mask))
else:
self.veto_mask = np.arange(len(self.trigs['end_time']))
if filter_func:
# get required columns into the namespace with dummy attribute
# names to avoid confusion with other class properties
for c in self.trigs.keys():
if c in filter_func:
setattr(self, '_'+c, self.trigs[c][:])
for c in self.bank.keys():
if c in filter_func:
# get template parameters corresponding to triggers
setattr(self, '_'+c,
np.array(self.bank[c])[self.trigs['template_id'][:]])
self.filter_mask = eval(filter_func.replace('self.', 'self._'))
# remove the dummy attributes
for c in self.trigs.keys() + self.bank.keys():
if c in filter_func: delattr(self, '_'+c)
self.boolean_veto = np.in1d(np.arange(len(self.trigs['end_time'])),
self.veto_mask, assume_unique=True)
self.mask = np.logical_and(self.boolean_veto, self.filter_mask)
logging.info('%i triggers remain after cut on %s',
len(self.trigs['end_time'][self.mask]), filter_func)
else:
self.mask = self.veto_mask
def checkbank(self, param):
        if self.bank == {}:
            raise RuntimeError("Can't get %s values without a bank file"
                               % param)
@classmethod
def get_param_names(cls):
"""Returns a list of plottable CBC parameter variables"""
return [m[0] for m in inspect.getmembers(cls) \
if type(m[1]) == property]
def mask_to_n_loudest_clustered_events(self, n_loudest=10,
ranking_statistic="newsnr",
cluster_window=10):
"""Edits the mask property of the class to point to the N loudest
single detector events as ranked by ranking statistic. Events are
clustered so that no more than 1 event within +/- cluster-window will
be considered."""
# If this becomes memory intensive we can optimize
if ranking_statistic == "newsnr":
stat = self.newsnr
# newsnr doesn't return an array if len(stat) == 1
if len(self.snr) == 1:
stat = np.array([stat])
self.stat_name = "Reweighted SNR"
elif ranking_statistic == "newsnr_sgveto":
stat = self.newsnr_sgveto
# newsnr doesn't return an array if len(stat) == 1
if len(self.snr) == 1:
stat = np.array([stat])
self.stat_name = "Reweighted SNR (+sgveto)"
elif ranking_statistic == "snr":
stat = self.snr
self.stat_name = "SNR"
else:
err_msg = "Don't recognize statistic %s." % (ranking_statistic)
raise ValueError(err_msg)
times = self.end_time
index = stat.argsort()[::-1]
new_times = []
new_index = []
for curr_idx in index:
curr_time = times[curr_idx]
for time in new_times:
if abs(curr_time - time) < cluster_window:
break
else:
# Only get here if no other triggers within cluster window
new_index.append(curr_idx)
new_times.append(curr_time)
if len(new_index) >= n_loudest:
break
index = np.array(new_index)
self.stat = stat[index]
if self.mask.dtype == 'bool':
orig_indices = self.mask.nonzero()[0][index]
self.mask = np.in1d(np.arange(len(self.mask)), orig_indices,
assume_unique=True)
else:
self.mask = self.mask[index]
@property
def template_id(self):
return np.array(self.trigs['template_id'])[self.mask]
@property
def mass1(self):
self.checkbank('mass1')
return np.array(self.bank['mass1'])[self.template_id]
@property
def mass2(self):
self.checkbank('mass2')
return np.array(self.bank['mass2'])[self.template_id]
@property
def spin1z(self):
self.checkbank('spin1z')
return np.array(self.bank['spin1z'])[self.template_id]
@property
def spin2z(self):
self.checkbank('spin2z')
return np.array(self.bank['spin2z'])[self.template_id]
@property
def spin2x(self):
self.checkbank('spin2x')
return np.array(self.bank['spin2x'])[self.template_id]
@property
def spin2y(self):
self.checkbank('spin2y')
return np.array(self.bank['spin2y'])[self.template_id]
@property
def spin1x(self):
self.checkbank('spin1x')
return np.array(self.bank['spin1x'])[self.template_id]
@property
def spin1y(self):
self.checkbank('spin1y')
return np.array(self.bank['spin1y'])[self.template_id]
@property
def inclination(self):
self.checkbank('inclination')
return np.array(self.bank['inclination'])[self.template_id]
@property
def f_lower(self):
self.checkbank('f_lower')
return np.array(self.bank['f_lower'])[self.template_id]
@property
def mtotal(self):
return self.mass1 + self.mass2
@property
def mchirp(self):
return conversions.mchirp_from_mass1_mass2(self.mass1, self.mass2)
@property
def eta(self):
return conversions.eta_from_mass1_mass2(self.mass1, self.mass2)
@property
def effective_spin(self):
# FIXME assumes aligned spins
return conversions.chi_eff(self.mass1, self.mass2,
self.spin1z, self.spin2z)
# IMPROVEME: would like to have a way to access all get_freq and/or
# other pnutils.* names rather than hard-coding each one
# - eg make this part of a fancy interface to the bank file ?
@property
def f_seobnrv2_peak(self):
return pnutils.get_freq('fSEOBNRv2Peak', self.mass1, self.mass2,
self.spin1z, self.spin2z)
@property
def f_seobnrv4_peak(self):
return pnutils.get_freq('fSEOBNRv4Peak', self.mass1, self.mass2,
self.spin1z, self.spin2z)
@property
def end_time(self):
return np.array(self.trigs['end_time'])[self.mask]
@property
def template_duration(self):
return np.array(self.trigs['template_duration'])[self.mask]
@property
def snr(self):
return np.array(self.trigs['snr'])[self.mask]
@property
def sgchisq(self):
return np.array(self.trigs['sg_chisq'])[self.mask]
@property
def u_vals(self):
return np.array(self.trigs['u_vals'])[self.mask]
@property
def rchisq(self):
return np.array(self.trigs['chisq'])[self.mask] \
/ (np.array(self.trigs['chisq_dof'])[self.mask] * 2 - 2)
@property
def newsnr(self):
return events.newsnr(self.snr, self.rchisq)
@property
def newsnr_sgveto(self):
return events.newsnr_sgveto(self.snr, self.rchisq, self.sgchisq)
def get_column(self, cname):
if hasattr(self, cname):
return getattr(self, cname)
else:
return np.array(self.trigs[cname])[self.mask]
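# Illustrative usage sketch for SingleDetTriggers: no veto file or filter
# function is applied, and the detector name and file paths are hypothetical.
def _example_single_det_triggers(trig_file, bank_file):
    trigs = SingleDetTriggers(trig_file, bank_file, None, None, None, 'H1')
    trigs.mask_to_n_loudest_clustered_events(n_loudest=10)
    return trigs.end_time, trigs.snr, trigs.mchirp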
class ForegroundTriggers(object):
# FIXME: A lot of this is hardcoded to expect two ifos
def __init__(self, coinc_file, bank_file, sngl_files=None, n_loudest=None,
group='foreground'):
self.coinc_file = FileData(coinc_file, group=group)
self.sngl_files = {}
if sngl_files is not None:
for file in sngl_files:
curr_dat = FileData(file)
curr_ifo = curr_dat.group_key
self.sngl_files[curr_ifo] = curr_dat
self.bank_file = HFile(bank_file, "r")
self.n_loudest = n_loudest
self._sort_arr = None
self._template_id = None
self._trig_ids = None
@property
def sort_arr(self):
if self._sort_arr is None:
ifar = self.coinc_file.get_column('ifar')
sorting = ifar.argsort()[::-1]
if self.n_loudest:
sorting = sorting[:self.n_loudest]
self._sort_arr = sorting
return self._sort_arr
@property
def template_id(self):
if self._template_id is None:
template_id = self.get_coincfile_array('template_id')
self._template_id = template_id
return self._template_id
@property
def trig_id(self):
if self._trig_ids is not None:
return self._trig_ids
self._trig_ids = {}
# FIXME: There is no clear mapping from trig_id to ifo. This is bad!!!
# for now a hack is in place.
ifo1 = self.coinc_file.h5file.attrs['detector_1']
ifo2 = self.coinc_file.h5file.attrs['detector_2']
trigid1 = self.get_coincfile_array('trigger_id1')
trigid2 = self.get_coincfile_array('trigger_id2')
self._trig_ids[ifo1] = trigid1
self._trig_ids[ifo2] = trigid2
return self._trig_ids
def get_coincfile_array(self, variable):
return self.coinc_file.get_column(variable)[self.sort_arr]
def get_bankfile_array(self, variable):
try:
return self.bank_file[variable][:][self.template_id]
except IndexError:
if len(self.template_id) == 0:
return np.array([])
raise
def get_snglfile_array_dict(self, variable):
return_dict = {}
for ifo in self.sngl_files.keys():
try:
curr = self.sngl_files[ifo].get_column(variable)[\
self.trig_id[ifo]]
except IndexError:
if len(self.trig_id[ifo]) == 0:
curr = np.array([])
else:
raise
return_dict[ifo] = curr
return return_dict
def to_coinc_xml_object(self, file_name):
# FIXME: This function will only work with two ifos!!
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
ifos = [ifo for ifo in self.sngl_files.keys()]
proc_id = ligolw_process.register_to_xmldoc(outdoc, 'pycbc',
{}, ifos=ifos, comment='', version=pycbc_version.git_hash,
cvs_repository='pycbc/'+pycbc_version.git_branch,
cvs_entry_time=pycbc_version.date).process_id
search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
coinc_h5file = self.coinc_file.h5file
start_time = coinc_h5file['segments']['coinc']['start'][:].min()
end_time = coinc_h5file['segments']['coinc']['end'][:].max()
num_trigs = len(self.sort_arr)
search_summary = return_search_summary(start_time, end_time,
num_trigs, ifos)
search_summ_table.append(search_summary)
outdoc.childNodes[0].appendChild(search_summ_table)
sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
coinc_def_table = lsctables.New(lsctables.CoincDefTable)
coinc_event_table = lsctables.New(lsctables.CoincTable)
coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
time_slide_table = lsctables.New(lsctables.TimeSlideTable)
# Set up time_slide table
time_slide_id = lsctables.TimeSlideID(0)
for ifo in ifos:
time_slide_row = lsctables.TimeSlide()
time_slide_row.instrument = ifo
time_slide_row.time_slide_id = time_slide_id
time_slide_row.offset = 0
time_slide_row.process_id = proc_id
time_slide_table.append(time_slide_row)
# Set up coinc_definer table
coinc_def_id = lsctables.CoincDefID(0)
coinc_def_row = lsctables.CoincDef()
coinc_def_row.search = "inspiral"
coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincidences"
coinc_def_row.coinc_def_id = coinc_def_id
coinc_def_row.search_coinc_type = 0
coinc_def_table.append(coinc_def_row)
bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
bank_col_vals = {}
for name in bank_col_names:
bank_col_vals[name] = self.get_bankfile_array(name)
coinc_event_names = ['ifar', 'time1', 'fap', 'stat']
coinc_event_vals = {}
for name in coinc_event_names:
coinc_event_vals[name] = self.get_coincfile_array(name)
sngl_col_names = ['snr', 'chisq', 'chisq_dof', 'bank_chisq',
'bank_chisq_dof', 'cont_chisq', 'cont_chisq_dof',
'end_time', 'template_duration', 'coa_phase',
'sigmasq']
sngl_col_vals = {}
for name in sngl_col_names:
sngl_col_vals[name] = self.get_snglfile_array_dict(name)
for idx in xrange(len(self.sort_arr)):
# Set up IDs and mapping values
coinc_id = lsctables.CoincID(idx)
# Set up sngls
# FIXME: As two-ifo is hardcoded loop over all ifos
sngl_combined_mchirp = 0
sngl_combined_mtot = 0
for ifo in ifos:
sngl_id = self.trig_id[ifo][idx]
event_id = lsctables.SnglInspiralID(sngl_id)
sngl = return_empty_sngl()
sngl.event_id = event_id
sngl.ifo = ifo
for name in sngl_col_names:
val = sngl_col_vals[name][ifo][idx]
if name == 'end_time':
sngl.set_end(LIGOTimeGPS(val))
else:
setattr(sngl, name, val)
for name in bank_col_names:
val = bank_col_vals[name][idx]
setattr(sngl, name, val)
sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
sngl.mass1, sngl.mass2)
sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
sngl.mass1, sngl.mass2)
sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
sngl_combined_mchirp += sngl.mchirp
sngl_combined_mtot += sngl.mtotal
sngl_inspiral_table.append(sngl)
# Set up coinc_map entry
coinc_map_row = lsctables.CoincMap()
coinc_map_row.table_name = 'sngl_inspiral'
coinc_map_row.coinc_event_id = coinc_id
coinc_map_row.event_id = event_id
coinc_event_map_table.append(coinc_map_row)
sngl_combined_mchirp = sngl_combined_mchirp / len(ifos)
sngl_combined_mtot = sngl_combined_mtot / len(ifos)
# Set up coinc inspiral and coinc event tables
coinc_event_row = lsctables.Coinc()
coinc_inspiral_row = lsctables.CoincInspiral()
coinc_event_row.coinc_def_id = coinc_def_id
coinc_event_row.nevents = len(ifos)
coinc_event_row.instruments = ','.join(ifos)
coinc_inspiral_row.set_ifos(ifos)
coinc_event_row.time_slide_id = time_slide_id
coinc_event_row.process_id = proc_id
coinc_event_row.coinc_event_id = coinc_id
coinc_inspiral_row.coinc_event_id = coinc_id
coinc_inspiral_row.mchirp = sngl_combined_mchirp
coinc_inspiral_row.mass = sngl_combined_mtot
coinc_inspiral_row.set_end(\
LIGOTimeGPS(coinc_event_vals['time1'][idx]))
coinc_inspiral_row.snr = coinc_event_vals['stat'][idx]
coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
coinc_inspiral_row.combined_far = 1./coinc_event_vals['ifar'][idx]
# Transform to Hz
coinc_inspiral_row.combined_far = \
coinc_inspiral_row.combined_far / YRJUL_SI
coinc_event_row.likelihood = 0.
coinc_inspiral_row.minimum_duration = 0.
coinc_event_table.append(coinc_event_row)
coinc_inspiral_table.append(coinc_inspiral_row)
outdoc.childNodes[0].appendChild(coinc_def_table)
outdoc.childNodes[0].appendChild(coinc_event_table)
outdoc.childNodes[0].appendChild(coinc_event_map_table)
outdoc.childNodes[0].appendChild(time_slide_table)
outdoc.childNodes[0].appendChild(coinc_inspiral_table)
outdoc.childNodes[0].appendChild(sngl_inspiral_table)
ligolw_utils.write_filename(outdoc, file_name)
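# Illustrative usage sketch for ForegroundTriggers: the inputs are assumed to
# be the coincident statmap, template bank and per-detector merged trigger
# files produced by the search; paths and output name are hypothetical.
def _example_foreground_triggers(coinc_file, bank_file, sngl_files):
    fore = ForegroundTriggers(coinc_file, bank_file, sngl_files=sngl_files,
                              n_loudest=10)
    ifar = fore.get_coincfile_array('ifar')       # sorted loudest-first
    fore.to_coinc_xml_object('loudest_coincs.xml')
    return ifar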
chisq_choices = ['traditional', 'cont', 'bank', 'max_cont_trad', 'sg',
'max_bank_cont', 'max_bank_trad', 'max_bank_cont_trad']
def get_chisq_from_file_choice(hdfile, chisq_choice):
f = hdfile
if chisq_choice in ['traditional','max_cont_trad', 'max_bank_trad',
'max_bank_cont_trad']:
trad_chisq = f['chisq'][:]
# We now need to handle the case where chisq is not actually calculated
# 0 is used as a sentinel value
trad_chisq_dof = f['chisq_dof'][:]
trad_chisq /= (trad_chisq_dof * 2 - 2)
if chisq_choice in ['cont', 'max_cont_trad', 'max_bank_cont',
'max_bank_cont_trad']:
cont_chisq = f['cont_chisq'][:]
cont_chisq_dof = f['cont_chisq_dof'][:]
cont_chisq /= cont_chisq_dof
if chisq_choice in ['bank', 'max_bank_cont', 'max_bank_trad',
'max_bank_cont_trad']:
bank_chisq = f['bank_chisq'][:]
bank_chisq_dof = f['bank_chisq_dof'][:]
bank_chisq /= bank_chisq_dof
if chisq_choice == 'sg':
chisq = f['sg_chisq'][:]
elif chisq_choice == 'traditional':
chisq = trad_chisq
elif chisq_choice == 'cont':
chisq = cont_chisq
elif chisq_choice == 'bank':
chisq = bank_chisq
elif chisq_choice == 'max_cont_trad':
chisq = np.maximum(trad_chisq, cont_chisq)
elif chisq_choice == 'max_bank_cont':
chisq = np.maximum(bank_chisq, cont_chisq)
elif chisq_choice == 'max_bank_trad':
chisq = np.maximum(bank_chisq, trad_chisq)
elif chisq_choice == 'max_bank_cont_trad':
chisq = np.maximum(np.maximum(bank_chisq, cont_chisq), trad_chisq)
else:
err_msg="Do not recognized --chisq-choice %s" % chisq_choice
raise ValueError(err_msg)
return chisq
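# Illustrative usage sketch: combining a chisq choice with the matching snr
# column to form a reweighted SNR; 'hdfile' is assumed to be an open trigger
# group containing 'snr', 'chisq' and 'chisq_dof' datasets.
def _example_newsnr_from_file(hdfile):
    chisq = get_chisq_from_file_choice(hdfile, 'traditional')
    return events.newsnr(hdfile['snr'][:], chisq)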
def save_dict_to_hdf5(dic, filename):
"""
Parameters
----------
dic:
python dictionary to be converted to hdf5 format
filename:
desired name of hdf5 file
"""
with h5py.File(filename, 'w') as h5file:
recursively_save_dict_contents_to_group(h5file, '/', dic)
def recursively_save_dict_contents_to_group(h5file, path, dic):
"""
Parameters
----------
h5file:
h5py file to be written to
path:
path within h5py file to saved dictionary
dic:
python dictionary to be converted to hdf5 format
"""
for key, item in dic.items():
if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes, tuple, list)):
h5file[path + str(key)] = item
elif isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
else:
raise ValueError('Cannot save %s type'%type(item))
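# Illustrative usage sketch for save_dict_to_hdf5: the nested dictionary and
# output file name are hypothetical; only the types handled above are used.
def _example_save_dict(filename='nested_dict.hdf'):
    dic = {'run': {'snr': np.arange(5), 'detector': 'H1'},
           'version': np.int64(1)}
    save_dict_to_hdf5(dic, filename)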
| gpl-3.0 | -1,520,289,452,508,774,400 | 35.981481 | 93 | 0.559339 | false |
zbyte64/django-dockitcms | dockitcms/widgetblock/fields.py | 1 | 1521 | from django import forms
from dockit import schema
from dockitcms.fields import BaseFieldEntry, ListFieldMixin
from dockitcms.widgetblock.models import Widget
class WidgetField(BaseFieldEntry):
field_class = schema.SchemaField
def get_field_kwargs(self):
kwargs = dict(super(WidgetField, self).get_field_kwargs())
kwargs['schema'] = Widget
return kwargs
class Meta:
typed_key = 'WidgetField'
class ListWidgetField(ListFieldMixin, WidgetField):
def get_list_field_kwargs(self):
subfield = WidgetField.create_field(self)
return {'subfield': subfield}
class Meta:
typed_key = 'ListWidgetField'
class VisibleSchemaTypeField(schema.SchemaTypeField):
form_field_class = forms.ChoiceField
form_widget_class = forms.Select
def formfield_kwargs(self, **kwargs):
kwargs = super(VisibleSchemaTypeField, self).formfield_kwargs(**kwargs)
kwargs['choices'] = self.get_choices()
return kwargs
class TypedWidgetField(BaseFieldEntry):
widget_type = VisibleSchemaTypeField(schemas=Widget._meta.fields['widget_type'].schemas)
field_class = schema.SchemaField
def get_field_kwargs(self):
kwargs = dict(super(TypedWidgetField, self).get_field_kwargs())
kwargs.pop('widget_type', None)
kwargs['schema'] = Widget._meta.fields['widget_type'].schemas.get(self.widget_type, Widget)
return kwargs
class Meta:
typed_key = 'TypedWidgetField'
| bsd-3-clause | 1,446,314,102,255,688,400 | 28.823529 | 99 | 0.687048 | false |
mhrivnak/pulp | client_lib/pulp/client/launcher.py | 1 | 8499 | """
Entry point for both the admin and consumer clients. The config file location
is passed in and its contents are used to drive the rest of the client execution.
"""
import errno
from gettext import gettext as _
import logging
import logging.handlers
from optparse import OptionParser
import os
import stat
import sys
from okaara.prompt import COLOR_CYAN, COLOR_LIGHT_CYAN
from pulp.bindings.bindings import Bindings
from pulp.bindings.server import PulpConnection
from pulp.client import constants
from pulp.client.extensions.core import PulpPrompt, PulpCli, ClientContext, WIDTH_TERMINAL
from pulp.client.extensions.exceptions import ExceptionHandler
import pulp.client.extensions.loader as extensions_loader
from pulp.common.config import Config
def main(config, exception_handler_class=ExceptionHandler):
"""
Entry point into the launcher. Any extra necessary values will be pulled
from the given configuration files.
@param config: The CLI configuration.
@type config: Config
@return: exit code suitable to return to the shell launching the client
"""
ensure_user_pulp_dir()
# Command line argument handling
parser = OptionParser()
parser.disable_interspersed_args()
parser.add_option('-u', '--username', dest='username', action='store', default=None,
help=_('username for the Pulp server; if used will bypass the stored '
'certificate and override a username specified in ~/.pulp/admin.conf'))
parser.add_option('-p', '--password', dest='password', action='store', default=None,
help=_('password for the Pulp server; must be used with --username. '
'if used will bypass the stored certificate and override a password '
'specified in ~/.pulp/admin.conf'))
parser.add_option('--config', dest='config', default=None,
help=_('absolute path to the configuration file'))
parser.add_option('--map', dest='print_map', action='store_true', default=False,
help=_('prints a map of the CLI sections and commands'))
parser.add_option(
'-v', dest='verbose', action='count',
help=_('enables verbose output; use twice for increased verbosity with debug information'))
options, args = parser.parse_args()
# Configuration and Logging
if options.config is not None:
config.update(Config(options.config))
logger = _initialize_logging(verbose=options.verbose)
# General UI pieces
prompt = _create_prompt(config)
exception_handler = exception_handler_class(prompt, config)
# REST Bindings
username = options.username
password = options.password
if not username and not password:
# Try to get username/password from config if not explicitly set. username and password are
# not included by default so we need to catch KeyError Exceptions.
try:
username = config['auth']['username']
password = config['auth']['password']
except KeyError:
pass
if username and not password:
prompt_msg = 'Enter password: '
password = prompt.prompt_password(_(prompt_msg))
if password is prompt.ABORT:
prompt.render_spacer()
prompt.write(_('Login cancelled'))
sys.exit(os.EX_NOUSER)
server = _create_bindings(config, logger, username, password, verbose=options.verbose)
# Client context
context = ClientContext(server, config, logger, prompt, exception_handler)
cli = PulpCli(context)
context.cli = cli
# Load extensions into the UI in the context
extensions_dir = config['filesystem']['extensions_dir']
extensions_dir = os.path.expanduser(extensions_dir)
role = config['client']['role']
try:
extensions_loader.load_extensions(extensions_dir, context, role)
except extensions_loader.LoadFailed, e:
prompt.write(
_('The following extensions failed to load: %(f)s' % {'f': ', '.join(e.failed_packs)}))
prompt.write(_('More information on the failures may be found by using -v option one or '
'more times'))
return os.EX_OSFILE
# Launch the appropriate UI (add in shell support here later)
if options.print_map:
cli.print_cli_map(section_color=COLOR_LIGHT_CYAN, command_color=COLOR_CYAN)
return os.EX_OK
else:
code = cli.run(args)
return code
def ensure_user_pulp_dir():
"""
Creates ~/.pulp/ if it doesn't already exist.
Writes a warning to stderr if ~/.pulp/ has unsafe permissions.
This has to be run before the prompt object gets created, hence the old-school error reporting.
Several other places try to access ~/.pulp, both from pulp-admin and pulp-consumer. The best
we can do in order to create it once with the right permissions is to do call this function
early.
"""
path = os.path.expanduser(constants.USER_CONFIG_DIR)
# 0700
desired_mode = stat.S_IRUSR + stat.S_IWUSR + stat.S_IXUSR
try:
stats = os.stat(path)
actual_mode = stat.S_IMODE(stats.st_mode)
if actual_mode != desired_mode:
sys.stderr.write(_('Warning: path should have mode 0700 because it may contain '
'sensitive information: %(p)s\n\n' % {'p': path}))
except Exception, e:
# if it doesn't exist, make it
if isinstance(e, OSError) and e.errno == errno.ENOENT:
try:
os.mkdir(path, 0700)
except Exception, e:
sys.stderr.write(_('Failed to create path %(p)s: %(e)s\n\n' %
{'p': path, 'e': str(e)}))
sys.exit(1)
else:
sys.stderr.write(_('Failed to access path %(p)s: %(e)s\n\n' % {'p': path, 'e': str(e)}))
sys.exit(1)
def _initialize_logging(verbose=None):
"""
@return: configured cli logger
"""
cli_log_handler = logging.StreamHandler(sys.stderr)
cli_log_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
cli_logger = logging.getLogger('pulp')
cli_logger.addHandler(cli_log_handler)
if not verbose:
cli_logger.setLevel(logging.FATAL)
elif verbose == 1:
cli_logger.setLevel(logging.INFO)
else:
cli_logger.setLevel(logging.DEBUG)
return cli_logger
def _create_bindings(config, cli_logger, username, password, verbose=None):
"""
@return: bindings with a fully configured Pulp connection
@rtype: pulp.bindings.bindings.Bindings
"""
# Extract all of the necessary values
hostname = config['server']['host']
port = int(config['server']['port'])
cert_dir = config['filesystem']['id_cert_dir']
cert_name = config['filesystem']['id_cert_filename']
cert_dir = os.path.expanduser(cert_dir) # this will likely be in a user directory
cert_filename = os.path.join(cert_dir, cert_name)
# If the certificate doesn't exist, don't pass it to the connection creation
if not os.path.exists(cert_filename):
cert_filename = None
api_logger = None
if verbose and verbose > 1:
api_log_handler = logging.StreamHandler(sys.stderr)
api_log_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
api_logger = logging.getLogger('call_log')
api_logger.addHandler(api_log_handler)
api_logger.setLevel(logging.INFO)
# Create the connection and bindings
verify_ssl = config.parse_bool(config['server']['verify_ssl'])
ca_path = config['server']['ca_path']
conn = PulpConnection(
hostname, port, username=username, password=password, cert_filename=cert_filename,
logger=cli_logger, api_responses_logger=api_logger, verify_ssl=verify_ssl,
ca_path=ca_path)
bindings = Bindings(conn)
return bindings
def _create_prompt(config):
"""
@return: prompt instance to pass throughout the UI
@rtype: PulpPrompt
"""
enable_color = config.parse_bool(config['output']['enable_color'])
fallback_wrap = int(config['output']['wrap_width'])
if config.parse_bool(config['output']['wrap_to_terminal']):
wrap = WIDTH_TERMINAL
else:
wrap = fallback_wrap
prompt = PulpPrompt(enable_color=enable_color, wrap_width=wrap, fallback_wrap=fallback_wrap)
return prompt
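# Illustrative usage sketch: how a console script might invoke main(); the
# configuration path is hypothetical and would normally be supplied by the
# admin or consumer client entry point.
def _example_launch():
    config = Config('/etc/pulp/admin/admin.conf')
    sys.exit(main(config))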
| gpl-2.0 | -3,902,464,655,006,538,000 | 35.952174 | 100 | 0.649488 | false |
BackupTheBerlios/espressopp | examples/hadress/hadressFEC/hadressDensityFEC.py | 1 | 8414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# relevant imports
import sys
import time
import espresso
import mpi4py.MPI as MPI
import Tetracryst # Preparation of tetrahedral crystal and construction of bonds in tetrahedral liquid
from espresso import Real3D, Int3D
from espresso.tools import decomp
from espresso.tools import timers
# integration steps, cutoff, skin, AdResS specifications
steps = 1000
timestep = 0.0005
intervals = 100
rc = 4.5 # cutoff coarse-grained potential
rca = 1.122462048309373 # cutoff atomistic potential (cutoff (2^(1/6)), WCA)
skin = 0.4
# Parameters for the thermostat
#gamma = 2.0
#temp = 1.0
# Parameters for size of AdResS dimensions
ex_size = 5.0
hy_size = 5.0
# read equilibrated configuration file
pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = espresso.tools.readxyz("equilibrated_conf.xyz")
# Table for coarse-grained potential
tabCG = "table_potential.dat"
# FEC compensation table
tabFEC = "table_FEC_Gibbs.dat"
# number of CG particles
num_particlesCG = len(x)/4
# number of AT particles
num_particles = len(x)
# set up the system
sys.stdout.write('Setting up simulation ...\n')
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
system = espresso.System()
system.rng = espresso.esutil.RNG()
system.bc = espresso.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
# (H-)AdResS domain decomposition
system.storage = espresso.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
# prepare AT particles
allParticlesAT = []
allParticles = []
tuples = []
for pidAT in range(num_particles):
    allParticlesAT.append([pidAT, # these particles are added here only temporarily
Real3D(x[pidAT], y[pidAT], z[pidAT]), # position
Real3D(vx[pidAT], vy[pidAT], vz[pidAT]), # velocity
Real3D(0, 0, 0), # force
1, 1.0, 1]) # type, mass, is AT particle
# create CG particles
for pidCG in range(num_particlesCG):
    # place the CG molecule at the first atom for now; CG molecules are moved to the center of mass later
cmp = espresso.tools.AdressSetCG(4, pidCG, allParticlesAT)
# Preparation of tuples (tuples define, which atoms belong to which CG molecules)
tmptuple = [pidCG+num_particles]
for pidAT2 in range(4):
pid = pidCG*4+pidAT2
tmptuple.append(pid)
# append CG particles
allParticles.append([pidCG+num_particles, # CG particle has to be added first!
Real3D(cmp[0], cmp[1], cmp[2]), # pos
Real3D(0, 0, 0), # vel
Real3D(0, 0, 0), # force
0, 4.0, 0]) # type, mass, is not AT particle
# append AT particles
for pidAT in range(4):
pid = pidCG*4+pidAT
allParticles.append([pid, # now the AT particles can be added
(allParticlesAT[pid])[1], # pos
(allParticlesAT[pid])[2], # vel
(allParticlesAT[pid])[3], # force
(allParticlesAT[pid])[4], # type
(allParticlesAT[pid])[5], # mass
(allParticlesAT[pid])[6]]) # is AT particle
# append tuple to tuplelist
tuples.append(tmptuple)
# add particles to system
system.storage.addParticles(allParticles, "id", "pos", "v", "f", "type", "mass", "adrat")
# create FixedTupleList object
ftpl = espresso.FixedTupleListAdress(system.storage)
# and add the tuples
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
# add bonds between AT particles
fpl = espresso.FixedPairListAdress(system.storage, ftpl)
bonds = Tetracryst.makebonds(len(x))
fpl.addBonds(bonds)
# decompose after adding tuples and bonds
print "Added tuples and bonds, decomposing now ..."
system.storage.decompose()
print "done decomposing"
# AdResS Verlet list
vl = espresso.VerletListAdress(system, cutoff=rc, adrcut=rc,
dEx=ex_size, dHy=hy_size,
adrCenter=[Lx/2, Ly/2, Lz/2])
# non-bonded potentials
# LJ Capped WCA between AT and tabulated potential between CG particles
interNB = espresso.interaction.VerletListHadressLennardJones(vl, ftpl) # Here we need specific (H-)AdResS interaction type
potWCA = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=rca)
potCG = espresso.interaction.Tabulated(itype=3, filename=tabCG, cutoff=rc) # CG
interNB.setPotentialAT(type1=1, type2=1, potential=potWCA) # AT
interNB.setPotentialCG(type1=0, type2=0, potential=potCG) # CG
system.addInteraction(interNB)
# bonded potentials
# Quartic potential between AT particles
potQuartic = espresso.interaction.Quartic(K=75.0, r0=1.0)
interQuartic = espresso.interaction.FixedPairListQuartic(system, fpl, potQuartic)
system.addInteraction(interQuartic)
# VelocityVerlet integrator
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = timestep
# add AdResS extension
adress = espresso.integrator.Adress(system, vl, ftpl)
integrator.addExtension(adress)
# add Langevin thermostat extension
#langevin = espresso.integrator.LangevinThermostat(system)
#langevin.gamma = gamma
#langevin.temperature = temp
#langevin.adress = True # enable AdResS!
#integrator.addExtension(langevin)
# add Free Energy Compensation (FEC) extension
fec = espresso.integrator.FreeEnergyCompensation(system, center=[Lx/2, Ly/2, Lz/2])
fec.addForce(itype=3, filename=tabFEC, type=0)
integrator.addExtension(fec)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
espresso.tools.AdressDecomp(system, integrator)
# system information
print ''
print 'AdResS Center =', [Lx/2, Ly/2, Lz/2]
print 'number of AT particles =', num_particles
print 'number of CG particles =', num_particlesCG
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis
temperature = espresso.analysis.Temperature(system)
fmt = '%5d %8.4f %12.3f %12.3f %12.3f %12.3f %12.3f\n'
T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interNB.computeEnergy()
Eb = interQuartic.computeEnergy()
Ecorr = fec.computeCompEnergy()
sys.stdout.write(' step Temp etotal enonbonded ebonded ekinetic ecorrection\n')
sys.stdout.write(fmt % (0, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))
# Density profile preparation
density_array_total = []
Adds = 0.0
densityprofilegrid = 100
# Timer, Steps
start_time = time.clock()
nsteps = steps / intervals
# integration and on the fly analysis
for s in range(1, intervals + 1):
integrator.run(nsteps)
step = nsteps * s
T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interNB.computeEnergy()
Eb = interQuartic.computeEnergy()
Ecorr = fec.computeCompEnergy()
# calculate density profile
if s > 10:
densityprofile = espresso.analysis.XDensity(system)
density_array = densityprofile.compute(densityprofilegrid)
for i in range(len(density_array)):
if(i>=len(density_array_total)):
density_array_total.append(density_array[i])
else:
density_array_total[i] += density_array[i]
Adds += 1.0
sys.stdout.write(fmt % (step, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))
# correct the density profile according to number of samples
for i in range(len(density_array_total)):
density_array_total[i] /= Adds
# printing density profile
nameFile = 'density_profile_Gibbs.dat'
print ''
print "Printing the density profile to %s\n" %nameFile
tempFile = open (nameFile, 'w')
fmt = ' %12.8f %12.8f\n'
dr = Lx / float(densityprofilegrid)
for i in range( len(density_array_total) ):
tempFile.write(fmt % ( (i+0.5)*dr, density_array_total[i] ))
tempFile.close()
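# Illustrative sketch (defined but not executed): plotting the density profile
# written above; assumes numpy and matplotlib are available in the environment.
def _plot_density_profile(filename='density_profile_Gibbs.dat'):
    import numpy
    import matplotlib.pyplot as plt
    r, rho = numpy.loadtxt(filename, unpack=True)
    plt.plot(r, rho)
    plt.xlabel('x')
    plt.ylabel('number density')
    plt.savefig('density_profile_Gibbs.png')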
# simulation information
end_time = time.clock()
timers.show(integrator.getTimers(), precision=3)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
| gpl-3.0 | 402,461,459,966,892,200 | 32.388889 | 122 | 0.686475 | false |
Rudd-O/cloud-tool | cloudapis/cloud.py | 1 | 2976 | '''Implements the Cloud.com API'''
from cloudtool.utils import describe
import urllib
import urllib2
import os
import xml.dom.minidom
class CloudAPI:
@describe("server", "Management Server host name or address")
@describe("responseformat", "Response format: xml or json")
def __init__(self,
server="127.0.0.1:8096",
responseformat="xml",
):
self.__dict__.update(locals())
def _make_request(self,command,parameters=None):
'''Command is a string, parameters is a dictionary'''
if ":" in self.server:
host,port = self.server.split(":")
port = int(port)
else:
host = self.server
port = 8096
url = "http://" + self.server + "/?"
if not parameters: parameters = {}
parameters["command"] = command
parameters["response"] = self.responseformat
querystring = urllib.urlencode(parameters)
url += querystring
f = urllib2.urlopen(url)
data = f.read()
return data
def load_dynamic_methods():
'''creates smart function objects for every method in the commands.xml file'''
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE: rc.append(node.data)
return ''.join(rc)
# FIXME figure out installation and packaging
xmlfile = os.path.join(os.path.dirname(__file__),"commands.xml")
dom = xml.dom.minidom.parse(xmlfile)
for cmd in dom.getElementsByTagName("command"):
name = getText(cmd.getElementsByTagName('name')[0].childNodes).strip()
assert name
description = cmd.getElementsByTagName('name')[0].getAttribute("description")
if description: description = '"""%s"""' % description
else: description = ''
arguments = []
options = []
descriptions = []
for param in cmd.getElementsByTagName('arg'):
argname = getText(param.childNodes).strip()
assert argname
required = param.getAttribute("required").strip()
if required == 'true': required = True
elif required == 'false': required = False
else: raise AssertionError, "Not reached"
if required: arguments.append(argname)
options.append(argname)
description = param.getAttribute("description").strip()
if description: descriptions.append( (argname,description) )
funcparams = ["self"] + [ "%s=None"%o for o in options ]
funcparams = ", ".join(funcparams)
code = """
def %s(%s):
%s
parms = locals()
del parms["self"]
for arg in %r:
if locals()[arg] is None:
raise TypeError, "%%s is a required option"%%arg
for k,v in parms.items():
if v is None: del parms[k]
output = self._make_request("%s",parms)
print output
"""%(name,funcparams,description,arguments,name)
namespace = {}
exec code.strip() in namespace
func = namespace[name]
for argname,description in descriptions:
func = describe(argname,description)(func)
yield (name,func)
for name,meth in load_dynamic_methods(): setattr(CloudAPI,name,meth)
implementor = CloudAPI
del name,meth,describe,load_dynamic_methods
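# Illustrative usage sketch: the command name and parameters are hypothetical
# and must correspond to entries generated from commands.xml.
def _example_request():
    api = CloudAPI(server='127.0.0.1:8096', responseformat='json')
    return api._make_request('listZones', {'available': 'true'})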
| gpl-3.0 | -2,088,494,372,221,547,500 | 25.336283 | 79 | 0.673387 | false |
99cloud/keystone_register | openstack_dashboard/api/nova.py | 1 | 18812 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Openstack, LLC
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import ugettext as _
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import security_group_rules as nova_rules
from novaclient.v1_1.security_groups import SecurityGroup as NovaSecurityGroup
from novaclient.v1_1.servers import REBOOT_HARD, REBOOT_SOFT
from horizon.conf import HORIZON_CONFIG
from horizon.utils.memoized import memoized
from openstack_dashboard.api.base import (APIResourceWrapper, QuotaSet,
APIDictWrapper, url_for)
from openstack_dashboard.api import network
LOG = logging.getLogger(__name__)
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'
VOLUME_STATE_AVAILABLE = "available"
class VNCConsole(APIDictWrapper):
"""Wrapper for the "console" dictionary returned by the
novaclient.servers.get_vnc_console method.
"""
_attrs = ['url', 'type']
class SPICEConsole(APIDictWrapper):
"""Wrapper for the "console" dictionary returned by the
novaclient.servers.get_spice_console method.
"""
_attrs = ['url', 'type']
class Server(APIResourceWrapper):
"""Simple wrapper around novaclient.server.Server
Preserves the request info so image name can later be retrieved
"""
_attrs = ['addresses', 'attrs', 'id', 'image', 'links',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name', 'VirtualInterfaces', 'flavor', 'key_name',
'tenant_id', 'user_id', 'OS-EXT-STS:power_state',
'OS-EXT-STS:task_state', 'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:host']
def __init__(self, apiresource, request):
super(Server, self).__init__(apiresource)
self.request = request
@property
def image_name(self):
import glanceclient.exc as glance_exceptions
from openstack_dashboard.api import glance
try:
image = glance.image_get(self.request, self.image['id'])
return image.name
except glance_exceptions.ClientException:
return "(not found)"
@property
def internal_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
def reboot(self, hardness=REBOOT_HARD):
novaclient(self.request).servers.reboot(self.id, hardness)
class NovaUsage(APIResourceWrapper):
"""Simple wrapper around contrib/simple_usage.py."""
_attrs = ['start', 'server_usages', 'stop', 'tenant_id',
'total_local_gb_usage', 'total_memory_mb_usage',
'total_vcpus_usage', 'total_hours']
def get_summary(self):
return {'instances': self.total_active_instances,
'memory_mb': self.memory_mb,
'vcpus': getattr(self, "total_vcpus_usage", 0),
'vcpu_hours': self.vcpu_hours,
'local_gb': self.local_gb,
'disk_gb_hours': self.disk_gb_hours}
@property
def total_active_instances(self):
return sum(1 for s in self.server_usages if s['ended_at'] is None)
@property
def vcpus(self):
return sum(s['vcpus'] for s in self.server_usages
if s['ended_at'] is None)
@property
def vcpu_hours(self):
return getattr(self, "total_hours", 0)
@property
def local_gb(self):
return sum(s['local_gb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def memory_mb(self):
return sum(s['memory_mb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def disk_gb_hours(self):
return getattr(self, "total_local_gb_usage", 0)
class SecurityGroup(APIResourceWrapper):
"""Wrapper around novaclient.security_groups.SecurityGroup which wraps its
rules in SecurityGroupRule objects and allows access to them.
"""
_attrs = ['id', 'name', 'description', 'tenant_id']
@property
def rules(self):
"""Wraps transmitted rule info in the novaclient rule class."""
if "_rules" not in self.__dict__:
manager = nova_rules.SecurityGroupRuleManager
self._rules = [nova_rules.SecurityGroupRule(manager, rule)
for rule in self._apiresource.rules]
return self.__dict__['_rules']
@rules.setter
def rules(self, value):
self._rules = value
class SecurityGroupRule(APIResourceWrapper):
""" Wrapper for individual rules in a SecurityGroup. """
_attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range', 'group']
def __unicode__(self):
if 'name' in self.group:
vals = {'from': self.from_port,
'to': self.to_port,
'group': self.group['name']}
return _('ALLOW %(from)s:%(to)s from %(group)s') % vals
else:
vals = {'from': self.from_port,
'to': self.to_port,
'cidr': self.ip_range['cidr']}
return _('ALLOW %(from)s:%(to)s from %(cidr)s') % vals
class FlavorExtraSpec(object):
def __init__(self, flavor_id, key, val):
self.flavor_id = flavor_id
self.id = key
self.key = key
self.value = val
class FloatingIp(APIResourceWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id', 'pool']
def __init__(self, fip):
fip.__setattr__('port_id', fip.instance_id)
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(APIDictWrapper):
def __init__(self, pool):
pool_dict = {'id': pool.name,
'name': pool.name}
super(FloatingIpPool, self).__init__(pool_dict)
class FloatingIpTarget(APIDictWrapper):
def __init__(self, server):
server_dict = {'name': '%s (%s)' % (server.name, server.id),
'id': server.id}
super(FloatingIpTarget, self).__init__(server_dict)
class FloatingIpManager(network.FloatingIpManager):
def __init__(self, request):
self.request = request
self.client = novaclient(request)
def list_pools(self):
return [FloatingIpPool(pool)
for pool in self.client.floating_ip_pools.list()]
def list(self):
return [FloatingIp(fip)
for fip in self.client.floating_ips.list()]
def get(self, floating_ip_id):
return FloatingIp(self.client.floating_ips.get(floating_ip_id))
def allocate(self, pool):
return FloatingIp(self.client.floating_ips.create(pool=pool))
def release(self, floating_ip_id):
self.client.floating_ips.delete(floating_ip_id)
def associate(self, floating_ip_id, port_id):
# In Nova implied port_id is instance_id
server = self.client.servers.get(port_id)
fip = self.client.floating_ips.get(floating_ip_id)
self.client.servers.add_floating_ip(server.id, fip.ip)
def disassociate(self, floating_ip_id, port_id):
fip = self.client.floating_ips.get(floating_ip_id)
server = self.client.servers.get(fip.instance_id)
self.client.servers.remove_floating_ip(server.id, fip.ip)
def list_targets(self):
return [FloatingIpTarget(s) for s in self.client.servers.list()]
def get_target_id_by_instance(self, instance_id):
return instance_id
def is_simple_associate_supported(self):
return HORIZON_CONFIG["simple_ip_management"]
def novaclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
LOG.debug('novaclient connection created using token "%s" and url "%s"' %
(request.user.token.id, url_for(request, 'compute')))
c = nova_client.Client(request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=url_for(request, 'compute'),
insecure=insecure,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = url_for(request, 'compute')
return c
def server_vnc_console(request, instance_id, console_type='novnc'):
return VNCConsole(novaclient(request).servers.get_vnc_console(instance_id,
console_type)['console'])
def server_spice_console(request, instance_id, console_type='spice-html5'):
return SPICEConsole(novaclient(request).servers.get_spice_console(
instance_id, console_type)['console'])
def flavor_create(request, name, memory, vcpu, disk, ephemeral=0, swap=0,
metadata=None):
flavor = novaclient(request).flavors.create(name, memory, vcpu, disk,
ephemeral=ephemeral,
swap=swap)
if (metadata):
flavor_extra_set(request, flavor.id, metadata)
return flavor
def flavor_delete(request, flavor_id):
novaclient(request).flavors.delete(flavor_id)
def flavor_get(request, flavor_id):
return novaclient(request).flavors.get(flavor_id)
@memoized
def flavor_list(request):
"""Get the list of available instance sizes (flavors)."""
return novaclient(request).flavors.list()
def flavor_get_extras(request, flavor_id, raw=False):
"""Get flavor extra specs."""
flavor = novaclient(request).flavors.get(flavor_id)
extras = flavor.get_keys()
if raw:
return extras
return [FlavorExtraSpec(flavor_id, key, value) for
key, value in extras.items()]
def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys)
def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
if (not metadata): # not a way to delete keys
return None
return flavor.set_keys(metadata)
def snapshot_create(request, instance_id, name):
return novaclient(request).servers.create_image(instance_id, name)
def keypair_create(request, name):
return novaclient(request).keypairs.create(name)
def keypair_import(request, name, public_key):
return novaclient(request).keypairs.create(name, public_key)
def keypair_delete(request, keypair_id):
novaclient(request).keypairs.delete(keypair_id)
def keypair_list(request):
return novaclient(request).keypairs.list()
def server_create(request, name, image, flavor, key_name, user_data,
security_groups, block_device_mapping, nics=None,
instance_count=1):
return Server(novaclient(request).servers.create(
name, image, flavor, userdata=user_data,
security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
nics=nics,
min_count=instance_count), request)
def server_delete(request, instance):
novaclient(request).servers.delete(instance)
def server_get(request, instance_id):
return Server(novaclient(request).servers.get(instance_id), request)
def server_list(request, search_opts=None, all_tenants=False):
if search_opts is None:
search_opts = {}
if all_tenants:
search_opts['all_tenants'] = True
else:
search_opts['project_id'] = request.user.tenant_id
return [Server(s, request)
for s in novaclient(request).servers.list(True, search_opts)]
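# Illustrative usage sketch (not a Horizon view): 'request' is assumed to be
# an authenticated request carrying the user's token and tenant; the status
# filter is passed straight through to novaclient.
def _example_instance_overview(request):
    flavors = flavor_list(request)
    instances = server_list(request, search_opts={'status': 'ACTIVE'})
    details = [server_get(request, instance.id) for instance in instances]
    return flavors, instances, details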
def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
return novaclient(request).servers.get_console_output(instance_id,
length=tail_length)
def server_security_groups(request, instance_id):
"""Gets security groups of an instance."""
# TODO(gabriel): This needs to be moved up to novaclient, and should
# be removed once novaclient supports this call.
security_groups = []
nclient = novaclient(request)
resp, body = nclient.client.get('/servers/%s/os-security-groups'
% instance_id)
if body:
# Wrap data in SG objects as novaclient would.
sg_objs = [NovaSecurityGroup(nclient.security_groups, sg, loaded=True)
for sg in body.get('security_groups', [])]
# Then wrap novaclient's object with our own. Yes, sadly wrapping
# with two layers of objects is necessary.
security_groups = [SecurityGroup(sg) for sg in sg_objs]
# Package up the rules, as well.
for sg in security_groups:
rule_objects = [SecurityGroupRule(rule) for rule in sg.rules]
sg.rules = rule_objects
return security_groups
def server_add_security_group(request, instance_id, security_group_name):
return novaclient(request).servers.add_security_group(instance_id,
security_group_name)
def server_remove_security_group(request, instance_id, security_group_name):
return novaclient(request).servers.remove_security_group(
instance_id,
security_group_name)
def server_pause(request, instance_id):
novaclient(request).servers.pause(instance_id)
def server_unpause(request, instance_id):
novaclient(request).servers.unpause(instance_id)
def server_suspend(request, instance_id):
novaclient(request).servers.suspend(instance_id)
def server_resume(request, instance_id):
novaclient(request).servers.resume(instance_id)
def server_reboot(request, instance_id, hardness=REBOOT_HARD):
server = server_get(request, instance_id)
server.reboot(hardness)
def server_update(request, instance_id, name):
response = novaclient(request).servers.update(instance_id, name=name)
# TODO(gabriel): servers.update method doesn't return anything. :-(
if response is None:
return True
else:
return response
def server_migrate(request, instance_id):
novaclient(request).servers.migrate(instance_id)
def server_confirm_resize(request, instance_id):
novaclient(request).servers.confirm_resize(instance_id)
def server_revert_resize(request, instance_id):
novaclient(request).servers.revert_resize(instance_id)
def tenant_quota_get(request, tenant_id):
return QuotaSet(novaclient(request).quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
novaclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return QuotaSet(novaclient(request).quotas.defaults(tenant_id))
def usage_get(request, tenant_id, start, end):
return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))
def usage_list(request, start, end):
return [NovaUsage(u) for u in
novaclient(request).usage.list(start, end, True)]
def security_group_list(request):
return [SecurityGroup(g) for g
in novaclient(request).security_groups.list()]
def security_group_get(request, sg_id):
return SecurityGroup(novaclient(request).security_groups.get(sg_id))
def security_group_create(request, name, desc):
return SecurityGroup(novaclient(request).security_groups.create(name,
desc))
def security_group_delete(request, security_group_id):
novaclient(request).security_groups.delete(security_group_id)
def security_group_rule_create(request, parent_group_id, ip_protocol=None,
from_port=None, to_port=None, cidr=None,
group_id=None):
sg = novaclient(request).security_group_rules.create(parent_group_id,
ip_protocol,
from_port,
to_port,
cidr,
group_id)
return SecurityGroupRule(sg)
def security_group_rule_delete(request, security_group_rule_id):
novaclient(request).security_group_rules.delete(security_group_rule_id)
def virtual_interfaces_list(request, instance_id):
return novaclient(request).virtual_interfaces.list(instance_id)
def get_x509_credentials(request):
return novaclient(request).certs.create()
def get_x509_root_certificate(request):
return novaclient(request).certs.get()
def instance_volume_attach(request, volume_id, instance_id, device):
return novaclient(request).volumes.create_server_volume(instance_id,
volume_id,
device)
def instance_volume_detach(request, instance_id, att_id):
return novaclient(request).volumes.delete_server_volume(instance_id,
att_id)
def instance_volumes_list(request, instance_id):
from openstack_dashboard.api.cinder import cinderclient
volumes = novaclient(request).volumes.get_server_volumes(instance_id)
for volume in volumes:
volume_data = cinderclient(request).volumes.get(volume.id)
volume.name = volume_data.display_name
return volumes
def tenant_absolute_limits(request, reserved=False):
limits = novaclient(request).limits.get(reserved=reserved).absolute
limits_dict = {}
for limit in limits:
# -1 is used to represent unlimited quotas
if limit.value == -1:
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
| apache-2.0 | 3,814,561,238,036,676,000 | 32.895495 | 79 | 0.633638 | false |
VcamX/grpc | src/python/grpcio/tests/unit/framework/common/test_control.py | 1 | 3314 | # Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for instructing systems under test to block or fail."""
import abc
import contextlib
import threading
import six
class Defect(Exception):
"""Simulates a programming defect raised into in a system under test.
Use of a standard exception type is too easily misconstrued as an actual
defect in either the test infrastructure or the system under test.
"""
class Control(six.with_metaclass(abc.ABCMeta)):
"""An object that accepts program control from a system under test.
Systems under test passed a Control should call its control() method
frequently during execution. The control() method may block, raise an
exception, or do nothing, all according to the enclosing test's desire for
the system under test to simulate hanging, failing, or functioning.
"""
@abc.abstractmethod
def control(self):
"""Potentially does anything."""
raise NotImplementedError()
class PauseFailControl(Control):
"""A Control that can be used to pause or fail code under control."""
def __init__(self):
self._condition = threading.Condition()
self._paused = False
self._fail = False
def control(self):
with self._condition:
if self._fail:
raise Defect()
while self._paused:
self._condition.wait()
@contextlib.contextmanager
def pause(self):
"""Pauses code under control while controlling code is in context."""
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self):
"""Fails code under control while controlling code is in context."""
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
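# Illustrative usage sketch: how a test might drive a system under test with
# PauseFailControl; the system under test calls control() from its own
# threads, so no call is made inside the pause() block here.
def _example_pause_fail_control():
  control = PauseFailControl()
  with control.pause():
    pass  # concurrent control() callers block until this block exits
  with control.fail():
    try:
      control.control()
    except Defect:
      pass  # callers see a simulated Defect while failing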
| bsd-3-clause | 1,660,540,332,937,331,200 | 33.884211 | 76 | 0.737477 | false |
hychen/boliau | boliau/plugins/lp_cli/actionlib.py | 1 | 6379 | #!/usr/bin/env python
# -*- coding: utf-8 -*
#
# File: lp_cli.py
#
# Copyright (C) 2012 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import logging
from boliau import actionlib
from launchpadlib.launchpad import Launchpad
# -----------------------------------------------------------------------
# Global Variables
# -----------------------------------------------------------------------
LP_VALIDATE_BUGTASK_STATUS={
'In Progress': 100,
'Triaged': 90,
'Confirmed': 80,
'New': 70,
'Incomplete (with response)': 60,
'Incomplete (without response)': 50,
'Incomplete': 40,
'Fix Committed': 30,
'Fix Released': 20,
'Won\'t Fix': 10,
'Invalid': 0,
'Opinion': 0}
LP_VALIDATE_BUGTASK_IMPORTANCE={
'Critical': 5,
'High': 4,
'Medium': 3,
'Low': 2,
'Wishlist': 1,
'Undecided': 0}
LP_VALIDATE_BRANCH_STATUS=(
'Experimental',
'Development',
'Mature',
'Merged',
'Abandoned')
class LaunchpadDatabase(object):
lp = None
LP_VALIDATE_BUGTASK_STATUS = LP_VALIDATE_BUGTASK_STATUS
LP_VALIDATE_BUGTASK_IMPORTANCE = LP_VALIDATE_BUGTASK_IMPORTANCE
def connect(self):
if not self.lp:
system = os.getenv('LPSYSTEM') or 'production'
cachedir = os.path.expanduser("~/.launchpadlib/cache")
self.lp = Launchpad.login_with('lp-cli', system, cachedir)
return self.lp
def get(self, entry_type, entry_id):
self.connect()
if entry_type != 'people':
entry_type = entry_type+'s'
try:
return getattr(self.lp, entry_type)[entry_id]
except KeyError as e:
logging.debug(e)
return None
def load_lp_objects(self, opts):
if opts.get('assignee'):
opts['assignee'] = self.get('people', opts['assignee'])
return opts
class _StartAction(object):
def __init__(self):
self.db = LaunchpadDatabase()
self.acc = actionlib.Mission(self.db)
# -----------------------------------------------------------------------
# Action Classes
# -----------------------------------------------------------------------
class Get(_StartAction):
desc = """
Get a Launchpad Entry.
"""
link_type = 'None -> Mission'
data_type = 'Any -> Any'
def __call__(self, **opts):
entry_type = opts.pop('entry_type')
entry_id = opts.pop('entry_id')
self.acc.add_task(repr(self.__class__),
self.maintask,
entry_type, entry_id,
**opts)
return self.acc
def maintask(db, entry_type, entry_id, **opts):
return db.get(entry_type, entry_id)
class FindBugTasks(_StartAction):
desc = """
Search Bug Tasks of the entry.
"""
link_type = 'None -> Mission'
data_type = 'Any -> Any'
def __call__(self, **opts):
entry_type = opts.pop('entry_type')
entry_id = opts.pop('entry_id')
self.acc.add_task(repr(self.__class__),
self.maintask,
entry_type, entry_id,
**opts)
return self.acc
def maintask(db, entry_type, entry_id, **opts):
entry = db.get(entry_type, entry_id)
# handling milestone.
if entry and entry_type == 'project' and opts.get('milestone'):
opts['milestone'] = entry.getMilestone(name=opts['milestone'])
# handling status.
if 'Todo' in opts['status'] and 'All' in opts['status']:
raise Exception("Todo and All are confilict.")
if 'All' in opts['status']:
opts['status'] = db.LP_VALIDATE_BUGTASK_STATUS.keys()
elif 'Todo' in opts['status']:
opts['status'] = filter(lambda e: e not in ('Invalid',
'Won\'t Fix',
'Fix Committed',
'Fix Released',
'Opinion',),
db.LP_VALIDATE_BUGTASK_STATUS.keys())
opts = db.load_lp_objects(opts)
return entry.searchTasks(**opts)
class FindPackages(_StartAction):
desc = 'Find packages'
link_type = 'None -> Mission'
data_type = 'Any -> Any'
def __call__(self, **opts):
ppa = opts.pop('ppa').replace('ppa:', '')
ppa_owner, ppa_name = ppa.split('/')
self.acc.add_task(repr(self.__class__),
self.maintask,
ppa_owner, ppa_name,
**opts)
return self.acc
    def maintask(db, ppa_owner, ppa_name, **opts):
        people = db.get('people', ppa_owner)
        if not people:
            people = db.get('team', ppa_owner)
archive = people.getPPAByName(name=ppa_name)
return archive.getPublishedSources(status='Published')
| mit | 4,258,922,284,468,343,000 | 33.112299 | 77 | 0.521555 | false |
atiro/nikola | nikola/plugins/compile/ipynb.py | 1 | 6893 | # -*- coding: utf-8 -*-
# Copyright © 2013-2016 Damián Avila, Chris Warrick and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on nbconvert."""
from __future__ import unicode_literals, print_function
import io
import os
import sys
try:
from nbconvert.exporters import HTMLExporter
import nbformat
current_nbformat = nbformat.current_nbformat
from jupyter_client import kernelspec
from traitlets.config import Config
flag = True
ipy_modern = True
except ImportError:
try:
import IPython
from IPython.nbconvert.exporters import HTMLExporter
if IPython.version_info[0] >= 3: # API changed with 3.0.0
from IPython import nbformat
current_nbformat = nbformat.current_nbformat
from IPython.kernel import kernelspec
ipy_modern = True
else:
import IPython.nbformat.current as nbformat
current_nbformat = 'json'
kernelspec = None
ipy_modern = False
from IPython.config import Config
flag = True
except ImportError:
flag = None
ipy_modern = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
from nikola.shortcodes import apply_shortcodes
class CompileIPynb(PageCompiler):
"""Compile IPynb into HTML."""
name = "ipynb"
friendly_name = "Jupyter/IPython Notebook"
demote_headers = True
default_kernel = 'python2' if sys.version_info[0] == 2 else 'python3'
def set_site(self, site):
"""Set Nikola site."""
self.logger = get_logger('compile_ipynb', STDERR_HANDLER)
super(CompileIPynb, self).set_site(site)
def compile_html_string(self, source, is_two_file=True):
"""Export notebooks as HTML strings."""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
HTMLExporter.default_template = 'basic'
c = Config(self.site.config['IPYNB_CONFIG'])
exportHtml = HTMLExporter(config=c)
with io.open(source, "r", encoding="utf8") as in_file:
nb_json = nbformat.read(in_file, current_nbformat)
(body, resources) = exportHtml.from_notebook_node(nb_json)
return body
def compile_html(self, source, dest, is_two_file=True):
"""Compile source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
with io.open(dest, "w+", encoding="utf8") as out_file:
output = self.compile_html_string(source, is_two_file)
output = apply_shortcodes(output, self.site.shortcode_registry, self.site, source)
out_file.write(output)
def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
"""Read metadata directly from ipynb file.
        As ipynb files support arbitrary metadata as JSON, the metadata used by Nikola
        is assumed to be in the 'nikola' subfield.
"""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
source = post.source_path
with io.open(source, "r", encoding="utf8") as in_file:
nb_json = nbformat.read(in_file, current_nbformat)
# Metadata might not exist in two-file posts or in hand-crafted
# .ipynb files.
return nb_json.get('metadata', {}).get('nikola', {})
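    # For illustration (not part of the original source): in a one-file post the
    # notebook's JSON carries Nikola's metadata under the 'nikola' key, roughly
    # like this (field values are made up):
    #
    #     {
    #       "cells": [...],
    #       "metadata": {
    #         "nikola": {
    #           "title": "My post",
    #           "slug": "my-post",
    #           "date": "2016-01-01 12:00:00 UTC"
    #         }
    #       },
    #       "nbformat": 4,
    #       "nbformat_minor": 0
    #     }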
def create_post(self, path, **kw):
"""Create a new post."""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
kernel = kw.pop('ipython_kernel', None)
# is_page is not needed to create the file
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if content.startswith("{"):
# imported .ipynb file, guaranteed to start with "{" because it’s JSON.
nb = nbformat.reads(content, current_nbformat)
else:
if ipy_modern:
nb = nbformat.v4.new_notebook()
nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
else:
nb = nbformat.new_notebook()
nb["worksheets"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]
if kernelspec is not None:
if kernel is None:
kernel = self.default_kernel
self.logger.notice('No kernel specified, assuming "{0}".'.format(kernel))
IPYNB_KERNELS = {}
ksm = kernelspec.KernelSpecManager()
for k in ksm.find_kernel_specs():
IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
IPYNB_KERNELS[k]['name'] = k
del IPYNB_KERNELS[k]['argv']
if kernel not in IPYNB_KERNELS:
                self.logger.error('Unknown kernel "{0}". Maybe you misspelled it?'.format(kernel))
self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
raise Exception('Unknown kernel "{0}"'.format(kernel))
nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
else:
# Older IPython versions don’t need kernelspecs.
pass
if onefile:
nb["metadata"]["nikola"] = metadata
with io.open(path, "w+", encoding="utf8") as fd:
if ipy_modern:
nbformat.write(nb, fd, 4)
else:
nbformat.write(nb, fd, 'ipynb')
| mit | -6,981,850,663,584,074,000 | 39.040698 | 114 | 0.623203 | false |
rangertaha/salt-manager | salt-manager/webapp/apps/management/commands/bshell.py | 1 | 3463 | #!/usr/bin/env python
"""
"""
import os
from optparse import make_option
from django.core.management.base import NoArgsCommand
def starting_imports():
from django.db.models.loading import get_models
for m in get_models():
exec "from %s import %s" % (m.__module__, m.__name__)
from datetime import datetime, timedelta
sdt = datetime.today().date()
edt = sdt + timedelta(days=1)
return locals()
def start_plain_shell(use_plain):
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
# See ticket 5082.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if not use_plain:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
execfile(pythonrc)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
import user
code.interact(local=imported_objects)
def start_ipython_shell():
from IPython.Shell import IPShell
import IPython
# Explicitly pass an empty list as arguments, because otherwise IPython
# would use sys.argv from this script.
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
def start_bpython_shell():
from bpython import cli
cli.main(args=[], locals_=starting_imports())
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython.'),
make_option('--ipython', action='store_true', dest='ipython',
help='Tells Django to use ipython.'),
make_option('--bpython', action='store_true', dest='bpython',
help='Tells Django to use bpython.'),
)
help = "Runs a Python interactive interpreter. Tries to use bPython, if it's available."
requires_model_validation = False
def handle_noargs(self, **options):
# XXX: (Temporary) workaround for ticket #1796: force early loading of all
# models from installed apps.
from django.db.models.loading import get_models
loaded_models = get_models()
use_plain = options.get('plain', False)
use_ipython = options.get('ipython', False)
use_bpython = options.get('bpython', False)
try:
if use_plain:
# Don't bother loading IPython, because the user wants plain Python.
raise ImportError
elif use_ipython:
start_ipython_shell()
elif use_bpython:
start_bpython_shell()
else:
start_bpython_shell()
except ImportError:
# fallback to plain shell if we encounter an ImportError
start_plain_shell(use_plain)
| mit | -6,263,867,662,607,562,000 | 34.336735 | 92 | 0.6324 | false |
nall/pythonista-tradervue | utils.py | 1 | 5565 | # vim: ft=python tabstop=2 shiftwidth=2 expandtab
# Copyright (c) 2015, Jon Nall
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pythonista-tradervue nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import clipboard
import console
import keychain
import logging
import os
import re
import sys
from datetime import datetime, date
sys.path.insert(0, os.path.realpath(os.path.join(os.getcwd(), 'tradervue')))
from tradervue import Tradervue, TradervueLogFormatter
LOG = None
DEBUG = 0 # 1 for normal debug, 2 for HTTP debug as well
KEYCHAIN_ID = 'tradervue'
USER_AGENT = "Pythonista Tradervue ([email protected])"
def get_args(argv):
args = { 'action': 'set_password',
'user': None,
'text': clipboard.get(),
'date': date.today().strftime('%Y%m%d'),
'overwrite': "0" }
for a in argv:
pairs = a.split(':')
for p in pairs:
      # maxsplit=1 so that values containing '=' are kept intact
      (k, v) = p.split('=', 1)
if k not in args:
raise ValueError("Invalid argument '%s'" % (k))
args[k] = v
if args['user'] is None:
args['user'] = console.input_alert("Tradervue Username")
if not re.match(r'^\d{8}$', args['date']):
raise ValueError("Invalid date format '%s'. Must be YYYYMMDD" % (args['date']))
if int(args['overwrite']) == 0:
args['overwrite'] = False
else:
args['overwrite'] = True
args['date'] = datetime.strptime(args['date'], '%Y%m%d')
return args
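# Illustrative example (not part of the original script): arguments arrive as
# colon-separated key=value pairs, so an invocation such as
#   action=update_journal:user=jdoe:date=20150102:overwrite=1
# (with 'jdoe' as a made-up username) produces roughly
#   {'action': 'update_journal', 'user': 'jdoe', 'overwrite': True,
#    'date': datetime(2015, 1, 2, 0, 0), 'text': <clipboard contents>}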
def set_password(args):
p = console.password_alert("Tradervue Credentials", args['user'])
keychain.set_password(KEYCHAIN_ID, args['user'], p)
return True
def delete_password(args):
if keychain.get_password(KEYCHAIN_ID, args['user']) is None:
LOG.error("No password was set for %s" % (args['user']))
return False
else:
keychain.delete_password(KEYCHAIN_ID, args['user'])
LOG.info("Deleted credentials for %s" % (args['user']))
return True
def new_note(args, tv):
note_id = tv.create_note(args['text'])
if note_id is None:
LOG.error("Failed to create new note")
return False
else:
LOG.info("Created new note with ID %s" % (note_id))
return True
def update_journal(args, tv):
datestring = args['date'].strftime('%Y-%m-%d')
# Check if we have an existing entry on the date. If not, just create it
# Otherwise overwrite it if args['overwrite'] is set or append to it if not
#
journal = tv.get_journal(date = args['date'])
if journal is None:
journal_id = tv.create_journal(args['date'], args['text'])
if journal_id is None:
LOG.error("Failed to create journal on %s" % (datestring))
return False
else:
LOG.info("Created new journal on %s with ID %s" % (datestring, journal_id))
return True
else:
verb = 'Appended'
text = journal['notes']
if args['overwrite']:
      verb = 'Overwrote'
text = ''
text += "\n%s" % (args['text'])
print text
if tv.update_journal(journal['id'], text):
LOG.info("%s journal on %s (ID %s)" % (verb, journal['id'], datestring))
return True
else:
LOG.error("Failed to update journal on %s (ID %s)" % (datestring, journal['id']))
return False
def main():
global LOG
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
if DEBUG > 1:
LOG.setLevel(logging.DEBUG)
c = logging.StreamHandler()
c.setFormatter(TradervueLogFormatter())
LOG.addHandler(c)
args = get_args(sys.argv[1:])
actions = { 'set_password': set_password,
'delete_password': delete_password,
'new_note': new_note,
'update_journal': update_journal }
ok = False
if args['action'] not in actions:
raise ValueError("Invalid action '%s'" % (args['action']))
elif args['action'].endswith('_password'):
ok = actions[args['action']](args)
else:
p = keychain.get_password(KEYCHAIN_ID, args['user'])
if p is None:
# Request one from the user
p = console.password_alert("Tradervue Credentials", args['user'])
    tv = Tradervue(args['user'], p, USER_AGENT, verbose_http = True if DEBUG > 1 else False)
    ok = actions[args['action']](args, tv)
return 0 if ok else 1
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | -2,197,524,809,621,453,300 | 32.727273 | 94 | 0.669003 | false |
smcoll/stormpath-django | django_stormpath/social.py | 1 | 7183 | from django.contrib.auth import login as django_login
from django.shortcuts import resolve_url
from django.core.urlresolvers import reverse
from django.conf import settings
from stormpath.error import Error as StormpathError
from stormpath.resources.provider import Provider
from requests_oauthlib import OAuth2Session
from .models import CLIENT, APPLICATION
from .backends import StormpathSocialBackend
SOCIAL_AUTH_BACKEND = 'django_stormpath.backends.StormpathSocialBackend'
GITHUB_AUTHORIZATION_BASE_URL = 'https://github.com/login/oauth/authorize'
GITHUB_TOKEN_URL = 'https://github.com/login/oauth/access_token'
GOOGLE_AUTHORIZATION_BASE_URL = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
FACEBOOK_AUTHORIZATION_BASE_URL = 'https://www.facebook.com/dialog/oauth'
FACEBOOK_TOKEN_URL = 'https://graph.facebook.com/oauth/access_token'
LINKEDIN_AUTHORIZATION_BASE_URL = 'https://www.linkedin.com/uas/oauth2/authorization'
LINKEDIN_TOKEN_URL = 'https://www.linkedin.com/uas/oauth2/accessToken'
def _get_django_user(account):
backend = StormpathSocialBackend()
return backend.authenticate(account=account)
def get_access_token(provider, authorization_response, redirect_uri):
if provider == Provider.GOOGLE:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GOOGLE']['client_id'],
redirect_uri=redirect_uri
)
ret = p.fetch_token(GOOGLE_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['GOOGLE']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
elif provider == Provider.FACEBOOK:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['FACEBOOK']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
p = facebook_compliance_fix(p)
ret = p.fetch_token(FACEBOOK_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['FACEBOOK']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
elif provider == Provider.GITHUB or provider.upper() == Provider.GITHUB:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GITHUB']['client_id'],
)
ret = p.fetch_token(GITHUB_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['GITHUB']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
elif provider == Provider.LINKEDIN:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['LINKEDIN']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
p = linkedin_compliance_fix(p)
ret = p.fetch_token(LINKEDIN_TOKEN_URL,
client_secret=settings.STORMPATH_SOCIAL['LINKEDIN']['client_secret'],
authorization_response=authorization_response)
return ret['access_token']
else:
return None
def handle_social_callback(request, provider):
provider_redirect_url = 'stormpath_' + provider.lower() + '_login_callback'
abs_redirect_uri = request.build_absolute_uri(
reverse(provider_redirect_url, kwargs={'provider': provider}))
access_token = get_access_token(
provider,
request.build_absolute_uri(),
abs_redirect_uri)
if not access_token:
        raise RuntimeError('Error communicating with Authentication Provider: %s' % provider)
params = {'provider': provider, 'access_token': access_token}
try:
account = APPLICATION.get_provider_account(**params)
except StormpathError as e:
        # We might be missing a social directory.
        # First check whether a matching provider directory already exists;
        # if it does, the error is genuine, so re-raise it.
for asm in APPLICATION.account_store_mappings:
            if (getattr(asm.account_store, 'provider', None) and
asm.account_store.provider.provider_id == provider):
raise e
        # If we couldn't find one, create it for the user,
        # map it to the current application,
        # and try to authenticate again.
create_provider_directory(provider, abs_redirect_uri)
account = APPLICATION.get_provider_account(**params)
user = _get_django_user(account)
user.backend = SOCIAL_AUTH_BACKEND
django_login(request, user)
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
def create_provider_directory(provider, redirect_uri):
"""Helper function for creating a provider directory"""
dir = CLIENT.directories.create({
'name': APPLICATION.name + '-' + provider,
'provider': {
'client_id': settings.STORMPATH_SOCIAL[provider.upper()]['client_id'],
'client_secret': settings.STORMPATH_SOCIAL[provider.upper()]['client_secret'],
'redirect_uri': redirect_uri,
'provider_id': provider,
},
})
APPLICATION.account_store_mappings.create({
'application': APPLICATION,
'account_store': dir,
'list_index': 99,
'is_default_account_store': False,
'is_default_group_store': False,
})
def get_authorization_url(provider, redirect_uri):
if provider == Provider.GOOGLE:
scope = [
"email",
"profile"
]
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GOOGLE']['client_id'],
scope=scope,
redirect_uri=redirect_uri
)
authorization_url, state = p.authorization_url(GOOGLE_AUTHORIZATION_BASE_URL)
return authorization_url, state
elif provider == Provider.FACEBOOK:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['FACEBOOK']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
p = facebook_compliance_fix(p)
authorization_url, state = p.authorization_url(FACEBOOK_AUTHORIZATION_BASE_URL)
return authorization_url, state
elif provider == Provider.GITHUB or provider.upper() == Provider.GITHUB:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['GITHUB']['client_id'],
)
authorization_url, state = p.authorization_url(GITHUB_AUTHORIZATION_BASE_URL)
return authorization_url, state
elif provider == Provider.LINKEDIN:
p = OAuth2Session(
client_id=settings.STORMPATH_SOCIAL['LINKEDIN']['client_id'],
redirect_uri=redirect_uri
)
from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
p = linkedin_compliance_fix(p)
authorization_url, state = p.authorization_url(LINKEDIN_AUTHORIZATION_BASE_URL)
return authorization_url, state
else:
raise RuntimeError('Invalid Provider %s' % provider)
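# Illustrative sketch (not part of django_stormpath's public API): minimal
# Django views showing how get_authorization_url() and handle_social_callback()
# are intended to be paired. The example_* view names and the 'oauth_state'
# session key are assumptions for the example only; the URL name format mirrors
# the one handle_social_callback() expects.
def example_social_login(request, provider):
    from django.shortcuts import redirect
    redirect_uri = request.build_absolute_uri(
        reverse('stormpath_' + provider.lower() + '_login_callback',
                kwargs={'provider': provider}))
    authorization_url, state = get_authorization_url(provider, redirect_uri)
    # Keep the state so the callback view could verify it if desired.
    request.session['oauth_state'] = state
    return redirect(authorization_url)
def example_social_login_callback(request, provider):
    from django.shortcuts import redirect
    return redirect(handle_social_callback(request, provider))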
| apache-2.0 | 732,327,008,245,947,000 | 38.905556 | 92 | 0.655019 | false |
jakirkham/lazyflow | lazyflow/operators/opFeatureMatrixCache.py | 1 | 12534 | from functools import partial
import logging
logger = logging.getLogger(__name__)
import numpy
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.request import RequestLock, Request, RequestPool
from lazyflow.utility import OrderedSignal
from lazyflow.roi import getBlockBounds, getIntersectingBlocks, determineBlockShape
class OpFeatureMatrixCache(Operator):
"""
- Request features and labels in blocks
- For nonzero label pixels in each block, extract the label image
- Cache the feature matrix for each block separately
- Output the concatenation of all feature matrices
Note: This operator does not currently use the NonZeroLabelBlocks slot.
Instead, it only requests labels for blocks that have been
marked dirty via dirty notifications from the LabelImage slot.
As a result, you MUST connect/configure this operator before you
load your upstream label cache with values.
This operator must already be "watching" when when the label operator
is initialized with its first labels.
"""
FeatureImage = InputSlot()
LabelImage = InputSlot()
NonZeroLabelBlocks = InputSlot() # TODO: Eliminate this slot. It isn't used...
# Output is a single 'value', which is a 2D ndarray.
    # The first column holds the label values; the remaining columns are the features.
# (As a consequence of this, labels are converted to float)
LabelAndFeatureMatrix = OutputSlot()
ProgressSignal = OutputSlot() # For convenience of passing several progress signals
# to a downstream operator (such as OpConcatenateFeatureMatrices),
# we provide the progressSignal member as an output slot.
MAX_BLOCK_PIXELS = 1e6
def __init__(self, *args, **kwargs):
super(OpFeatureMatrixCache, self).__init__(*args, **kwargs)
self._lock = RequestLock()
self.progressSignal = OrderedSignal()
self._progress_lock = RequestLock()
self._blockshape = None
self._dirty_blocks = set()
self._blockwise_feature_matrices = {}
self._block_locks = {} # One lock per stored block
self._init_blocks(None, None)
def _init_blocks(self, input_shape, new_blockshape):
old_blockshape = self._blockshape
if new_blockshape == old_blockshape:
# Nothing to do
return
if ( len(self._dirty_blocks) != 0
or len(self._blockwise_feature_matrices) != 0):
raise RuntimeError("It's too late to change the dimensionality of your data after you've already started training.\n"
"Delete all your labels and try again.")
# In these set/dict members, the block id (dict key)
# is simply the block's start coordinate (as a tuple)
self._blockshape = new_blockshape
logger.debug("Initialized with blockshape: {}".format(new_blockshape))
def setupOutputs(self):
        # We assume that channel is the last axis
assert self.FeatureImage.meta.getAxisKeys()[-1] == 'c'
assert self.LabelImage.meta.getAxisKeys()[-1] == 'c'
assert self.LabelImage.meta.shape[-1] == 1
# For now, we assume that the two input images have the same shape (except channel)
# This constraint could be relaxed in the future if necessary
assert self.FeatureImage.meta.shape[:-1] == self.LabelImage.meta.shape[:-1],\
"FeatureImage and LabelImage shapes do not match: {} vs {}"\
"".format( self.FeatureImage.meta.shape, self.LabelImage.meta.shape )
self.LabelAndFeatureMatrix.meta.shape = (1,)
self.LabelAndFeatureMatrix.meta.dtype = object
self.LabelAndFeatureMatrix.meta.channel_names = self.FeatureImage.meta.channel_names
num_feature_channels = self.FeatureImage.meta.shape[-1]
if num_feature_channels != self.LabelAndFeatureMatrix.meta.num_feature_channels:
self.LabelAndFeatureMatrix.meta.num_feature_channels = num_feature_channels
self.LabelAndFeatureMatrix.setDirty()
self.ProgressSignal.meta.shape = (1,)
self.ProgressSignal.meta.dtype = object
self.ProgressSignal.setValue( self.progressSignal )
# Auto-choose a blockshape
tagged_shape = self.LabelImage.meta.getTaggedShape()
if 't' in tagged_shape:
# A block should never span multiple time slices.
# For txy volumes, that could lead to lots of extra features being computed.
tagged_shape['t'] = 1
blockshape = determineBlockShape( tagged_shape.values(), OpFeatureMatrixCache.MAX_BLOCK_PIXELS )
# Don't span more than 256 px along any axis
blockshape = tuple(min(x, 256) for x in blockshape)
self._init_blocks(self.LabelImage.meta.shape, blockshape)
def execute(self, slot, subindex, roi, result):
assert slot == self.LabelAndFeatureMatrix
self.progressSignal(0.0)
# Technically, this could result in strange progress reporting if execute()
# is called by multiple threads in parallel.
# This could be fixed with some fancier progress state, but
        # (1) We don't expect that to be typical, and
# (2) progress reporting is merely informational.
num_dirty_blocks = len( self._dirty_blocks )
remaining_dirty = [num_dirty_blocks]
def update_progress( result ):
remaining_dirty[0] -= 1
percent_complete = 95.0*(num_dirty_blocks - remaining_dirty[0])/num_dirty_blocks
self.progressSignal( percent_complete )
# Update all dirty blocks in the cache
logger.debug( "Updating {} dirty blocks".format(num_dirty_blocks) )
# Before updating the blocks, ensure that the necessary block locks exist
# It's better to do this now instead of inside each request
# to avoid contention over self._lock
with self._lock:
for block_start in self._dirty_blocks:
if block_start not in self._block_locks:
self._block_locks[block_start] = RequestLock()
# Update each block in its own request.
pool = RequestPool()
reqs = {}
for block_start in self._dirty_blocks:
req = Request( partial(self._get_features_for_block, block_start ) )
req.notify_finished( update_progress )
reqs[block_start] = req
pool.add( req )
pool.wait()
# Now store the results we got.
# It's better to store the blocks here -- rather than within each request -- to
# avoid contention over self._lock from within every block's request.
with self._lock:
for block_start, req in reqs.items():
if req.result is None:
# 'None' means the block wasn't dirty. No need to update.
continue
labels_and_features_matrix = req.result
self._dirty_blocks.remove(block_start)
if labels_and_features_matrix.shape[0] > 0:
# Update the block entry with the new matrix.
self._blockwise_feature_matrices[block_start] = labels_and_features_matrix
else:
# All labels were removed from the block,
# So the new feature matrix is empty.
# Just delete its entry from our list.
try:
del self._blockwise_feature_matrices[block_start]
except KeyError:
pass
# Concatenate the all blockwise results
if self._blockwise_feature_matrices:
total_feature_matrix = numpy.concatenate( self._blockwise_feature_matrices.values(), axis=0 )
else:
# No label points at all.
# Return an empty label&feature matrix (of the correct shape)
num_feature_channels = self.FeatureImage.meta.shape[-1]
total_feature_matrix = numpy.ndarray( shape=(0, 1 + num_feature_channels), dtype=numpy.float32 )
self.progressSignal(100.0)
logger.debug( "After update, there are {} clean blocks".format( len(self._blockwise_feature_matrices) ) )
result[0] = total_feature_matrix
def propagateDirty(self, slot, subindex, roi):
if slot == self.NonZeroLabelBlocks:
# Label changes will be handled via labelimage dirtyness propagation
return
assert slot == self.FeatureImage or slot == self.LabelImage
# Our blocks are tracked by label roi (1 channel)
roi = roi.copy()
roi.start[-1] = 0
roi.stop[-1] = 1
# Bookkeeping: Track the dirty blocks
block_starts = getIntersectingBlocks( self._blockshape, (roi.start, roi.stop) )
block_starts = map( tuple, block_starts )
#
# If the features were dirty (not labels), we only really care about
# the blocks that are actually stored already
# For big dirty rois (e.g. the entire image),
# we avoid a lot of unnecessary entries in self._dirty_blocks
if slot == self.FeatureImage:
block_starts = set( block_starts ).intersection( self._blockwise_feature_matrices.keys() )
with self._lock:
self._dirty_blocks.update( block_starts )
# Output has no notion of roi. It's all dirty.
self.LabelAndFeatureMatrix.setDirty()
def _get_features_for_block(self, block_start):
"""
Computes the feature matrix for the given block IFF the block is dirty.
Otherwise, returns None.
"""
# Caller must ensure that the lock for this block already exists!
with self._block_locks[block_start]:
if block_start not in self._dirty_blocks:
# Nothing to do if this block isn't actually dirty
                # (For parallel requests, it's theoretically possible.)
return None
block_roi = getBlockBounds( self.LabelImage.meta.shape, self._blockshape, block_start )
# TODO: Shrink the requested roi using the nonzero blocks slot...
# ...or just get rid of the nonzero blocks slot...
labels_and_features_matrix = self._extract_feature_matrix(block_roi)
return labels_and_features_matrix
def _extract_feature_matrix(self, label_block_roi):
num_feature_channels = self.FeatureImage.meta.shape[-1]
labels = self.LabelImage(label_block_roi[0], label_block_roi[1]).wait()
label_block_positions = numpy.nonzero(labels[...,0].view(numpy.ndarray))
labels_matrix = labels[label_block_positions].astype(numpy.float32).view(numpy.ndarray)
if len(label_block_positions) == 0 or len(label_block_positions[0]) == 0:
# No label points in this roi.
# Return an empty label&feature matrix (of the correct shape)
return numpy.ndarray( shape=(0, 1 + num_feature_channels), dtype=numpy.float32 )
# Shrink the roi to the bounding box of nonzero labels
block_bounding_box_start = numpy.array( map( numpy.min, label_block_positions ) )
block_bounding_box_stop = 1 + numpy.array( map( numpy.max, label_block_positions ) )
global_bounding_box_start = block_bounding_box_start + label_block_roi[0][:-1]
global_bounding_box_stop = block_bounding_box_stop + label_block_roi[0][:-1]
# Since we're just requesting the bounding box, offset the feature positions by the box start
bounding_box_positions = numpy.transpose( numpy.transpose(label_block_positions) - numpy.array(block_bounding_box_start) )
bounding_box_positions = tuple(bounding_box_positions)
# Append channel roi (all feature channels)
feature_roi_start = list(global_bounding_box_start) + [0]
feature_roi_stop = list(global_bounding_box_stop) + [num_feature_channels]
# Request features (bounding box only)
features = self.FeatureImage(feature_roi_start, feature_roi_stop).wait()
# Cast as plain ndarray (not VigraArray), since we don't need/want axistags
features_matrix = features[bounding_box_positions].view(numpy.ndarray)
return numpy.concatenate( (labels_matrix, features_matrix), axis=1)
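# Illustrative usage sketch (not part of this module). In a typical pixel
# classification setup the slots would be wired roughly as below, with
# `opFeatureSelection` and `opLabelArray` standing in for whatever upstream
# operators provide the feature image and the label image (their slot names
# here are assumptions):
#
#     opFeatureMatrixCache = OpFeatureMatrixCache(graph=graph)
#     opFeatureMatrixCache.FeatureImage.connect(opFeatureSelection.Output)
#     opFeatureMatrixCache.LabelImage.connect(opLabelArray.Output)
#     opFeatureMatrixCache.NonZeroLabelBlocks.connect(opLabelArray.nonzeroBlocks)
#     opFeatureMatrixCache.ProgressSignal.value.subscribe(report_progress)
#     labels_and_features = opFeatureMatrixCache.LabelAndFeatureMatrix.value
#
# Each row of the resulting matrix describes one labeled pixel: the label value
# in the first column (as float), followed by one column per feature channel.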
| lgpl-3.0 | -5,391,571,419,154,124,000 | 47.211538 | 130 | 0.631961 | false |
hoaibang07/Webscrap | transcripture/sources/crawler_data.py | 1 | 2804 | # -*- encoding: utf-8 -*-
import io
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import urllib2
import urlparse
def _remove_div_vdx(soup):
for div in soup.find_all('div', class_='vidx'):
div.extract()
return soup
def get_data(urlchuong_list, i):
filename = 'urlsach/data/sach' + str(i) + '.txt'
ftmp = io.open(filename, 'w', encoding='utf-8')
try:
hdrs = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Connection': 'keep-alive',
'Cookie': 'ipq_lip=20376774; ipq_set=1453874029; __atuvc=2%7C4; __utma=126044488.676620502.1453787537.1453787537.1453787537.1; __utmz=126044488.1453787537.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); PHPSESSID=ed3f4874b92a29b6ed036adfa5ad6fb3; ipcountry=us',
'Host': 'www.transcripture.com',
'Referer': 'http://www.transcripture.com/vietnamese-spanish-genesis-1.html',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:43.0) Gecko/20100101 Firefox/43.0'
}
for urlchuong in urlchuong_list:
# urlchuong = 'http://www.transcripture.com/vietnamese-chinese-revelation-3.html'
print urlchuong
# create request
req = urllib2.Request(urlchuong, headers=hdrs)
# get response
response = urllib2.urlopen(req)
soup = BeautifulSoup(response.read())
soup = _remove_div_vdx(soup)
# print soup
table_tag = soup.find_all('table', attrs={'width':'100%', 'cellspacing':'0'})[0]
tr_tags = table_tag.find_all('tr')
_len = len(tr_tags)
# in first tr tag:
h2_class = tr_tags[0].find_all('h2', class_='cphd')
ftmp.write(u'' + h2_class[0].get_text() + '|')
ftmp.write(u'' + h2_class[1].get_text() + '\n')
# print table_tag
for x in xrange(1,_len):
data = tr_tags[x].get_text('|')
# print data
# url_ec = url.encode('unicode','utf-8')
ftmp.write(u'' + data + '\n')
except Exception, e:
print e
# close file
ftmp.close()
def main():
for x in xrange(1,67):
print('Dang get data sach %d'%x)
urlchuong_list = []
filename = 'urlsach/sach' + str(x) + '.txt'
urlchuong_file = open(filename, 'r')
for line in urlchuong_file:
# print(line)
urlchuong_list.append(line.rstrip())
get_data(urlchuong_list, x)
urlchuong_file.close()
if __name__ == '__main__':
main()
# urlchuong_list = ['http://www.transcripture.com/vietnamese-chinese-revelation-3.html']
    # get_data(urlchuong_list, 1)
| gpl-2.0 | 4,626,698,484,316,223,000 | 34.506329 | 280 | 0.557418 | false |
maheshp/novatest | nova/virt/xenapi/vmops.py | 1 | 74575 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import functools
import itertools
import time
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova.compute import api as compute
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.imageupload.glance.GlanceStore',
help='Object Store Driver used to handle image uploads.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
DEVICE_CONFIGDRIVE = '5'
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
# Compare each individual portion of both version strings
for va, vb in zip(a, b):
ret = int(va) - int(vb)
if ret:
return ret
# Fallback to comparing length last
return len(a) - len(b)
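# For illustration (not in the original source), cmp_version() follows the
# usual cmp() convention:
#   cmp_version('1.2.3.10', '1.2.3.9') > 0
#   cmp_version('1.2.3', '1.2.3.0') < 0   (a shorter version compares as older)
#   cmp_version('5.0', '5.0') == 0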
def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
progress = round(float(step_info['current']) /
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
msg = _("Importing image upload handler: %s")
LOG.debug(msg % CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
@property
def agent_enabled(self):
return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
def confirm_migration(self, migration, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
# because some guests (windows) don't load PV drivers quickly
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'],
mount_device,
hotplug=False)
def finish_revert_migration(self, instance, block_device_info=None):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
# NOTE(danms): if we're reverting migration in the failure case,
# make sure we don't have a conflicting vm still running here,
# as might be the case in a failed migrate-to-same-host situation
new_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is not None:
if new_ref is not None:
self._destroy(instance, new_ref)
# Remove the '-orig' suffix (which was added in case the
# resized VM ends up on the source host, common during
# testing)
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._attach_mapped_block_devices(instance, block_device_info)
elif new_ref is not None:
# We crashed before the -orig backup was made
vm_ref = new_ref
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
if resize_instance:
self._resize_instance(instance, root_vdi)
# Check if kernel and ramdisk are external
kernel_file = None
ramdisk_file = None
name_label = instance['name']
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
disk_image_type = vm_utils.determine_disk_image_type(image_meta)
vm_ref = self._create_vm(context, instance, instance['name'],
{'root': root_vdi},
disk_image_type, network_info, kernel_file,
ramdisk_file)
self._attach_mapped_block_devices(instance, block_device_info)
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
#
# In order to make sure this detach is consistent (virt, BDM, cinder),
# we only detach in the virt-layer if a callback is provided.
if bad_volumes_callback:
bad_devices = self._volumeops.find_bad_volumes(vm_ref)
for device_name in bad_devices:
self._volumeops.detach_volume(
None, instance['name'], device_name)
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
# Allow higher-layers a chance to detach bad-volumes as well (in order
# to cleanup BDM entries and detach in Cinder)
if bad_volumes_callback and bad_devices:
bad_volumes_callback(bad_devices)
def _create_disks(self, context, instance, name_label, disk_image_type,
image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
image_meta.get('id'),
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
vdi['uuid'])
root_vdi = vdis.get('root')
if root_vdi:
self._resize_instance(instance, root_vdi)
return vdis
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
name_label=None, rescue=False):
if name_label is None:
name_label = instance['name']
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
@step
def determine_disk_image_type_step(undo_mgr):
return vm_utils.determine_disk_image_type(image_meta)
@step
def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
disk_image_type, image_meta,
block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
@step
def create_kernel_ramdisk_step(undo_mgr):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
def undo_create_kernel_ramdisk():
if kernel_file or ramdisk_file:
LOG.debug(_("Removing kernel/ramdisk files from dom0"),
instance=instance)
vm_utils.destroy_kernel_ramdisk(
self._session, kernel_file, ramdisk_file)
undo_mgr.undo_with(undo_create_kernel_ramdisk)
return kernel_file, ramdisk_file
@step
def create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file):
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type, admin_password,
injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
@step
def setup_network_step(undo_mgr, vm_ref, vdis):
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
@step
def inject_metadata_step(undo_mgr, vm_ref):
self.inject_instance_metadata(instance, vm_ref)
@step
def prepare_security_group_filters_step(undo_mgr):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
self._boot_new_instance(instance, vm_ref, injected_files,
admin_password)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
@step
def bdev_set_default_root(undo_mgr):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
setup_network_step(undo_mgr, vm_ref, vdis)
inject_metadata_step(undo_mgr, vm_ref)
prepare_security_group_filters_step(undo_mgr)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _create_vm(self, context, instance, name_label, vdis,
disk_image_type, network_info, kernel_file=None,
ramdisk_file=None, rescue=False):
"""Create VM instance."""
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
# NOTE(mikal): file injection only happens if we are _not_ using a
# configdrive.
if not configdrive.required_by(instance):
self.inject_instance_metadata(instance, vm_ref)
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
self.inject_hostname(instance, vm_ref, hostname)
def _create_vm_record(self, context, instance, name_label, vdis,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
# Ensure enough free memory is available
if not vm_utils.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
mode = vm_mode.get_from_instance(instance)
if mode == vm_mode.XEN:
use_pv_kernel = True
elif mode == vm_mode.HVM:
use_pv_kernel = False
else:
use_pv_kernel = vm_utils.determine_is_pv(self._session,
vdis['root']['ref'], disk_image_type, instance['os_type'])
mode = use_pv_kernel and vm_mode.XEN or vm_mode.HVM
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = instance_types.extract_instance_type(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
LOG.debug(_("Detected ISO image type, creating blank VM "
"for install"), instance=instance)
cd_vdi = vdis.pop('root')
root_vdi = vm_utils.fetch_blank_disk(self._session,
instance_type['id'])
vdis['root'] = root_vdi
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=False)
vm_utils.create_vbd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD, vbd_type='CD', bootable=True)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
vm_utils.auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
# Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
admin_password=admin_password,
files=files)
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""
LOG.debug(_('Starting VM'), instance=instance)
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
if self.agent_enabled:
agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance['os_type'],
'architecture': instance['architecture']})
# Update agent, if necessary
# This also waits until the agent starts
agent = self._get_agent(instance, vm_ref)
version = agent.get_agent_version()
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject ssh key.
agent.inject_ssh_key()
# Inject files, if necessary
if injected_files:
# Inject any files, if specified
for path, contents in injected_files:
agent.inject_file(path, contents)
# Set admin password, if necessary
if admin_password and not no_agent:
agent.set_admin_password(admin_password)
# Reset network config
agent.resetnetwork()
# Set VCPU weight
instance_type = instance_types.extract_instance_type(instance)
vcpu_weight = instance_type['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
'weight', str(vcpu_weight))
def _get_vm_opaque_ref(self, instance):
"""Get xapi OpaqueRef from a db record."""
vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call a plugin on the
XenServer that will bundle the VHDs together and then push the
bundle. Depending on the configured value of
'xenapi_image_upload_handler', image data may be pushed to
Glance or the specified data store.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
locals(), instance=instance)
instance_uuid = instance['uuid']
try:
self._session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except self._session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the _create_disks step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
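        # For example, with total_steps=5 a completed step=2 maps to
        # progress = round(2.0 / 5 * 100) = 40 percent.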
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
# 1. NOOP since we're not transmitting the base-copy separately
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
vdi_uuid = vm_vdi_rec['uuid']
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
LOG.debug(_("Resizing down VDI %(vdi_uuid)s from "
"%(old_gb)dGB to %(new_gb)dGB"), locals(),
instance=instance)
# 2. Power down the instance before resizing
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Copy VDI, resize partition and filesystem, forget VDI,
# truncate VHD
new_ref, new_uuid = vm_utils.resize_disk(self._session,
instance,
vdi_ref,
instance_type)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the new VHD
self._migrate_vhd(instance, new_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_ref)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
# 1. Create Snapshot
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label) as vdi_uuids:
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Transfer the immutable VHDs (base-copies)
#
# The first VHD will be the leaf (aka COW) that is being used by
# the VM. For this step, we're only interested in the immutable
# VHDs which are all of the parents of the leaf VHD.
for seq_num, vdi_uuid in itertools.islice(
enumerate(vdi_uuids), 1, None):
self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type):
"""Copies a VHD from one host machine to another, possibly
        resizing the filesystem beforehand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
resize_down = instance['root_gb'] > instance_type['root_gb']
if resize_down and not instance['auto_disk_config']:
reason = _('Resize down not allowed without auto_disk_config')
raise exception.ResizeError(reason=reason)
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
if resize_down:
self._migrate_disk_resizing_down(
context, instance, dest, instance_type, vm_ref, sr_path)
else:
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
# NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
if not new_disk_size:
return
# Get current size of VDI
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"), locals(), instance=instance)
resize_func_name = self.check_resize_func_name()
self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version."""
brand = self._session.product_brand
version = self._session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if bool(version) and bool(brand):
xcp = brand == 'XCP'
            r1_2_or_above = ((version[0] == 1 and version[1] > 1)
                             or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
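    # Illustrative mapping implied by check_resize_func_name():
    #   XCP < 1.2 or XenServer < 6.0             -> 'VDI.resize_online'
    #   XCP >= 1.2, XenServer >= 6.0, or unknown -> 'VDI.resize'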
def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
except self._session.XenAPI.Failure, exc:
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
" volumes and starting halted instance"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
else:
raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.set_admin_password(new_pass)
else:
raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.inject_file(path, contents)
else:
raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
So convert the others to _
Also convert / to _, because that is somewhat like a path
separator.
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
return ''.join([x in allowed_chars and x or '_' for x in key])
def inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
def store_meta(topdir, data_list):
for item in data_list:
key = self._sanitize_xenstore_key(item['key'])
value = item['value'] or ''
self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
jsonutils.dumps(value))
# Store user metadata
store_meta('vm-data/user-metadata', instance['metadata'])
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
vm_ref = self._get_vm_opaque_ref(instance)
for key, change in diff.items():
key = self._sanitize_xenstore_key(key)
location = 'vm-data/user-metadata/%s' % key
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
elif change[0] == '+':
self._add_to_param_xenstore(vm_ref, location,
jsonutils.dumps(change[1]))
try:
self._write_to_xenstore(instance, location, change[1],
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
return
for vdi_ref in vdi_refs:
try:
vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
# 2. We only have kernel xor ramdisk
raise exception.InstanceUnacceptable(instance_id=instance_uuid,
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
LOG.info(_("Destroying VM"), instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
vm_ref = vm_utils.lookup(self._session, instance['name'])
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
# NOTE(sirp): `block_device_info` is not used, information about which
# volumes should be detached is determined by the
# VBD.other_config['osvol'] attribute
return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
2. Destroying associated VDIs.
3. Destroying kernel and ramdisk files (if necessary).
        4. Destroying the actual VM record.
"""
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.pause', vm_ref)
def unpause(self, instance):
"""Unpause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.unpause', vm_ref)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._acquire_bootlock(vm_ref)
self._session.call_xenapi('VM.suspend', vm_ref)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._session.call_xenapi('VM.resume', vm_ref, False, True)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
- shutdown the instance VM.
- set 'bootlock' to prevent the instance from starting in rescue.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
rescue_name_label = '%s-rescue' % instance['name']
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
def unrescue(self, instance):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
- teardown the rescue VM.
- release the bootlock to allow the instance VM to start.
"""
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
instance_id=instance['uuid'])
original_vm_ref = self._get_vm_opaque_ref(instance)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
def _cancel_stale_tasks(self, timeout, task):
"""Cancel the given tasks that are older than the given timeout."""
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
reboot state for >= the given timeout
"""
# NOTE(jk0): All existing clean_reboot tasks must be cancelled before
# we can kick off the hard_reboot tasks.
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
def _get_vif_device_map(self, vm_rec):
vif_map = {}
for vif in [self._session.call_xenapi("VIF.get_record", vrec)
for vrec in vm_rec['VIFs']]:
vif_map[vif['device']] = vif['MAC']
return vif_map
def get_all_bw_counters(self):
"""Return running bandwidth counter for each interface on each
running VM"""
counters = vm_utils.fetch_bandwidth(self._session)
bw = {}
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vif_map = self._get_vif_device_map(vm_rec)
name = vm_rec['name_label']
if 'nova_uuid' not in vm_rec['other_config']:
continue
dom = vm_rec.get('domid')
if dom is None or dom not in counters:
continue
vifs_bw = bw.setdefault(name, {})
for vif_num, vif_data in counters[dom].iteritems():
mac = vif_map[vif_num]
vif_data['mac_address'] = mac
vifs_bw[mac] = vif_data
return bw
def get_console_output(self, instance):
"""Return snapshot of console."""
# TODO(armando-migliaccio): implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
# The compute manager expects InstanceNotFound for this case.
raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
# NOTE: XS5.6sp2+ use http over port 80 for xenapi com
return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
"""convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def convert_route(route):
return {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
network = vif['network']
v4_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 4]
v6_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 6]
# NOTE(tr3buchet): routes and DNS come from all subnets
routes = [convert_route(route) for subnet in network['subnets']
for route in subnet['routes']]
dns = [get_ip(ip) for subnet in network['subnets']
for ip in subnet['dns']]
info_dict = {'label': network['label'],
'mac': vif['address']}
if v4_subnets:
# NOTE(tr3buchet): gateway and broadcast from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
info_dict['ips'] = [fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']]
if v6_subnets:
# NOTE(tr3buchet): gateway from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
for subnet in v6_subnets
for ip in subnet['ips']]
if routes:
info_dict['routes'] = routes
if dns:
info_dict['dns'] = list(set(dns))
return info_dict
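        # Illustrative shape of the returned dict (example values only):
        #   {'label': 'public', 'mac': '00:16:3e:11:22:33', 'gateway': '10.0.0.1',
        #    'broadcast': '10.0.0.255',
        #    'ips': [{'ip': '10.0.0.5', 'enabled': '1',
        #             'netmask': '255.255.255.0', 'gateway': '10.0.0.1'}],
        #    'dns': ['8.8.8.8']}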
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
for vif in network_info:
xs_data = self._vif_xenstore_data(vif)
location = ('vm-data/networking/%s' %
vif['address'].replace(':', ''))
self._add_to_param_xenstore(vm_ref,
location,
jsonutils.dumps(xs_data))
try:
self._write_to_xenstore(instance, location, xs_data,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _create_vifs(self, vm_ref, instance, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_record", vm_ref)
for device, vif in enumerate(network_info):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %(network_ref)s'),
locals(), instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
locals(), instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.resetnetwork()
else:
raise NotImplementedError()
def inject_hostname(self, instance, vm_ref, hostname):
"""Inject the hostname of the instance into the xenstore."""
if instance['os_type'] == "windows":
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname to xenstore"), instance=instance)
self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
def _write_to_xenstore(self, instance, path, value, vm_ref=None):
"""
Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
return self._make_plugin_call('xenstore.py', 'write_record', instance,
vm_ref=vm_ref, path=path,
value=jsonutils.dumps(value))
def _delete_from_xenstore(self, instance, path, vm_ref=None):
"""
Deletes the value from the xenstore record for the given VM at
the specified location. A XenAPIPlugin.PluginError will be
raised if any error is encountered in the delete process.
"""
return self._make_plugin_call('xenstore.py', 'delete_record', instance,
vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
args = {'dom_id': vm_rec['domid']}
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
except self._session.XenAPI.Failure, e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
locals(), instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'error', 'message': err_msg}
return None
def _add_to_param_xenstore(self, vm_ref, key, val):
"""
Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten
"""
self._remove_from_param_xenstore(vm_ref, key)
self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
def _remove_from_param_xenstore(self, vm_ref, key):
"""
Takes a single key and removes it from the xenstore parameter
record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = self._virtapi.aggregate_get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
reason = _('Destination host:%(hostname)s must be in the same '
'aggregate as the source server')
raise exception.MigrationError(reason=reason % locals())
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
        # Get the network to use for the migration.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
raise exception.MigrationError('No suitable network for migrate')
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Receive failed'))
return migrate_data
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
if block_migration:
migrate_send_data = self._migrate_receive(ctxt)
destination_sr_ref = vm_utils.safe_find_sr(self._session)
dest_check_data = {
"block_migration": block_migration,
"migrate_data": {"migrate_send_data": migrate_send_data,
"destination_sr_ref": destination_sr_ref}}
return dest_check_data
else:
src = instance_ref['host']
self._ensure_host_in_aggregate(ctxt, src)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
return None
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it's possible to execute live migration on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if dest_check_data and 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
return dest_check_data
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
                raise exception.MigrationError(_('VM.assert_can_migrate '
                                                 'failed'))
def _generate_vdi_map(self, destination_sr_ref, vm_ref):
"""generate a vdi_map for _call_live_migrate_command."""
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
vif_map = {}
options = {}
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
def live_migrate(self, context, instance, destination_hostname,
post_method, recover_method, block_migration,
migrate_data=None):
try:
vm_ref = self._get_vm_opaque_ref(instance)
if block_migration:
if not migrate_data:
raise exception.InvalidParameterValue('Block Migration '
'requires migrate data from destination')
try:
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Send failed'))
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {})
post_method(context, instance, destination_hostname,
block_migration)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}
def _is_active(vm_rec):
power_state = vm_rec['power_state'].lower()
return power_state in ['running', 'paused']
def _get_uuid(vm_rec):
other_config = vm_rec['other_config']
return other_config.get('nova_uuid', None)
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage
| apache-2.0 | -5,872,085,318,642,051,000 | 41.73639 | 79 | 0.55996 | false |
sullivat/Markov-Twitter-Bot | src/mybot.test.py | 1 | 1257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime as dt
import logging
import time
import tweepy
from tweet_builder import *
from credentials import *
# Housekeeping: do not edit
logging.basicConfig(filename='tweet_test.log', level=logging.DEBUG)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
INTERVALS = [1, 1, 1, 5, 10]
# What the bot will tweet
def gen_tweet():
"""Generate a tweet from markovify."""
return str(create_tweet(authors[pick_author()]))
def is_tweet_safe(tweet):
"""using Mark Twain text inevitably leads to tweets with offensive langueage"""
vulgarities = ['nigger', 'fuck']
    for vulg in vulgarities:
        if vulg in tweet.lower():
            return False
    return True
def main_no_tweet():
while True:
t = gen_tweet()
if is_tweet_safe(t):
# api.update_status(t) # DON'T TWEET
logging.info("On {0} -- Tweeted: {1}".format(dt.datetime.today(), t))
time.sleep(random.choice(INTERVALS))
print("Tweeting: {}".format(t))
print('...\nAll done!')
if __name__ == '__main__':
#main()
main_no_tweet()
| bsd-2-clause | -7,918,807,360,116,483,000 | 22.277778 | 83 | 0.61973 | false |
alphagov/notifications-api | migrations/versions/0151_refactor_letter_rates.py | 1 | 3140 | """
Revision ID: 0151_refactor_letter_rates
Revises: 0150_another_letter_org
Create Date: 2017-12-05 10:24:41.232128
"""
import uuid
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0151_refactor_letter_rates'
down_revision = '0150_another_letter_org'
def upgrade():
op.drop_table('letter_rate_details')
op.drop_table('letter_rates')
op.create_table('letter_rates',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=False),
sa.Column('end_date', sa.DateTime(), nullable=True),
sa.Column('sheet_count', sa.Integer(), nullable=False),
sa.Column('rate', sa.Numeric(), nullable=False),
sa.Column('crown', sa.Boolean(), nullable=False),
sa.Column('post_class', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
start_date = datetime(2016, 3, 31, 23, 00, 00)
op.execute("insert into letter_rates values('{}', '{}', null, 1, 0.30, True, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 2, 0.33, True, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 3, 0.36, True, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 1, 0.33, False, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 2, 0.39, False, 'second')".format(
str(uuid.uuid4()), start_date)
)
op.execute("insert into letter_rates values('{}', '{}', null, 3, 0.45, False, 'second')".format(
str(uuid.uuid4()), start_date)
)
def downgrade():
op.drop_table('letter_rates')
op.create_table('letter_rates',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('valid_from', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', name='letter_rates_pkey'),
postgresql_ignore_search_path=False
)
op.create_table('letter_rate_details',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('letter_rate_id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('page_total', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('rate', sa.NUMERIC(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['letter_rate_id'], ['letter_rates.id'],
name='letter_rate_details_letter_rate_id_fkey'),
sa.PrimaryKeyConstraint('id', name='letter_rate_details_pkey')
)
| mit | 8,390,699,011,551,367,000 | 43.225352 | 105 | 0.576433 | false |
jpscaletti/rev-assets | rev_assets/__init__.py | 1 | 1570 | """
===========================
RevAssets
===========================
Makes possible for python web apps to work with hashed static assets
generated by other tools like Gulp or Webpack.
It does so by reading the manifest generated by the revision tool.
"""
import json
import io
__version__ = '1.0.3'
class AssetNotFound(Exception):
pass
class RevAssets(object):
"""
Map the source -> hashed assets
:param base_url: From where the hashed assets are served.
:param reload: Reload the manifest each time an asset is requested.
:param manifest: Path and filename of the manifest file.
:param quiet: If False, a missing asset will raise an exception
"""
def __init__(self, base_url='/static', reload=False,
manifest='manifest.json', quiet=True):
self.base_url = base_url.rstrip('/')
self.reload = reload
self.manifest = manifest
self.assets = {}
self.quiet = quiet
def _load_manifest(self):
with io.open(self.manifest, 'rt', encoding='utf-8') as mf:
return json.loads(mf.read())
def asset_url(self, asset):
if not self.assets or self.reload:
self.assets = self._load_manifest()
asset = asset.strip('/')
path = self.assets.get(asset)
if not path:
if self.quiet:
return ''
msg = 'Asset file {!r} not found'.format(asset)
raise AssetNotFound(msg)
return '{}/{}'.format(
self.base_url,
path.lstrip('/'),
)
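# Illustrative usage sketch. It assumes a revision tool (gulp-rev, the webpack
# manifest plugin, etc.) produces a manifest mapping source names to hashed names;
# a tiny manifest is written here only so the demo is self-contained.
if __name__ == '__main__':
    with io.open('manifest.json', 'wt', encoding='utf-8') as mf:
        mf.write(u'{"css/app.css": "css/app-a1b2c3.css"}')
    rev = RevAssets(base_url='/static', manifest='manifest.json')
    print(rev.asset_url('css/app.css'))  # -> /static/css/app-a1b2c3.css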
| bsd-3-clause | -7,948,017,020,570,609,000 | 25.166667 | 71 | 0.576433 | false |
Python1320/icmpviewer | main.py | 1 | 1217 | #!/usr/bin/env python2
QUEUE_NUM = 5
#hush verbosity
import logging
l=logging.getLogger("scapy.runtime")
l.setLevel(49)
import os,sys,time
from sys import stdout as out
import nfqueue,socket
from scapy.all import *
import GeoIP
gi = GeoIP.open("GeoLiteCity.dat",GeoIP.GEOIP_STANDARD)
lastip=""
def DoGeoIP(pkt):
global lastip
ip = pkt[IP].src
if lastip==ip:
out.write('.')
out.flush()
return
lastip=ip
gir = gi.record_by_addr(ip)
if gir != None:
out.write("\n%s %s %s %s "%(
time.strftime("%Y-%m-%d %H:%M:%S"),
ip,
gir['country_name'] or "?",
gir['city'] or "?"))
out.flush()
def process_packet(dummy, payload):
payload.set_verdict(nfqueue.NF_ACCEPT)
data = payload.get_data()
pkt = IP(data)
proto = pkt.proto
	if proto == 0x01:
		if pkt[ICMP].type == 8:
			DoGeoIP(pkt)
#automatic iptables rules?
def hook():
pass
def unhook():
pass
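# Illustrative iptables rules matching QUEUE_NUM above (assumed; hook()/unhook()
# are stubs and do not run these automatically):
#   iptables -I INPUT -p icmp --icmp-type echo-request -j NFQUEUE --queue-num 5
#   iptables -D INPUT -p icmp --icmp-type echo-request -j NFQUEUE --queue-num 5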
def main():
q = nfqueue.queue()
q.open()
q.bind(socket.AF_INET)
q.set_callback(process_packet)
q.create_queue(QUEUE_NUM)
try:
hook()
q.try_run()
except KeyboardInterrupt:
unhook()
print("Exit...")
q.unbind(socket.AF_INET)
q.close()
sys.exit(0)
print("Listening on queue number "+str(QUEUE_NUM))
main()
| unlicense | 3,581,285,116,070,015,000 | 15.226667 | 55 | 0.653246 | false |
mhabib1981/pySecn00b | zap_xml_parse.py | 1 | 2159 | from xml.dom.minidom import parse
import xml.dom.minidom
import sys
import csv
#uni_file=open(sys.argv[1],'r')
#non_uni_file=uni_file.decode("utf8")
dom_tree=parse(sys.argv[1])
collect=dom_tree.documentElement
output_data=[[],[],[],[],[],[],[],[]]
out_filename=((sys.argv[1].split("/")[-1]).split(".")[0])+".csv"
out_file=open(out_filename,'w')
write_csv=csv.writer(out_file, dialect=csv.excel)
for item in collect.getElementsByTagName("alertitem"):
try:
risk_desc=item.getElementsByTagName('riskdesc')[0]
output_data[0].append(risk_desc.childNodes[0].data)
except IndexError:
output_data[0].append("NONE")
try:
alert_name=item.getElementsByTagName('alert')[0]
output_data[1].append(alert_name.childNodes[0].data)
except IndexError:
output_data[1].append("NONE")
try:
alert_desc=item.getElementsByTagName('desc')[0]
output_data[2].append((alert_desc.childNodes[0].data).encode("utf-8"))
except IndexError:
output_data[2].append("NONE")
try:
alert_solution=item.getElementsByTagName('solution')[0]
output_data[3].append((alert_solution.childNodes[0].data).encode("utf-8"))
except IndexError:
output_data[3].append("NONE")
try:
alert_ref=item.getElementsByTagName('reference')[0]
output_data[4].append((alert_ref.childNodes[0].data).encode("utf-8"))
except IndexError:
output_data[4].append("NONE")
try:
uri=item.getElementsByTagName('uri')[0]
output_data[5].append(uri.childNodes[0].data)
except IndexError:
output_data[5].append("NONE")
try:
evid=item.getElementsByTagName('evidence')[0]
output_data[6].append(evid.childNodes[0].data)
except IndexError:
output_data[6].append("NONE")
try:
attack=item.getElementsByTagName('attack')[0]
output_data[7].append(attack.childNodes[0].data)
except IndexError:
output_data[7].append("NONE")
try:
for i in range(0,len(output_data[0])-1):
row=[]
for x in range(0,len(output_data)):
row.append(str(output_data[x][i]).replace(',',';c'))
print row
except UnicodeEncodeError:
raise
#print output_data
# for x in xrange(0,len(output_data)-1):
# print output_data[x][i]
#write_csv.writerows(output_data)
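# Note: output_data is column-oriented (one list per field), so writerows() above is
# left commented out; each assembled row could instead be written inside the loop,
# e.g. write_csv.writerow(row), followed by out_file.close().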
| cc0-1.0 | -1,634,140,350,638,111,200 | 22.467391 | 76 | 0.695692 | false |
bedder/gifbot | test/test_gif_bot.py | 1 | 7000 |
# MIT License
#
# Copyright (c) 2018 Matthew Bedder ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for the ``GifBot`` class.
"""
import unittest
from unittest.mock import patch, MagicMock
from gif_bot.gif_bot import GifBot
api_collector = MagicMock()
def mock_api_call(command, *args, **kwargs):
if command == "users.list":
return {
"ok": True,
"members": [
{"name": "test_bot_name", "id": "test_bot_id"},
{"name": "test_owner_name", "id": "test_owner_id"}
]
}
else:
api_collector(command, *args, **kwargs)
def mock_client(_):
return MagicMock(api_call=mock_api_call)
def Any(cls):
class Any(cls):
def __eq__(self, other):
return True
return Any()
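# Any(str) builds an instance that compares equal to anything, letting the
# assert_any_call checks below ignore the exact message text.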
@patch("gif_bot.gif_bot.SlackClient", mock_client)
@patch("gif_bot.gif_bot.getLogger")
@patch("gif_bot.gif_bot.Formatter")
@patch("gif_bot.gif_bot.Logger")
@patch("gif_bot.gif_bot.RotatingFileHandler")
class TestGifBot(unittest.TestCase):
def setUp(self):
api_collector.reset_mock()
def test_is_mention(self, *args):
""" The bot should be able to identify direct mentions """
bot = GifBot("test.config", MagicMock())
self.assertTrue(bot.is_mention("@test_bot_name"))
self.assertTrue(bot.is_mention("@test_bot_name help"))
self.assertFalse(bot.is_mention("Something @test_bot_name"))
def test_is_trigger(self, *args):
""" The bot should be able to identify trigger words being used in messages """
bot = GifBot("test.config", MagicMock())
self.assertTrue(bot.is_trigger("test_trigger blah"))
self.assertTrue(bot.is_trigger("blah test_trigger"))
self.assertFalse(bot.is_trigger("something else"))
def test_not_trigger_non_message(self, *args):
""" The bot should ignore non-messages """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_not_called()
def test_not_trigger_self(self, *args):
""" The bot shouldn't be able to trigger itself """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_bot_id",
"text": "Something something test_trigger",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_not_called()
def test_handle_trigger_message(self, *args):
""" The bot should trigger on messages from users containing a trigger word """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_user_id",
"text": "Something something test_trigger",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_any_call("chat.postMessage", text=Any(str),
channel="test_channel", as_user=True)
api_collector.assert_any_call("reactions.add", name="test_reaction",
channel="test_channel", timestamp="test_ts")
def test_handle_request_success(self, *args):
""" The bot should post a gif and a happy reaction when they can fulfill a request """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_user_id",
"text": "@test_bot_name request tag_a1",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_any_call("chat.postMessage", text=Any(str),
channel="test_channel", as_user=True)
api_collector.assert_any_call("reactions.add", name="test_reaction",
channel="test_channel", timestamp="test_ts")
def test_handle_request_failure(self, *args):
""" The bot should send a message and react with :brokenheart: when it cannot fulfill a
request """
bot = GifBot("test.config", MagicMock())
bot.handle_message({
"user": "test_user_id",
"text": "@test_bot_name request invalid_tag",
"channel": "test_channel",
"ts": "test_ts"
})
api_collector.assert_any_call("chat.postMessage", text=Any(str),
channel="test_channel", as_user=True)
api_collector.assert_any_call("reactions.add", name="broken_heart",
channel="test_channel", timestamp="test_ts")
def test_admin(self, *args):
""" Test that basic admin commands work """
bot = GifBot("test.config", MagicMock())
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
bot.handle_message({
"user": "test_owner_id",
"text": "add url tag",
"channel": "Dtest_channel",
"ts": "test_ts"
})
self.assertIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 3)
bot.handle_message({
"user": "test_owner_id",
"text": "remove url",
"channel": "Dtest_channel",
"ts": "test_ts"
})
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
def test_admin_access(self, *args):
""" Test that basic admin commands work only for the owner """
bot = GifBot("test.config", MagicMock())
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
bot.handle_message({
"user": "test_user_id",
"text": "add url tag",
"channel": "Dtest_channel",
"ts": "test_ts"
})
self.assertNotIn("tag", bot.store.tags)
self.assertEqual(len(bot.store.elements), 2)
| mit | 5,020,034,094,223,202,000 | 34.897436 | 95 | 0.591143 | false |
google/fedjax | fedjax/models/stackoverflow.py | 1 | 5453 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stack Overflow recurrent models."""
from typing import Optional
from fedjax.core import metrics
from fedjax.core import models
import haiku as hk
import jax.numpy as jnp
def create_lstm_model(vocab_size: int = 10000,
embed_size: int = 96,
lstm_hidden_size: int = 670,
lstm_num_layers: int = 1,
share_input_output_embeddings: bool = False,
expected_length: Optional[float] = None) -> models.Model:
"""Creates LSTM language model.
Word-level language model for Stack Overflow.
Defaults to the model used in:
Adaptive Federated Optimization
Sashank Reddi, Zachary Charles, Manzil Zaheer, Zachary Garrett, Keith Rush,
Jakub Konečný, Sanjiv Kumar, H. Brendan McMahan.
https://arxiv.org/abs/2003.00295
Args:
vocab_size: The number of possible output words. This does not include
special tokens like PAD, BOS, EOS, or OOV.
embed_size: Embedding size for each word.
lstm_hidden_size: Hidden size for LSTM cells.
lstm_num_layers: Number of LSTM layers.
share_input_output_embeddings: Whether to share the input embeddings with
the output logits.
expected_length: Expected average sentence length used to scale the training
loss down by `1. / expected_length`. This constant term is used so that
the total loss over all the words in a sentence can be scaled down to per
word cross entropy values by a constant factor instead of dividing by
number of words which can vary across batches. Defaults to no scaling.
Returns:
Model.
"""
# TODO(jaero): Replace these with direct references from dataset.
pad = 0
bos = 1
eos = 2
oov = vocab_size + 3
full_vocab_size = vocab_size + 4
# We do not guess EOS, and if we guess OOV, it's treated as a mistake.
logits_mask = [0. for _ in range(full_vocab_size)]
for i in (pad, bos, eos, oov):
logits_mask[i] = jnp.NINF
logits_mask = tuple(logits_mask)
def forward_pass(batch):
x = batch['x']
# [time_steps, batch_size, ...].
x = jnp.transpose(x)
# [time_steps, batch_size, embed_dim].
embedding_layer = hk.Embed(full_vocab_size, embed_size)
embeddings = embedding_layer(x)
lstm_layers = []
for _ in range(lstm_num_layers):
lstm_layers.extend([
hk.LSTM(hidden_size=lstm_hidden_size),
jnp.tanh,
# Projection changes dimension from lstm_hidden_size to embed_size.
hk.Linear(embed_size)
])
rnn_core = hk.DeepRNN(lstm_layers)
initial_state = rnn_core.initial_state(batch_size=embeddings.shape[1])
# [time_steps, batch_size, hidden_size].
output, _ = hk.static_unroll(rnn_core, embeddings, initial_state)
if share_input_output_embeddings:
output = jnp.dot(output, jnp.transpose(embedding_layer.embeddings))
output = hk.Bias(bias_dims=[-1])(output)
else:
output = hk.Linear(full_vocab_size)(output)
# [batch_size, time_steps, full_vocab_size].
output = jnp.transpose(output, axes=(1, 0, 2))
return output
def train_loss(batch, preds):
"""Returns total loss per sentence optionally scaled down to token level."""
targets = batch['y']
per_token_loss = metrics.unreduced_cross_entropy_loss(targets, preds)
# Don't count padded values in loss.
per_token_loss *= targets != pad
sentence_loss = jnp.sum(per_token_loss, axis=-1)
if expected_length is not None:
return sentence_loss * (1. / expected_length)
return sentence_loss
transformed_forward_pass = hk.transform(forward_pass)
return models.create_model_from_haiku(
transformed_forward_pass=transformed_forward_pass,
sample_batch={
'x': jnp.zeros((1, 1), dtype=jnp.int32),
'y': jnp.zeros((1, 1), dtype=jnp.int32),
},
train_loss=train_loss,
eval_metrics={
'accuracy_in_vocab':
metrics.SequenceTokenAccuracy(
masked_target_values=(pad, eos), logits_mask=logits_mask),
'accuracy_no_eos':
metrics.SequenceTokenAccuracy(masked_target_values=(pad, eos)),
'num_tokens':
metrics.SequenceTokenCount(masked_target_values=(pad,)),
'sequence_length':
metrics.SequenceLength(masked_target_values=(pad,)),
'sequence_loss':
metrics.SequenceCrossEntropyLoss(masked_target_values=(pad,)),
'token_loss':
metrics.SequenceTokenCrossEntropyLoss(
masked_target_values=(pad,)),
'token_oov_rate':
metrics.SequenceTokenOOVRate(
oov_target_values=(oov,), masked_target_values=(pad,)),
'truncation_rate':
metrics.SequenceTruncationRate(
eos_target_value=eos, masked_target_values=(pad,)),
})
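if __name__ == "__main__":
    # Illustrative usage sketch; assumes fedjax, dm-haiku and jax are importable.
    # Only the factory defined above is exercised, with an assumed expected_length.
    lstm_model = create_lstm_model(vocab_size=10000, expected_length=20.0)
    print(type(lstm_model))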
| apache-2.0 | -2,779,023,789,321,949,700 | 37.935714 | 80 | 0.652174 | false |
lglenat/whmnet | RaspberryPi/readGateway.py | 1 | 12588 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
readGateway.py script
=====================
This script is used in the whmnet project to receive data from the
wireless network gateway and send it to a custom server on the web.
This script is run on a Raspberry Pi, connected to the Gateway
through the UART serial port on the Pi GPIO header.
"""
# built-in modules
import serial
import binascii
import struct
import datetime
import logging
import logging.handlers
# third-party modules
import crcmod
import requests
# Import configuration variables
from config import *
# Constants
FILE_TYPE_GW_LOG = 0
FILE_TYPE_SENSOR_LOG = 1
FILE_TYPE_LEGACY_DATA = 2
FILE_TYPE_DATA = 3
GW_TYPE_REMOTE_DATA = 0
GW_TYPE_LOCAL_DATA = 1
GW_TYPE_LOG = 2
SENS_TYPE_DATA = 0
SENS_TYPE_LOG = 1
SENS_TYPE_DATA_LEGACY = 2
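# UART frame layout as inferred from the parsing in main() below: a 0xAA sync byte,
# a length byte, then `length` bytes of message whose last four bytes are a
# little-endian CRC32 over the preceding bytes; for remote sensor data the byte at
# msg[length-6] carries the RSSI.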
logger = logging.getLogger()
def main():
# Configure logger
wfh = logging.handlers.WatchedFileHandler(cfgLoggerFile) # log file
formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s') # log header
wfh.setFormatter(formatter)
logger.addHandler(wfh)
logger.setLevel(cfgLogLevel) # set level according to your needs
# Configure CRC with polynome, reversing etc.
crc32_func = crcmod.mkCrcFun(0x104C11DB7, initCrc=0x0, rev=True, xorOut=0xFFFFFFFF)
# Open serial port to communicate with gateway
try:
logger.info('Opening serial port.')
port = serial.Serial(cfgSerialName, cfgSerialSpeed, timeout=None)
except serial.SerialException:
logger.critical('Serial port unavailable')
raise
else:
logger.info('Serial port successfully opened.')
# main loop
while True:
# search for sync byte 0xAA
rcv = port.read(1)
if rcv == b'\xAA':
logger.debug('Sync word received.')
# Get timestamp
timedata = datetime.datetime.now()
# Proceed with message
# First byte is length of UART frame
# use ord() because data is written in binary format on UART by STM32 (not char)
length = ord(port.read(1))
logger.debug('Size of rcvd frame: ' + str(length))
# We can have length = 0 if rx uart buffer is full (in case python script
# is started after sensor gateway)
if length > 0:
# Then read the entire frame
msg = port.read(length)
logger.debug('Rx frame: ' + binascii.hexlify(msg))
# Unpack the CRC from the 4 last bytes of frame
try:
rxcrc = struct.unpack('<I', msg[length-4:length])[0]
except struct.error:
logger.exception('CRC struct error.')
else:
logger.debug('Rx CRC: ' + str(rxcrc) + ' - ' + hex(rxcrc))
# Compute CRC on frame data (except sync and length bytes)
compcrc = crc32_func(msg[0:length-4])
logger.debug('Calculated CRC: ' + str(compcrc) + ' - ' + hex(int(compcrc)))
# Compare rcvd CRC and calculated CRC
if rxcrc != int(compcrc):
# A problem occured during UART transmission
logger.info('CRC ERROR.')
else:
# Get message type from Gateway
gwMsgType = ord(msg[0]);
# Remote data is data coming from wireless sensors
if gwMsgType == GW_TYPE_REMOTE_DATA:
# get sensor id and msg type
sensMsgType = ord(msg[2]) >> 4
sensorId = ord(msg[2]) & 0xf
# get RSSI (can be negative)
rssi = ord(msg[length-6])
if rssi > 127:
rssi = (256 - rssi) * (-1)
# Print sensor ID
logger.info('Sensor ID: ' + str(sensorId) + ' - RSSI: ' + str(rssi))
# log/error message from sensor
if sensMsgType == SENS_TYPE_LOG:
# print and process log message
log_msg = binascii.hexlify(msg[3:6])
logger.info('Log message: ' + log_msg)
# Write msg to file
writeSensorLog(sensorId, timedata, log_msg, rssi)
# Post msg on server
postMeasFromFile()
# measurement message from V1 sensor (not used anymore)
elif sensMsgType == SENS_TYPE_DATA_LEGACY:
# Extract and print temperature #
temperature = computeTemp(msg[3:5])
logger.debug('Temperature: ' + str(temperature))
# Write measurement to file
writeLegacyData(sensorId, timedata, temperature, rssi)
# Post measurement on server
postMeasFromFile()
# measurement message from V2 sensor
elif sensMsgType == SENS_TYPE_DATA:
#Extract data from message
data = computeData(msg[3:8])
logger.info('Temp: ' + '{:.2f}'.format(data['temp']))
logger.info('Hum: ' + '{:.2f}'.format(data['hum']))
logger.info('Pres: ' + str(data['pres']))
# Write data to file
writeData(sensorId, timedata, data['temp'], data['hum'], data['pres'], rssi)
# Post on server
postMeasFromFile()
else:
logger.warning('UNKNOWN SENSOR MSG TYPE.')
# log message from gateway itself
elif gwMsgType == GW_TYPE_LOG:
# Print log message
logger.info('Gateway log: ' + str(ord(msg[1])))
# Write msg to file
writeGatewayLog(timedata, ord(msg[1]))
# Post msg on server
postMeasFromFile()
else:
logger.warning('UNKNOWN GATEWAY MSG TYPE.')
else:
logger.error('Gateway msg is of length 0.')
# The 4 functions below save posts to the CSV buffer file before they are sent to the server
def writeLegacyData(id, timedata, temp, rssi):
with open(cfgBufferFile, 'a') as f:
f.write(str(id) + ',' + str(FILE_TYPE_LEGACY_DATA) + ',' +
timedata.strftime("%Y-%m-%d %H:%M:%S") + ',' +
str(temp) + ',' + str(rssi))
f.write('\n')
def writeData(id, timedata, temp, hum, pres, rssi):
with open(cfgBufferFile, 'a') as f:
f.write(str(id) + ',' + str(FILE_TYPE_DATA) + ',' + timedata.strftime("%Y-%m-%d %H:%M:%S") + ',' +
'{:.2f}'.format(temp) + ',' + '{:.2f}'.format(hum) + ',' + str(pres) + ',' + str(rssi))
f.write('\n')
def writeSensorLog(id, timedata, log, rssi):
with open(cfgBufferFile, 'a') as f:
f.write(str(id) + ',' + str(FILE_TYPE_SENSOR_LOG) + ',' + timedata.strftime("%Y-%m-%d %H:%M:%S") +
',' + str(log) + ',' + str(rssi))
f.write('\n')
def writeGatewayLog(timedata, log):
with open(cfgBufferFile, 'a') as f:
f.write('255' + ',' + str(FILE_TYPE_GW_LOG) + ',' + timedata.strftime("%Y-%m-%d %H:%M:%S") + ',' + str(log))
f.write('\n')
# Function to compute temperature from V1 sensor message (not for V2 sensor)
def computeTemp(tempString):
# unpack data - big endian 16 bit
try:
temp = struct.unpack('>h', tempString)[0]
except struct.error:
logger.exception('Temperature struct error.')
return -99
else:
# Convert code to actual temperature
temp = temp * 0.0625 # 1 LSB = 0.0625 °C
return temp
# Function to extract temperature, RH and pressure data from sensor V2 message
# See sensor V2 STM32L0 firmware source to retrieve message structure
def computeData(dataStr):
    # Initialize dictionary
data = {}
#fixme: return errors and check for error in calling function
# Little endian 24-bit padded to 32
try:
sht = struct.unpack('<I', dataStr[0:3] + '\x00')
except struct.error:
logger.exception('SHT21 data decoding struct error.')
return -99
else:
data['temp'] = -46.85 + 175.72 * ((sht[0] & 0x7FF) << 5) / pow(2,16)
data['hum'] = -6.0 + 125.0 * ((sht[0] >> 11) << 5) / pow(2,16)
# Little endian 16-bit
try:
ms56 = struct.unpack('<H', dataStr[3:5])
except struct.error:
logger.exception('MS5637 data decoding struct error.')
return -99
else:
data['pres'] = ms56[0] + 85000
return data
# Function that reads the CSV buffer file line by line and post the data to the
# webserver if it is reachable on the internet
def postMeasFromFile():
nbLinesPosted = 0
# open backup file in read mode
with open(cfgBufferFile, 'r') as f:
# Save all measurements in lines variable
lines = f.readlines()
# Go back to start of file and read it line by line
f.seek(0, 0)
for line in f:
# Remove newline character
line = line.rstrip('\n')
# Split the line to get the items in a list
s = line.split(',', -1)
if len(s) != 0:
# Try to post measurement on server
type = int(s[1])
if type == FILE_TYPE_GW_LOG:
status = postOnServer(s[0], s[1], s[2], s[3], '', '', '', '')
elif type == FILE_TYPE_SENSOR_LOG:
status = postOnServer(s[0], s[1], s[2], s[3], '', '', '', s[4])
elif type == FILE_TYPE_LEGACY_DATA:
status = postOnServer(s[0], s[1], s[2], '', s[3], '', '', s[4])
elif type == FILE_TYPE_DATA:
status = postOnServer(s[0], s[1], s[2], '', s[3], s[4], s[5], s[6])
else:
                    logger.error('Unknown type in data file.')
status = 200
# If posting is successful, increment variable else break
if status != 200:
break
else:
nbLinesPosted = nbLinesPosted + 1
else:
# simply ignore line
logger.error('Invalid line in file. Skipping.')
nbLinesPosted = nbLinesPosted + 1
    # Reopen the file in write mode (not appending)
with open(cfgBufferFile, 'w') as f:
# Write all lines that were not posted on server
f.writelines(lines[nbLinesPosted:])
# Function to post data on websever. Uses the requests package.
def postOnServer(id_s, dataType_s, datetime_s, log_s, temp_s, hum_s, pres_s, rssi_s):
    retval = 0
payload = {'id': id_s, 'type': dataType_s, 'time': datetime_s,
'temp': temp_s, 'hum': hum_s, 'pres': pres_s,
'log': log_s, 'rssi': rssi_s, 'chk': cfgPostPwd}
logger.debug(payload)
try:
r = requests.post(cfgPostUrl, data=payload, timeout=5)
except requests.exceptions.ConnectionError:
logger.exception('Connection error')
except requests.exceptions.HTTPError:
logger.exception('HTTP invalid response error.')
except requests.exceptions.Timeout:
logger.exception('Connection timeout error.')
except requests.exceptions.TooManyRedirects:
logger.exception('Too many redirects.')
else:
retval = r.status_code
logger.debug(r.text)
return retval
if __name__ == "__main__":
main()
__author__ = "Lucas Glénat"
__copyright__ = "Copyright 2017, whmnet project"
__credits__ = ["Lucas Glénat"]
__license__ = "GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Lucas Glénat"
__email__ = "[email protected]"
__status__ = "Production"
#### END OF FILE ####
| gpl-3.0 | 5,445,676,795,377,608,000 | 36.011765 | 116 | 0.516052 | false |
ISRyuu/ISNNTF | test.py | 1 | 3957 | import numpy as np
def iou(box_1, box_2):
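    '''Intersection over union (IoU) of two boxes given as (center_x, center_y, width, height).'''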
box_1_ulx = box_1[0] - box_1[2] * 0.5
box_1_uly = box_1[1] - box_1[3] * 0.5
box_1_lrx = box_1[0] + box_1[2] * 0.5
box_1_lry = box_1[1] + box_1[3] * 0.5
box_2_ulx = box_2[0] - box_2[2] * 0.5
box_2_uly = box_2[1] - box_2[3] * 0.5
box_2_lrx = box_2[0] + box_2[2] * 0.5
box_2_lry = box_2[1] + box_2[3] * 0.5
overlap_ulx = max(box_1_ulx, box_2_ulx)
overlap_uly = max(box_1_uly, box_2_uly)
overlap_lrx = min(box_1_lrx, box_2_lrx)
overlap_lry = min(box_1_lry, box_2_lry)
overlap = max(0, (overlap_lrx - overlap_ulx)) * max(0, (overlap_lry - overlap_uly))
union = max(1e-10, (box_1[2] * box_1[3] + box_2[2] * box_2[3] - overlap))
return min(max(0, overlap / union), 1)
def non_max_suppression(output, cell_size, class_num, boxes_per_cell,
threshold=0.1, iou_threshold=0.5):
'''output [cell_size, cell_size, boxes_per_cell, values]'''
offset_y = np.reshape(
np.asarray([np.arange(cell_size)]*cell_size*boxes_per_cell).T,
(cell_size, cell_size, boxes_per_cell))
offset_x = np.transpose(offset_y, [1, 0, 2])
output = np.asarray(output)
classes = np.reshape(output[..., :class_num],
[cell_size, cell_size, class_num])
confidences = np.reshape(output[..., class_num:class_num+boxes_per_cell],
[cell_size, cell_size, boxes_per_cell])
boxes = np.reshape(output[..., class_num+boxes_per_cell:],
[cell_size, cell_size, boxes_per_cell, -1])
boxes[..., 0] = (boxes[..., 0] + offset_x) / cell_size
boxes[..., 1] = (boxes[..., 1] + offset_y) / cell_size
boxes[..., 2:] = np.square(boxes[..., 2:])
class_confidences = []
for i in range(boxes_per_cell):
class_confidences += [np.expand_dims(confidences[..., i], axis=-1) * classes]
class_confidences = np.stack(class_confidences, axis=-2)
class_filter = class_confidences >= threshold
class_filtered_indices = np.nonzero(class_filter)
boxes_filtered = boxes[class_filtered_indices[0:3]]
class_filtered = np.argmax(class_confidences, axis=-1)[class_filtered_indices[0:3]]
probabilites_filtered = class_confidences[class_filter]
sorted_probs_indices = np.flip(np.argsort(probabilites_filtered), axis=0)
probabilites_filtered = probabilites_filtered[sorted_probs_indices]
boxes_filtered = boxes_filtered[sorted_probs_indices]
class_filtered = class_filtered[sorted_probs_indices]
for i in range(len(sorted_probs_indices)):
if probabilites_filtered[i] == 0:
continue
for j in range(i+1, len(sorted_probs_indices)):
if iou(boxes_filtered[i], boxes_filtered[j]) >= iou_threshold:
probabilites_filtered[j] = 0
result_indices = probabilites_filtered > 0
confidence_result = probabilites_filtered[result_indices]
classes_result = class_filtered[result_indices]
boxes_result = boxes_filtered[result_indices]
return np.concatenate([np.expand_dims(confidence_result, axis=-1),
np.expand_dims(classes_result, axis=-1),
boxes_result],
axis=-1)
if __name__ == '__main__':
test_data = np.reshape(np.load("/Users/Kevin/Desktop/out.npy")[2], [7, 7, 30])
print(non_max_suppression(test_data, 7, 20, 2))
# confidences = np.random.randn(3,3,2)
# classes = np.random.randn(3,3,20)
# boxes_per_cell = 2
# probs = np.zeros([3,3,2,20])
# for i in range(boxes_per_cell):
# for j in range(20):
# probs[:, :, i, j] = np.multiply(
# classes[:, :, j], confidences[:, :, i])
# probabilites = []
# for i in range(boxes_per_cell):
# probabilites += [np.expand_dims(confidences[..., i], axis=-1) * classes]
# print(probs == np.stack(probabilites, axis=-2))
| bsd-3-clause | 5,585,448,979,678,535,000 | 37.048077 | 87 | 0.581248 | false |
canaryhealth/nlu_trainer | nlu_trainer/util.py | 1 | 1186 | # -*- coding: utf-8 -*-
import re
def phrase_index(sentence, phrase):
'''
Returns the start and end index of phrase (first instance) if it exists in
sentence.
ex: >>> phrase_index('the quick brown fox jumps over the lazy dog',
'brown fox jumps')
(10, 24)
'''
phrase = str(phrase) # in case phrase is a number
m = re.match(r'(.*?)\b'+re.escape(phrase)+r'\b', sentence)
if m:
# group 0 and 1 returns the match with and without the phrase respectively
l = len(m.group(1))
return (l, l+len(phrase)-1)
return None
def phrase_pos(sentence, phrase):
'''
Returns the start and end position of phrase (first instance) if it exists in
sentence.
  ex: >>> phrase_pos('the quick brown fox jumps over the lazy dog',
'brown fox jumps')
(2, 5)
'''
phrase = str(phrase) # in case phrase is a number
s_tok = sentence.split()
p_tok = phrase.split()
p_len = len(p_tok)
# get all indices where s_tok[i] matches p_tok[0]
indices = [ i for i, x in enumerate(s_tok) if x == p_tok[0] ]
for i in indices:
if s_tok[i : i+p_len] == p_tok:
return i, i+p_len
return None
| mit | -3,836,816,383,660,354,000 | 27.926829 | 79 | 0.598651 | false |
dprog-philippe-docourt/django-qr-code | setup.py | 1 | 1589 | import re
from setuptools import setup
# Get version without importing
with open('qr_code/__init__.py', 'rb') as f:
VERSION = str(re.search('__version__ = \'(.+?)\'', f.read().decode('utf-8')).group(1))
setup(
name='django-qr-code',
version=VERSION,
packages=['qr_code', 'qr_code.qrcode', 'qr_code.templatetags'],
url='https://github.com/dprog-philippe-docourt/django-qr-code',
license='BSD 3-clause',
author='Philippe Docourt',
author_email='[email protected]',
maintainer='Philippe Docourt',
description='An application that provides tools for displaying QR codes on your Django site.',
long_description="""This application provides tools for displaying QR codes on your `Django <https://www.djangoproject.com/>`_ site.
This application depends on the `Segno QR Code generator <https://pypi.org/project/segno/>`_.
This app makes no usage of the Django models and therefore do not use any database.
Only Python >= 3.6 is supported.""",
install_requires=['segno', 'django>=2.2'],
python_requires='>=3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3 :: Only',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.1',
'Natural Language :: English'
],
keywords='qr code django',
)
| bsd-3-clause | 7,363,241,608,391,071,000 | 37.756098 | 136 | 0.642542 | false |
PyPlanet/PyPlanet | pyplanet/conf/backends/python.py | 1 | 1598 | import importlib
import os
from pyplanet.conf.backends.base import ConfigBackend
from pyplanet.core.exceptions import ImproperlyConfigured
class PythonConfigBackend(ConfigBackend):
name = 'python'
def __init__(self, **options):
super().__init__(**options)
self.module = None
def load(self):
# Make sure we load the defaults first.
super().load()
# Prepare the loading.
self.module = os.environ.get('PYPLANET_SETTINGS_MODULE', 'settings')
if not self.module:
raise ImproperlyConfigured(
'Settings module is not defined! Please define PYPLANET_SETTINGS_MODULE in your environment or start script.'
)
# Add the module itself to the configuration.
self.settings['SETTINGS_MODULE'] = self.module
# Load the module, put the settings into the local context.
try:
module = importlib.import_module(self.module)
except ModuleNotFoundError as e:
raise ImproperlyConfigured(
'The settings module doesn\'t contain any submodules or files to load! Please make sure '
'your settings module exist or contains the files base.py and apps.py. Your module: {}'.format(self.module)
) from e
# Load from the modules.
processed = 0
for setting in dir(module):
if setting.isupper():
self.settings[setting] = getattr(module, setting)
processed += 1
# Check for empty results.
if processed < 1:
raise ImproperlyConfigured(
'The settings module doesn\'t contain any submodules or files to load! Please make sure '
'your settings module exist or contains the files base.py and apps.py. Your module: {}'.format(self.module)
)
| gpl-3.0 | 1,730,560,987,640,634,400 | 29.730769 | 113 | 0.72403 | false |
keis/smoke | tests/test_mixed.py | 1 | 1963 | import pytest
import mock
from hamcrest import assert_that
from matchmock import called_once_with
from smoke import signal, Broker
class Source(object):
spam = signal()
egg = signal(name='egg')
class Mixed(Source, Broker):
pass
@pytest.fixture
def listener():
return mock.Mock()
@pytest.fixture
def mixed():
return Mixed()
def test_subscribe_signal_publish_broker(mixed, listener):
sentinel = object()
mixed.spam.subscribe(listener.spam_cb)
mixed.publish(mixed.spam, s=sentinel)
assert_that(listener.spam_cb, called_once_with(s=sentinel))
def test_subscribe_broker_publish_signal(mixed, listener):
sentinel = object()
mixed.subscribe(mixed.spam, listener.spam_cb)
mixed.spam(s=sentinel)
assert_that(listener.spam_cb, called_once_with(s=sentinel))
def test_subscribe_broker_publish_signal_with_name(mixed, listener):
sentinel = object()
mixed.subscribe(mixed.egg, listener.egg_cb)
mixed.egg(s=sentinel)
assert_that(listener.egg_cb, called_once_with(s=sentinel))
@pytest.mark.skip(reason="Not supported, for now")
def test_subscribe_signal_publish_boundsignal(mixed, listener):
    # Supporting this in a general way might be a bit too intrusive as
    # bound methods, functions and other things implementing the descriptor
    # protocol would be considered equal as well.
sentinel = object()
mixed.subscribe(Mixed.spam, listener.spam_cb)
mixed.publish(mixed.spam, s=sentinel)
assert_that(listener.spam_cb, called_once_with(s=sentinel))
def test_subscribe_by_name(mixed, listener):
sentinel = object()
mixed.subscribe('egg', listener.egg_cb)
mixed.egg(s=sentinel)
assert_that(listener.egg_cb, called_once_with(s=sentinel))
def test_publish_override(mixed, listener):
sentinel = object()
mixed.publish = mock.Mock(wraps=mixed.publish)
mixed.egg(s=sentinel)
assert_that(mixed.publish, called_once_with(mixed.egg, s=sentinel))
| mit | 5,439,008,101,361,624,000 | 24.828947 | 75 | 0.720835 | false |
laginimaineb/android_fde_bruteforce | structures.py | 1 | 2262 | import struct
from StringIO import StringIO
#The crypt_mnt_ftr structure - see /system/vold/cryptfs.h
CRYPT_MNT_FTR = [('magic' , 'I'),
('major_version' , 'H'),
('minor_version' , 'H'),
('ftr_size' , 'I'),
('flags' , 'I'),
('keysize' , 'I'),
('crypt_size' , 'I'),
('fs_size' , 'Q'),
('failed_decrypt_count' , 'I'),
('crypto_type_name' , '64s'),
('spare2' , 'I'),
('master_key' , '48s'),
('salt' , '16s'),
('persist_data_offset_0' , 'Q'),
('persist_data_offset_1' , 'Q'),
('persist_data_size' , 'I'),
('kdf_type' , 'B'),
('N_factor' , 'B'),
('r_factor' , 'B'),
('p_factor' , 'B'),
('encrypted_upto' , 'Q'),
('hash_first_block' , '32s'),
('keymaster_blob' , '2048s'),
('keymaster_blob_size' , 'I'),
('scrypted_intermediate_key', '32s')]
#The qcom_km_key_blob structure - see /hardware/qcom/keymaster/keymaster_qcom.h
QCOM_KEY_BLOB = [('magic_num' , 'I'),
('version_num' , 'I'),
('modulus' , '512s'),
('modulus_size' , 'I'),
('public_exponent' , '512s'),
('public_exponent_size' , 'I'),
('iv' , '16s'),
('encrypted_private_exponent' , '512s'),
('encrypted_private_exponent_size' , 'I'),
('hmac' , '32s')]
def read_object(data, definition):
'''
Unpacks a structure using the given data and definition.
'''
reader = StringIO(data)
obj = {}
object_size = 0
for (name, stype) in definition:
object_size += struct.calcsize(stype)
obj[name] = struct.unpack(stype, reader.read(struct.calcsize(stype)))[0]
obj['object_size'] = object_size
obj['raw_data'] = data
return obj
def read_crypt_mnt_ftr(data):
return read_object(data, CRYPT_MNT_FTR)
def read_qcom_key_blob(data):
return read_object(data, QCOM_KEY_BLOB)
| gpl-2.0 | 1,956,154,337,582,137,300 | 36.081967 | 79 | 0.438992 | false |
windweaver828/kspeech | commandtools.py | 1 | 1026 | #!/usr/bin/env python
def isCommand(command, args):
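	# Check whether the spoken command contains the required words. Each element of
	# args is expected to be a list of alternatives; an alternative may itself be a
	# list of words that must all appear in the command.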
index = 0
for arg in args:
if isinstance(arg, list):
for ar in arg:
if isinstance(ar, list):
for a in ar:
if isinstance(a, list):
index-=1
isCommand(command, a)
elif not a in command:
break
else:
index+=1
elif ar in command:
index+=1
break
if index >= len(args):
return True
def callCommand(func, args):
if args: return func(*args)
else: return func()
def matchCommand(command, commands):
for commdef in commands.keys():
if isCommand(command, commdef):
return commands[commdef]
else: return False
def matchAndCallCommand(command, commands):
ret = matchCommand(command, commands)
if ret: callCommand(*ret)
| gpl-2.0 | 3,961,388,683,577,054,700 | 24.65 | 49 | 0.477583 | false |
snakecon/AI_Lab | spider/book/book/pipelines.py | 1 | 4704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
import book.database as db
from scrapy import Request
from scrapy.utils.misc import arg_to_iter
from twisted.internet.defer import DeferredList
from scrapy.pipelines.images import ImagesPipeline
from book.items import Subject, Meta, Comment
class BookPipeline(object):
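    """Persist scraped subjects, book metadata and comments through the book.database connection."""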
def get_subject(self, item):
sql = 'SELECT * FROM subjects WHERE douban_id=%s' % item['douban_id']
return db.conn.get(sql)
def save_subject(self, item):
keys = item.keys()
values = item.values()
fields = ','.join(keys)
temp = ','.join(['%s'] * len(keys))
sql = 'INSERT INTO subjects (%s) VALUES (%s)' % (fields, temp)
db.conn.insert(sql, *values)
def get_meta(self, item):
sql = 'SELECT * FROM books WHERE douban_id=%s' % item['douban_id']
return db.conn.get(sql)
def save_meta(self, item):
keys = item.keys()
values = item.values()
fields = ','.join(keys)
temp = ','.join(['%s'] * len(keys))
sql = 'INSERT INTO books (%s) VALUES (%s)' % (fields, temp)
db.conn.insert(sql, *(i.strip() for i in values))
def update_meta(self, item):
douban_id = item.pop('douban_id')
keys = item.keys()
values = item.values()
values.append(douban_id)
fields = ['%s=' % i + '%s' for i in keys]
sql = 'UPDATE books SET %s WHERE douban_id=%s\
' % (','.join(fields), '%s')
db.conn.update(sql, *values)
def get_comment(self, item):
sql = 'SELECT * FROM comments WHERE douban_comment_id=%s\
' % item['douban_comment_id']
return db.conn.get(sql)
def save_comment(self, item):
keys = item.keys()
values = item.values()
fields = ','.join(keys)
temp = ','.join(['%s'] * len(keys))
db.conn.execute('SET NAMES utf8mb4')
sql = 'INSERT INTO comments (%s) VALUES (%s)' % (fields, temp)
db.conn.insert(sql, *(i.strip() for i in values))
def process_item(self, item, spider):
if isinstance(item, Subject):
'''
subject
'''
exsit = self.get_subject(item)
if not exsit:
self.save_subject(item)
elif isinstance(item, Meta):
'''
book meta
'''
exsit = self.get_meta(item)
if not exsit:
try:
self.save_meta(item)
except Exception, e:
print item
print e
else:
self.update_meta(item)
elif isinstance(item, Comment):
'''
book comment
'''
exsit = self.get_comment(item)
if not exsit:
try:
self.save_comment(item)
except Exception, e:
print item
print e
return item
class CoverPipeline(ImagesPipeline):
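    """Download book cover images via Scrapy's ImagesPipeline; only items scraped by the 'meta' spider are processed."""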
def process_item(self, item, spider):
if spider.name != 'meta':
return item
info = self.spiderinfo
requests = arg_to_iter(self.get_media_requests(item, info))
dlist = [self._process_request(r, info) for r in requests]
dfd = DeferredList(dlist, consumeErrors=1)
return dfd.addCallback(self.item_completed, item, info)
def file_path(self, request, response=None, info=None):
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are \
deprecated, please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
image_guid = hashlib.sha1(url).hexdigest()
return '%s%s/%s%s/%s.jpg\
' % (image_guid[9], image_guid[19], image_guid[29], image_guid[39], image_guid)
def get_media_requests(self, item, info):
if item['cover']:
return Request(item['cover'])
def item_completed(self, results, item, info):
image_paths = [x['path'] for ok, x in results if ok]
if image_paths:
item['cover'] = image_paths[0]
else:
item['cover'] = ''
return item
| apache-2.0 | 2,729,605,431,872,991,000 | 32.361702 | 88 | 0.540816 | false |
elkingtonmcb/bcbio-nextgen | bcbio/variation/validateplot.py | 1 | 15359 | """Plot validation results from variant calling comparisons.
Handles data normalization and plotting, emphasizing comparisons on methodology
differences.
"""
import collections
import os
import numpy as np
import pandas as pd
try:
import matplotlib as mpl
mpl.use('Agg', force=True)
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
except ImportError:
mpl, plt = None, None
try:
import seaborn as sns
except ImportError:
sns = None
from bcbio.log import logger
from bcbio import utils
from bcbio.variation import bamprep
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None):
"""Create a plot from individual summary csv files with classification metrics.
"""
df = pd.concat([pd.read_csv(x) for x in plot_files])
df.to_csv(out_csv, index=False)
return classifyplot_from_valfile(out_csv, outtype, title, size)
def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None):
"""Create a plot from a summarized validation file.
Does new-style plotting of summarized metrics of
false negative rate and false discovery rate.
https://en.wikipedia.org/wiki/Sensitivity_and_specificity
"""
df = pd.read_csv(val_file)
grouped = df.groupby(["sample", "caller", "vtype"])
df = grouped.apply(_calculate_fnr_fdr)
df = df.reset_index()
out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype)
_do_classifyplot(df, out_file, title, size)
return [out_file]
def _calculate_fnr_fdr(group):
"""Calculate the false negative rate (1 - sensitivity) and false discovery rate (1 - precision).
"""
data = {k: d["value"] for k, d in group.set_index("metric").T.to_dict().items()}
return pd.DataFrame([{"fnr": data["fn"] / float(data["tp"] + data["fn"]) * 100.0 if data["tp"] > 0 else 0.0,
"fdr": data["fp"] / float(data["tp"] + data["fp"]) * 100.0 if data["tp"] > 0 else 0.0,
"tpr": "TP: %s FN: %s" % (data["tp"], data["fn"]),
"spc": "FP: %s" % (data["fp"])}])
def _do_classifyplot(df, out_file, title=None, size=None):
"""Plot using classification-based plot using seaborn.
"""
metric_labels = {"fdr": "False discovery rate",
"fnr": "False negative rate"}
metrics = [("fnr", "tpr"), ("fdr", "spc")]
colors = ["light grey", "greyish"]
data_dict = df.set_index(["sample", "caller", "vtype"]).T.to_dict()
plt.ioff()
sns.set(style='white')
vtypes = sorted(df["vtype"].unique(), reverse=True)
callers = sorted(df["caller"].unique())
samples = sorted(df["sample"].unique())
fig, axs = plt.subplots(len(vtypes) * len(callers), len(metrics))
fig.text(.5, .95, title if title else "", horizontalalignment='center', size=14)
for vi, vtype in enumerate(vtypes):
sns.set_palette(sns.xkcd_palette([colors[vi]]))
for ci, caller in enumerate(callers):
for j, (metric, label) in enumerate(metrics):
cur_plot = axs[vi * len(vtypes) + ci][j]
vals, labels = [], []
for sample in samples:
cur_data = data_dict[(sample, caller, vtype)]
vals.append(cur_data[metric])
labels.append(cur_data[label])
cur_plot.barh(np.arange(len(samples)), vals)
all_vals = []
for k, d in data_dict.items():
if k[-1] == vtype:
for m in metrics:
all_vals.append(d[m[0]])
metric_max = max(all_vals)
cur_plot.set_xlim(0, metric_max)
pad = 0.1 * metric_max
for ai, (val, label) in enumerate(zip(vals, labels)):
cur_plot.annotate(label, (pad + (0 if max(vals) > metric_max / 2.0 else max(vals)),
ai + 0.35), va='center', size=7)
if j == 0:
cur_plot.tick_params(axis='y', which='major', labelsize=8)
cur_plot.locator_params(nbins=len(samples) + 2, axis="y", tight=True)
cur_plot.set_yticklabels(samples, size=8, va="bottom")
cur_plot.set_title("%s: %s" % (vtype, caller), fontsize=12, loc="left")
else:
cur_plot.get_yaxis().set_ticks([])
if ci == len(callers) - 1:
cur_plot.tick_params(axis='x', which='major', labelsize=8)
cur_plot.get_xaxis().set_major_formatter(
FuncFormatter(lambda v, p: "%s%%" % (int(v) if round(v) == v else v)))
if vi == len(vtypes) - 1:
cur_plot.get_xaxis().set_label_text(metric_labels[metric], size=12)
else:
cur_plot.get_xaxis().set_ticks([])
cur_plot.spines['bottom'].set_visible(False)
cur_plot.spines['left'].set_visible(False)
cur_plot.spines['top'].set_visible(False)
cur_plot.spines['right'].set_visible(False)
x, y = (6, len(vtypes) * len(callers) + 1 * 0.5 * len(samples)) if size is None else size
fig.set_size_inches(x, y)
fig.tight_layout(rect=(0, 0, 1, 0.95))
plt.subplots_adjust(hspace=0.6)
fig.savefig(out_file)
def create_from_csv(in_csv, config=None, outtype="png", title=None, size=None):
df = pd.read_csv(in_csv)
create(df, None, 0, config or {}, os.path.splitext(in_csv)[0], outtype, title,
size)
def create(plot_data, header, ploti, sample_config, out_file_base, outtype="png",
title=None, size=None):
"""Create plots of validation results for a sample, labeling prep strategies.
"""
if mpl is None or plt is None or sns is None:
not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None])
logger.info("No validation plot. Missing imports: %s" % not_found)
return None
if header:
df = pd.DataFrame(plot_data, columns=header)
else:
df = plot_data
df["aligner"] = [get_aligner(x, sample_config) for x in df["sample"]]
df["bamprep"] = [get_bamprep(x, sample_config) for x in df["sample"]]
floors = get_group_floors(df, cat_labels)
df["value.floor"] = [get_floor_value(x, cat, vartype, floors)
for (x, cat, vartype) in zip(df["value"], df["category"], df["variant.type"])]
out = []
for i, prep in enumerate(df["bamprep"].unique()):
out.append(plot_prep_methods(df, prep, i + ploti, out_file_base, outtype, title, size))
return out
cat_labels = {"concordant": "Concordant",
"discordant-missing-total": "Discordant (missing)",
"discordant-extra-total": "Discordant (extra)",
"discordant-shared-total": "Discordant (shared)"}
vtype_labels = {"snp": "SNPs", "indel": "Indels"}
prep_labels = {}
caller_labels = {"ensemble": "Ensemble", "freebayes": "FreeBayes",
"gatk": "GATK Unified\nGenotyper", "gatk-haplotype": "GATK Haplotype\nCaller"}
def plot_prep_methods(df, prep, prepi, out_file_base, outtype, title=None,
size=None):
"""Plot comparison between BAM preparation methods.
"""
samples = df[(df["bamprep"] == prep)]["sample"].unique()
assert len(samples) >= 1, samples
out_file = "%s-%s.%s" % (out_file_base, samples[0], outtype)
df = df[df["category"].isin(cat_labels)]
_seaborn(df, prep, prepi, out_file, title, size)
return out_file
def _seaborn(df, prep, prepi, out_file, title=None, size=None):
"""Plot using seaborn wrapper around matplotlib.
"""
plt.ioff()
sns.set(style='dark')
vtypes = df["variant.type"].unique()
callers = sorted(df["caller"].unique())
cats = _check_cats(["concordant", "discordant-missing-total",
"discordant-extra-total", "discordant-shared-total"],
vtypes, df, prep, callers)
fig, axs = plt.subplots(len(vtypes), len(cats))
width = 0.8
for i, vtype in enumerate(vtypes):
ax_row = axs[i] if len(vtypes) > 1 else axs
for j, cat in enumerate(cats):
vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
if len(cats) == 1:
assert j == 0
ax = ax_row
else:
ax = ax_row[j]
if i == 0:
ax.set_title(cat_labels[cat], size=14)
ax.get_yaxis().set_ticks([])
if j == 0:
ax.set_ylabel(vtype_labels[vtype], size=14)
ax.bar(np.arange(len(callers)), vals, width=width)
ax.set_ylim(0, maxval)
if i == len(vtypes) - 1:
ax.set_xticks(np.arange(len(callers)) + width / 2.0)
ax.set_xticklabels([caller_labels.get(x, x).replace("__", "\n") if x else ""
for x in callers], size=8, rotation=45)
else:
ax.get_xaxis().set_ticks([])
_annotate(ax, labels, vals, np.arange(len(callers)), width)
fig.text(.5, .95, prep_labels.get(prep, "") if title is None else title, horizontalalignment='center', size=16)
fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1)
x, y = (10, 5) if size is None else size
fig.set_size_inches(x, y)
fig.savefig(out_file)
def _check_cats(cats, vtypes, df, prep, callers):
"""Only include categories in the final output if they have values.
"""
out = []
for cat in cats:
all_vals = []
for vtype in vtypes:
vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
all_vals.extend(vals)
if sum(all_vals) / float(len(all_vals)) > 2:
out.append(cat)
if len(out) == 0:
return cats
else:
return out
def _get_chart_info(df, vtype, cat, prep, callers):
"""Retrieve values for a specific variant type, category and prep method.
"""
maxval_raw = max(list(df["value.floor"]))
curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat)
& (df["bamprep"] == prep)]
vals = []
labels = []
for c in callers:
row = curdf[df["caller"] == c]
if len(row) > 0:
vals.append(list(row["value.floor"])[0])
labels.append(list(row["value"])[0])
else:
vals.append(1)
labels.append("")
return vals, labels, maxval_raw
def _annotate(ax, annotate, height, left, width):
"""Annotate axis with labels.
"""
annotate_yrange_factor = 0.010
xticks = np.array(left) + width / 2.0
ymin, ymax = ax.get_ylim()
yrange = ymax - ymin
# Reset ymax and ymin so there's enough room to see the annotation of
    # the top-most bar
if ymax > 0:
ymax += yrange * 0.15
if ymin < 0:
ymin -= yrange * 0.15
ax.set_ylim(ymin, ymax)
yrange = ymax - ymin
offset_ = yrange * annotate_yrange_factor
if isinstance(annotate, collections.Iterable):
annotations = map(str, annotate)
else:
annotations = ['%.3f' % h if type(h) is np.float_ else str(h)
for h in height]
for x, h, annotation in zip(xticks, height, annotations):
# Adjust the offset to account for negative bars
offset = offset_ if h >= 0 else -1 * offset_
verticalalignment = 'bottom' if h >= 0 else 'top'
if len(str(annotation)) > 6:
size = 7
elif len(str(annotation)) > 5:
size = 8
else:
size = 10
# Finally, add the text to the axes
ax.annotate(annotation, (x, h + offset),
verticalalignment=verticalalignment,
horizontalalignment='center',
size=size)
def _ggplot(df, out_file):
"""Plot faceted items with ggplot wrapper on top of matplotlib.
XXX Not yet functional
"""
import ggplot as gg
df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]]
df["category"] = [cat_labels[x] for x in df["category"]]
df["caller"] = [caller_labels.get(x, None) for x in df["caller"]]
p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar()
+ gg.facet_wrap("variant.type", "category")
+ gg.theme_seaborn())
gg.ggsave(p, out_file)
def get_floor_value(x, cat, vartype, floors):
"""Modify values so all have the same relative scale for differences.
Using the chosen base heights, adjusts an individual sub-plot to be consistent
relative to that height.
"""
all_base = floors[vartype]
cur_max = floors[(cat, vartype)]
if cur_max > all_base:
diff = cur_max - all_base
x = max(1, x - diff)
return x
def get_group_floors(df, cat_labels):
"""Retrieve the floor for a given row of comparisons, creating a normalized set of differences.
We need to set non-zero floors so large numbers (like concordance) don't drown out small
numbers (like discordance). This defines the height for a row of comparisons as either
the minimum height of any sub-plot, or the maximum difference between higher and lower
(plus 10%).
"""
group_maxes = collections.defaultdict(list)
group_diffs = collections.defaultdict(list)
diff_pad = 0.1 # 10% padding onto difference to avoid large numbers looking like zero
for name, group in df.groupby(["category", "variant.type"]):
label, stype = name
if label in cat_labels:
diff = max(group["value"]) - min(group["value"])
group_diffs[stype].append(diff + int(diff_pad * diff))
group_maxes[stype].append(max(group["value"]))
group_maxes[name].append(max(group["value"]))
out = {}
for k, vs in group_maxes.iteritems():
if k in group_diffs:
            out[k] = max(max(group_diffs[k]), min(vs))
else:
out[k] = min(vs)
return out
def get_aligner(x, config):
return utils.get_in(config, ("algorithm", "aligner"), "")
def get_bamprep(x, config):
params = bamprep._get_prep_params({"config": {"algorithm": config.get("algorithm", {})}})
if params["realign"] == "gatk" and params["recal"] == "gatk":
return "gatk"
elif not params["realign"] and not params["recal"]:
return "none"
elif not params.get("recal") or not params.get("realign"):
return "mixed"
else:
return ""
# ## Frequency plots
def facet_freq_plot(freq_csv, caller):
"""Prepare a facet plot of frequencies stratified by variant type and status (TP, FP, FN).
Makes a nice plot with the output from validate.freq_summary
"""
out_file = "%s.png" % os.path.splitext(freq_csv)[0]
plt.ioff()
sns.set(style='dark')
df = pd.read_csv(freq_csv)
g = sns.FacetGrid(df, row="vtype", col="valclass", margin_titles=True,
col_order=["TP", "FN", "FP"], row_order=["snp", "indel"],
sharey=False)
g.map(plt.hist, "freq", bins=20, align="left")
g.set(xlim=(0.0, 1.0))
g.fig.set_size_inches(8, 6)
g.fig.text(.05, .97, caller, horizontalalignment='center', size=14)
g.fig.savefig(out_file)
| mit | -1,548,195,915,837,181,700 | 40.736413 | 115 | 0.573996 | false |
stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/modules/dnsutil.py | 1 | 11113 | # -*- coding: utf-8 -*-
'''
Compendium of generic DNS utilities
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
import socket
# Import python libs
import logging
import time
log = logging.getLogger(__name__)
def __virtual__():
'''
Generic, should work on any platform (including Windows). Functionality
which requires dependencies outside of Python do not belong in this module.
'''
return True
def parse_hosts(hostsfile='/etc/hosts', hosts=None):
'''
Parse /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.parse_hosts
'''
if not hosts:
try:
with salt.utils.fopen(hostsfile, 'r') as fp_:
hosts = fp_.read()
except Exception:
return 'Error: hosts data was not found'
hostsdict = {}
for line in hosts.splitlines():
if not line:
continue
if line.startswith('#'):
continue
comps = line.split()
ip = comps[0]
aliases = comps[1:]
hostsdict.setdefault(ip, []).extend(aliases)
return hostsdict
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.fopen(hostsfile, 'a') as fp_:
fp_.write(append_line)
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line)
def hosts_remove(hostsfile='/etc/hosts', entries=None):
'''
Remove a host from the /etc/hosts file. If doing so will leave a line
containing only an IP address, then the line will be deleted. This function
will leave comments and blank lines intact.
CLI Examples:
.. code-block:: bash
salt '*' dnsutil.hosts_remove /etc/hosts ad1.yuk.co
salt '*' dnsutil.hosts_remove /etc/hosts ad2.yuk.co,ad1.yuk.co
'''
with salt.utils.fopen(hostsfile, 'r') as fp_:
hosts = fp_.read()
host_list = entries.split(',')
with salt.utils.fopen(hostsfile, 'w') as out_file:
for line in hosts.splitlines():
if not line or line.strip().startswith('#'):
out_file.write('{0}\n'.format(line))
continue
comps = line.split()
for host in host_list:
if host in comps[1:]:
comps.remove(host)
if len(comps) > 1:
out_file.write(' '.join(comps))
out_file.write('\n')
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.fopen(zonefile, 'r') as fp_:
zone = fp_.read()
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.'):
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
return zonedict
def _to_seconds(timestr):
'''
Converts a time value to seconds.
As per RFC1035 (page 45), max time is 1 week, so anything longer (or
unreadable) will be set to one week (604800 seconds).
'''
timestr = timestr.upper()
if 'H' in timestr:
seconds = int(timestr.replace('H', '')) * 3600
elif 'D' in timestr:
seconds = int(timestr.replace('D', '')) * 86400
elif 'W' in timestr:
seconds = 604800
else:
try:
seconds = int(timestr)
except ValueError:
seconds = 604800
if seconds > 604800:
seconds = 604800
return seconds
def _has_dig():
'''
The dig-specific functions have been moved into their own module, but
because they are also DNS utilities, a compatibility layer exists. This
function helps add that layer.
'''
return salt.utils.which('dig') is not None
def check_ip(ip_addr):
'''
Check that string ip_addr is a valid IP
CLI Example:
.. code-block:: bash
salt ns1 dig.check_ip 127.0.0.1
'''
if _has_dig():
return __salt__['dig.check_ip'](ip_addr)
return 'This function requires dig, which is not currently available'
def A(host, nameserver=None):
'''
Return the A record(s) for `host`.
Always returns a list.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.A www.google.com
'''
if _has_dig():
return __salt__['dig.A'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for `host`.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available'
def NS(domain, resolve=True, nameserver=None):
'''
Return a list of IPs of the nameservers for ``domain``
If 'resolve' is False, don't resolve names.
CLI Example:
.. code-block:: bash
salt ns1 dig.NS google.com
'''
if _has_dig():
return __salt__['dig.NS'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def SPF(domain, record='SPF', nameserver=None):
'''
Return the allowed IPv4 ranges in the SPF record for ``domain``.
If record is ``SPF`` and the SPF record is empty, the TXT record will be
searched automatically. If you know the domain uses TXT and not SPF,
specifying that will save a lookup.
CLI Example:
.. code-block:: bash
salt ns1 dig.SPF google.com
'''
if _has_dig():
return __salt__['dig.SPF'](domain, record, nameserver)
return 'This function requires dig, which is not currently available'
def MX(domain, resolve=False, nameserver=None):
'''
Return a list of lists for the MX of ``domain``.
If the 'resolve' argument is True, resolve IPs for the servers.
It's limited to one IP, because although in practice it's very rarely a
round robin, it is an acceptable configuration and pulling just one IP lets
the data be similar to the non-resolved version. If you think an MX has
multiple IPs, don't use the resolver here, resolve them in a separate step.
CLI Example:
.. code-block:: bash
salt ns1 dig.MX google.com
'''
if _has_dig():
return __salt__['dig.MX'](domain, resolve, nameserver)
return 'This function requires dig, which is not currently available'
def serial(zone='', update=False):
'''
Return, store and update a dns serial for your zone files.
zone: a keyword for a specific zone
update: store an updated version of the serial in a grain
    If ``update`` is False, the function will retrieve an existing serial or
    return the current date if no serial is stored. Nothing will be stored.
    If ``update`` is True, the function will set the serial to the current date
    if none exist or if the existing serial is for a previous date. If a serial
    greater than the current date is already stored, the function will
increment it.
This module stores the serial in a grain, you can explicitly set the
stored value as a grain named ``dnsserial_<zone_name>``.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.serial example.com
'''
grains = {}
key = 'dnsserial'
if zone:
key += '_{0}'.format(zone)
stored = __salt__['grains.get'](key=key)
present = time.strftime('%Y%m%d01')
if not update:
return stored or present
if stored and stored >= present:
current = str(int(stored) + 1)
else:
current = present
__salt__['grains.setval'](key=key, val=current)
return current
| apache-2.0 | -4,666,205,067,642,195,000 | 27.494872 | 116 | 0.579052 | false |
shiburizu/py2discord | py2discord.py | 1 | 8389 | import discord
import sqlite3 as sql
import logging
import cleverbot
import random
logging.basicConfig(level=logging.INFO)
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
from apiclient.discovery import build
import apiclient.errors
# Please refer to the README to find where you should paste your bot's credentials for services.
blacklistwords = ['image','gif','help','add','talk','permissions','blacklist','whitelist']
maxwhiterank = 3
maxblackrank = 3
service = build("customsearch", "v1",
developerKey="CREATE FROM CONSOLE.DEVELOPERS.GOOGLE.COM")
class client(discord.Client):
def isBlacklisted(self,msg,p):
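		# Returns True (and notifies the channel) when the author's blacklist level is at least p.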
c.execute("SELECT level FROM blacklist where id = ?", (msg.author.id,))
blacklist = c.fetchone()
if blacklist:
val = int(blacklist[0][0])
if val >= int(p):
self.send_message(msg.channel,'%s is blacklist level %s, therefore this command is locked.' % (msg.author.name, blacklist[0][0]))
return True
else:
return False
else:
return False
def isWhitelisted(self,msg,p):
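		# Returns True when the author's whitelist level is at least p; otherwise notifies the channel and returns False.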
c.execute("SELECT level FROM whitelist where id = ?", (msg.author.id,))
whitelist = c.fetchone()
if whitelist:
val = int(whitelist[0][0])
if val >= int(p):
return True
else:
self.send_message(msg.channel,'%s does not have sufficient permissions to use that command.' % msg.author.name)
return False
else:
self.send_message(msg.channel,'%s does not have sufficient permissions to use that command.' % msg.author.name)
return False
def on_message(self, message):
p = self.isBlacklisted(message,'3')
if p == False:
if message.content.startswith('$help'):
commands = c.execute('SELECT name FROM cmds')
self.send_message(message.channel,
"""py2discord is a Discord chat bot written in Python
					by https://github.com/shiburizu/
					Available commands: %s""" % ', '.join([str(i[0]) for i in commands]))
elif message.content.startswith('$blacklist '):
try:
p = self.isWhitelisted(message,'1') #check whitelist 1
if p == True:
insert = (message.content[13:].replace('>','')).split(' ', 1)
try:
if insert[1].isdigit():
print insert
if int(insert[1]) > maxblackrank:
self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
else:
c.execute('INSERT OR REPLACE INTO blacklist(id, level) VALUES(?,?)',
(insert[0],insert[1]))
db.commit()
self.send_message(message.channel,
'Successfully blacklisted ID %s at level %s.' % (insert[0],insert[1]))
else:
self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
except IndexError:
self.send_message(message.channel, 'Please provide a valid blacklist level. Can be from 0 (None) to %s.' % maxblackrank)
except sql.Error as e:
if db:
db.rollback()
print "Error %s:" % e.args[0]
self.send_message(message.channel,
"Something went wrong. It has been logged.")
elif message.content.startswith('$whitelist '):
try:
p = self.isWhitelisted(message,'2') #check whitelist 2
if p == True:
insert = (message.content[13:].replace('>','')).split(' ', 1)
try:
if insert[1].isdigit():
print insert
if int(insert[1]) > maxwhiterank:
self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
else:
c.execute('INSERT OR REPLACE INTO whitelist(id, level) VALUES(?,?)',
(insert[0],insert[1]))
db.commit()
self.send_message(message.channel,
'Successfully whitelisted ID %s at level %s.' % (insert[0],insert[1]))
else:
self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
except IndexError:
self.send_message(message.channel, 'Please provide a valid whitelist level. Can be from 0 (None) to %s.' % maxwhiterank)
except sql.Error as e:
if db:
db.rollback()
print "Error %s:" % e.args[0]
self.send_message(message.channel,
"Something went wrong. It has been logged.")
elif message.content.startswith('$image '):
try:
p = self.isBlacklisted(message,'1') #check blacklist 1
if p == False:
query = message.content[7:]
if query != '':
res = service.cse().list(
q=query,
cx='INSERT CX KEY FROM CSE.GOOGLE.COM',
searchType='image',
num=10,
safe='off'
).execute()
if not 'items' in res:
self.send_message(message.channel, "No image found.")
else:
results = []
for item in res['items']:
results.append(item['link'])
self.send_message(message.channel, random.choice(results))
else:
self.send_message(message.channel,'Please input search terms.')
except apiclient.errors.HttpError as e:
self.send_message(message.channel,
"There was a problem with your request. Here is some information:```%s```" % e)
elif message.content.startswith('$gif '):
try:
p = self.isBlacklisted(message,'1') #check blacklist 1
if p == False:
						query = message.content[5:]
						if query != '':
res = service.cse().list(
q=query,
cx='INSERT CX KEY FROM CSE.GOOGLE.COM',
searchType='image',
fileType='gif',
num=10,
safe='off'
).execute()
if not 'items' in res:
self.send_message(message.channel, "No image found.")
else:
results = []
for item in res['items']:
results.append(item['link'])
self.send_message(message.channel, random.choice(results))
else:
self.send_message(message.channel,'Please input search terms.')
except apiclient.errors.HttpError as e:
self.send_message(message.channel,
"There was a problem with your request. Here is some information:```%s```" % e)
elif message.content.startswith('$add '):
try:
p = self.isBlacklisted(message,'2')
if p == False:
insert = (message.content[5:].encode('utf-8')).split(' ', 1)
						if insert[0] not in blacklistwords:
print insert
c.execute('INSERT OR ABORT INTO cmds(name,cmd) VALUES(?,?)',
(insert[0],insert[1]))
db.commit()
self.send_message(message.channel,
"Command added.")
else:
self.send_message(message.channel,
"This is a blacklisted word, and cannot be added.")
except sql.IntegrityError:
self.send_message(message.channel, "Already exists. Aborted.")
except sql.Error as e:
if db:
db.rollback()
print "Error %s:" % e.args[0]
self.send_message(message.channel,
"Something went wrong. It has been logged.")
elif message.content.startswith('$talk '):
reply = talk.ask(message.content[6:])
print "Was asked:", message.content[6:], "Replied with:", reply
self.send_message(message.channel, reply)
elif message.content.startswith('$permissions'):
c.execute('SELECT level FROM whitelist WHERE id = ?', (message.author.id,))
white = c.fetchone()
if not white:
white = 0
else:
white = white[0][0]
c.execute('SELECT level FROM blacklist WHERE id = ?', (message.author.id,))
black = c.fetchone()
if not black:
black = 0
else:
black = black[0][0]
self.send_message(message.channel,
'%s, your Discord ID is %s. Your whitelist level is %s and blacklist level is %s.' % (
message.author.name,message.author.id,white,black))
elif message.content.startswith('$'):
try:
c.execute("SELECT cmd FROM cmds WHERE name = ?",
(message.content[1:],))
fetch = c.fetchone()
self.send_message(message.channel, fetch[0])
except TypeError:
pass
talk = cleverbot.Cleverbot()
bot = client()
db = sql.connect('commands.db')
db.text_factory = str
c = db.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS cmds(name VARCHAR(10) UNIQUE,
cmd VARCHAR(64));''')
c.execute('''CREATE TABLE IF NOT EXISTS blacklist(id VARCHAR(10) UNIQUE,
level VARCHAR(64));''')
c.execute('''CREATE TABLE IF NOT EXISTS whitelist(id VARCHAR(10) UNIQUE,
level VARCHAR(64));''')
db.commit()
bot.login('EMAIL','PASSWORD')
bot.run()
| isc | -2,526,754,386,635,023,000 | 35.004292 | 133 | 0.635713 | false |
LMSlay/wiper | modules/clamav.py | 1 | 2875 | # This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import getopt
try:
import pyclamd
HAVE_CLAMD = True
except ImportError:
HAVE_CLAMD = False
from viper.common.out import *
from viper.common.abstracts import Module
from viper.core.session import __sessions__
class ClamAV(Module):
cmd = 'clamav'
description = 'Scan file from local ClamAV daemon'
authors = ['neriberto']
def run(self):
def usage():
self.log('', "usage: clamav [-h] [-s]")
def help():
usage()
self.log('', "")
self.log('', "Options:")
self.log('', "\t--help (-h)\tShow this help message")
            self.log('', "\t--socket (-s)\tSpecify a unix socket (default: Clamd Unix Socket)")
self.log('', "")
if not HAVE_CLAMD:
            self.log('error', "Missing dependency, install pyclamd (`pip install pyclamd`)")
return
try:
opts, argv = getopt.getopt(self.args, 'hs:', ['help', 'socket='])
except getopt.GetoptError as e:
self.log('', e)
usage()
return
daemon = None
result = None
socket = None
for opt, value in opts:
if opt in ('-h', '--help'):
help()
return
elif opt in ('-s', '--socket'):
self.log('info', "Using socket {0} to connect to ClamAV daemon".format(value))
socket = value
try:
daemon = pyclamd.ClamdUnixSocket(socket)
except Exception as e:
self.log('error', "Daemon connection failure, {0}".format(e))
return
if not __sessions__.is_set():
self.log('error', "No session opened")
return
try:
if not daemon:
daemon = pyclamd.ClamdUnixSocket()
socket = 'Clamav'
except Exception as e:
self.log('error', "Daemon connection failure, {0}".format(e))
return
try:
if daemon.ping():
results = daemon.scan_file(__sessions__.current.file.path)
else:
self.log('error', "Unable to connect to the daemon")
except Exception as e:
self.log('error', "Unable to scan with antivirus daemon, {0}".format(e))
return
found = None
name = 'not found'
if results:
for item in results:
found = results[item][0]
name = results[item][1]
if found == 'ERROR':
self.log('error', "Check permissions of the binary folder, {0}".format(name))
else:
self.log('info', "Daemon {0} returns: {1}".format(socket, name))
| bsd-3-clause | -1,005,792,964,719,011,700 | 29.913978 | 95 | 0.510957 | false |
vmendez/DIRAC | Resources/Storage/StorageElement.py | 1 | 35478 | """ This is the StorageElement class.
"""
from types import ListType
__RCSID__ = "$Id$"
# # custom duty
import re
import time
import datetime
import copy
import errno
# # from DIRAC
from DIRAC import gLogger, gConfig, siteName
from DIRAC.Core.Utilities import DErrno, DError
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, returnSingleResult
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
class StorageElementCache( object ):
def __init__( self ):
self.seCache = DictCache()
def __call__( self, name, protocols = None, vo = None, hideExceptions = False ):
self.seCache.purgeExpired( expiredInSeconds = 60 )
argTuple = ( name, protocols, vo )
seObj = self.seCache.get( argTuple )
if not seObj:
seObj = StorageElementItem( name, protocols, vo, hideExceptions = hideExceptions )
# Add the StorageElement to the cache for 1/2 hour
self.seCache.add( argTuple, 1800, seObj )
return seObj
class StorageElementItem( object ):
"""
.. class:: StorageElement
common interface to the grid storage element
self.name is the resolved name of the StorageElement i.e CERN-tape
self.options is a dictionary containing the general options defined in the CS e.g. self.options['Backend'] = 'Castor2'
self.storages is a list of the stub objects created by StorageFactory for the protocols found in the CS.
self.localPlugins is a list of the local protocols that were created by StorageFactory
self.remotePlugins is a list of the remote protocols that were created by StorageFactory
self.protocolOptions is a list of dictionaries containing the options found in the CS. (should be removed)
dynamic method :
retransferOnlineFile( lfn )
exists( lfn )
isFile( lfn )
getFile( lfn, localPath = False )
putFile( lfnLocal, sourceSize = 0 ) : {lfn:local}
replicateFile( lfn, sourceSize = 0 )
getFileMetadata( lfn )
getFileSize( lfn )
removeFile( lfn )
prestageFile( lfn, lifetime = 86400 )
prestageFileStatus( lfn )
pinFile( lfn, lifetime = 60 * 60 * 24 )
releaseFile( lfn )
isDirectory( lfn )
getDirectoryMetadata( lfn )
getDirectorySize( lfn )
listDirectory( lfn )
removeDirectory( lfn, recursive = False )
createDirectory( lfn )
putDirectory( lfn )
getDirectory( lfn, localPath = False )
"""
__deprecatedArguments = ["singleFile", "singleDirectory"] # Arguments that are now useless
# Some methods have a different name in the StorageElement and the plugins...
# We could avoid this static list in the __getattr__ by checking the storage plugin and so on
# but fine... let's not be too smart, otherwise it becomes unreadable :-)
__equivalentMethodNames = {"exists" : "exists",
"isFile" : "isFile",
"getFile" : "getFile",
"putFile" : "putFile",
"replicateFile" : "putFile",
"getFileMetadata" : "getFileMetadata",
"getFileSize" : "getFileSize",
"removeFile" : "removeFile",
"prestageFile" : "prestageFile",
"prestageFileStatus" : "prestageFileStatus",
"pinFile" : "pinFile",
"releaseFile" : "releaseFile",
"isDirectory" : "isDirectory",
"getDirectoryMetadata" : "getDirectoryMetadata",
"getDirectorySize" : "getDirectorySize",
"listDirectory" : "listDirectory",
"removeDirectory" : "removeDirectory",
"createDirectory" : "createDirectory",
"putDirectory" : "putDirectory",
"getDirectory" : "getDirectory",
}
# We can set default arguments in __executeMethod which impacts all plugins
__defaultsArguments = {"putFile" : {"sourceSize" : 0 },
"getFile": { "localPath": False },
"prestageFile" : { "lifetime" : 86400 },
"pinFile" : { "lifetime" : 60 * 60 * 24 },
"removeDirectory" : { "recursive" : False },
"getDirectory" : { "localPath" : False },
}
def __init__( self, name, plugins = None, vo = None, hideExceptions = False ):
""" c'tor
:param str name: SE name
:param list plugins: requested storage plugins
:param str vo: VO name; if not given, it is taken from the proxy group of the current proxy
"""
self.methodName = None
if vo:
self.vo = vo
else:
result = getVOfromProxyGroup()
if not result['OK']:
return
self.vo = result['Value']
self.opHelper = Operations( vo = self.vo )
proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
useProxy = ( gConfig.getValue( "/Resources/StorageElements/%s/AccessProtocol.1/Protocol" % name, "UnknownProtocol" )
in proxiedProtocols )
if not useProxy:
useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
if not useProxy:
useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )
self.valid = True
if plugins is None:
res = StorageFactory( useProxy = useProxy, vo = self.vo ).getStorages( name, pluginList = [], hideExceptions = hideExceptions )
else:
res = StorageFactory( useProxy = useProxy, vo = self.vo ).getStorages( name, pluginList = plugins, hideExceptions = hideExceptions )
if not res['OK']:
self.valid = False
self.name = name
self.errorReason = res['Message']
else:
factoryDict = res['Value']
self.name = factoryDict['StorageName']
self.options = factoryDict['StorageOptions']
self.localPlugins = factoryDict['LocalPlugins']
self.remotePlugins = factoryDict['RemotePlugins']
self.storages = factoryDict['StorageObjects']
self.protocolOptions = factoryDict['ProtocolOptions']
self.turlProtocols = factoryDict['TurlProtocols']
for storage in self.storages:
storage.setStorageElement( self )
self.log = gLogger.getSubLogger( "SE[%s]" % self.name )
self.useCatalogURL = gConfig.getValue( '/Resources/StorageElements/%s/UseCatalogURL' % self.name, False )
# 'getTransportURL',
self.readMethods = [ 'getFile',
'prestageFile',
'prestageFileStatus',
'getDirectory']
self.writeMethods = [ 'retransferOnlineFile',
'putFile',
'replicateFile',
'pinFile',
'releaseFile',
'createDirectory',
'putDirectory' ]
self.removeMethods = [ 'removeFile', 'removeDirectory' ]
self.checkMethods = [ 'exists',
'getDirectoryMetadata',
'getDirectorySize',
'getFileSize',
'getFileMetadata',
'listDirectory',
'isDirectory',
'isFile',
]
self.okMethods = [ 'getLocalProtocols',
'getProtocols',
'getRemoteProtocols',
'getStorageElementName',
'getStorageParameters',
'getTransportURL',
'isLocalSE' ]
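# These method groups drive the access checks in isValid() (read/write/remove/check access)
# and determine which calls get reported to accounting in addAccountingOperation().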
self.__fileCatalog = None
def dump( self ):
""" Dump to the logger a summary of the StorageElement items. """
log = self.log.getSubLogger( 'dump', True )
log.verbose( "Preparing dump for StorageElement %s." % self.name )
if not self.valid:
log.debug( "Failed to create StorageElement plugins.", self.errorReason )
return
i = 1
outStr = "\n\n============ Options ============\n"
for key in sorted( self.options ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), self.options[key] )
for storage in self.storages:
outStr = "%s============Protocol %s ============\n" % ( outStr, i )
storageParameters = storage.getParameters()
for key in sorted( storageParameters ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), storageParameters[key] )
i = i + 1
log.verbose( outStr )
#################################################################################################
#
# These are the basic get functions for storage configuration
#
def getStorageElementName( self ):
""" SE name getter """
self.log.getSubLogger( 'getStorageElementName' ).verbose( "The Storage Element name is %s." % self.name )
return S_OK( self.name )
def getChecksumType( self ):
""" get local /Resources/StorageElements/SEName/ChecksumType option if defined, otherwise
global /Resources/StorageElements/ChecksumType
"""
self.log.getSubLogger( 'getChecksumType' ).verbose( "get checksum type for %s." % self.name )
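# The per-SE 'ChecksumType' option takes precedence; otherwise fall back to the global
# /Resources/StorageElements/ChecksumType value (default ADLER32), upper-cased.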
return S_OK( str( gConfig.getValue( "/Resources/StorageElements/ChecksumType", "ADLER32" ) ).upper()
if "ChecksumType" not in self.options else str( self.options["ChecksumType"] ).upper() )
def getStatus( self ):
"""
Return Status of the SE, a dictionary with:
- Read: True (is allowed), False (it is not allowed)
- Write: True (is allowed), False (it is not allowed)
- Remove: True (is allowed), False (it is not allowed)
- Check: True (is allowed), False (it is not allowed).
NB: Check always allowed IF Read is allowed (regardless of what is set in the Check option of the configuration)
- DiskSE: True if TXDY with Y > 0 (defaults to True)
- TapeSE: True if TXDY with X > 0 (defaults to False)
- TotalCapacityTB: float (-1 if not defined)
- DiskCacheTB: float (-1 if not defined)
"""
self.log.getSubLogger( 'getStatus' ).verbose( "determining status of %s." % self.name )
retDict = {}
if not self.valid:
retDict['Read'] = False
retDict['Write'] = False
retDict['Remove'] = False
retDict['Check'] = False
retDict['DiskSE'] = False
retDict['TapeSE'] = False
retDict['TotalCapacityTB'] = -1
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
# If nothing is defined in the CS Access is allowed
# If something is defined, then it must be set to Active
retDict['Read'] = not ( 'ReadAccess' in self.options and self.options['ReadAccess'] not in ( 'Active', 'Degraded' ) )
retDict['Write'] = not ( 'WriteAccess' in self.options and self.options['WriteAccess'] not in ( 'Active', 'Degraded' ) )
retDict['Remove'] = not ( 'RemoveAccess' in self.options and self.options['RemoveAccess'] not in ( 'Active', 'Degraded' ) )
if retDict['Read']:
retDict['Check'] = True
else:
retDict['Check'] = not ( 'CheckAccess' in self.options and self.options['CheckAccess'] not in ( 'Active', 'Degraded' ) )
diskSE = True
tapeSE = False
if 'SEType' in self.options:
# Type should follow the convention TXDY
seType = self.options['SEType']
diskSE = re.search( 'D[1-9]', seType ) is not None
tapeSE = re.search( 'T[1-9]', seType ) is not None
retDict['DiskSE'] = diskSE
retDict['TapeSE'] = tapeSE
try:
retDict['TotalCapacityTB'] = float( self.options['TotalCapacityTB'] )
except Exception:
retDict['TotalCapacityTB'] = -1
try:
retDict['DiskCacheTB'] = float( self.options['DiskCacheTB'] )
except Exception:
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
def isValid( self, operation = '' ):
""" check CS/RSS statuses for :operation:
:param str operation: operation name
"""
log = self.log.getSubLogger( 'isValid', True )
log.verbose( "Determining if the StorageElement %s is valid for VO %s" % ( self.name, self.vo ) )
if not self.valid:
log.debug( "Failed to create StorageElement plugins.", self.errorReason )
return S_ERROR( "SE.isValid: Failed to create StorageElement plugins: %s" % self.errorReason )
# Check if the Storage Element is eligible for the user's VO
if 'VO' in self.options and not self.vo in self.options['VO']:
log.debug( "StorageElement is not allowed for VO", self.vo )
return DError( errno.EACCES, "StorageElement.isValid: StorageElement is not allowed for VO" )
log.verbose( "Determining if the StorageElement %s is valid for %s" % ( self.name, operation ) )
if ( not operation ) or ( operation in self.okMethods ):
return S_OK()
# Determine whether the StorageElement is valid for checking, reading, writing
res = self.getStatus()
if not res[ 'OK' ]:
log.debug( "Could not call getStatus", res['Message'] )
return S_ERROR( "SE.isValid could not call the getStatus method" )
checking = res[ 'Value' ][ 'Check' ]
reading = res[ 'Value' ][ 'Read' ]
writing = res[ 'Value' ][ 'Write' ]
removing = res[ 'Value' ][ 'Remove' ]
# Determine whether the requested operation can be fulfilled
if ( not operation ) and ( not reading ) and ( not writing ) and ( not checking ):
log.debug( "Read, write and check access not permitted." )
return DError( errno.EACCES, "SE.isValid: Read, write and check access not permitted." )
# The supplied operation can be 'Read','Write' or any of the possible StorageElement methods.
if ( operation in self.readMethods ) or ( operation.lower() in ( 'read', 'readaccess' ) ):
operation = 'ReadAccess'
elif operation in self.writeMethods or ( operation.lower() in ( 'write', 'writeaccess' ) ):
operation = 'WriteAccess'
elif operation in self.removeMethods or ( operation.lower() in ( 'remove', 'removeaccess' ) ):
operation = 'RemoveAccess'
elif operation in self.checkMethods or ( operation.lower() in ( 'check', 'checkaccess' ) ):
operation = 'CheckAccess'
else:
log.debug( "The supplied operation is not known.", operation )
return DError( DErrno.ENOMETH , "SE.isValid: The supplied operation is not known." )
log.debug( "check the operation: %s " % operation )
# Check if the operation is valid
if operation == 'CheckAccess':
if not reading:
if not checking:
log.debug( "Check access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Check access not currently permitted." )
if operation == 'ReadAccess':
if not reading:
log.debug( "Read access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Read access not currently permitted." )
if operation == 'WriteAccess':
if not writing:
log.debug( "Write access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Write access not currently permitted." )
if operation == 'RemoveAccess':
if not removing:
log.debug( "Remove access not currently permitted." )
return DError( errno.EACCES, "SE.isValid: Remove access not currently permitted." )
return S_OK()
def getPlugins( self ):
""" Get the list of all the plugins defined for this Storage Element
"""
self.log.getSubLogger( 'getPlugins' ).verbose( "Obtaining all plugins of %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
allPlugins = self.localPlugins + self.remotePlugins
return S_OK( allPlugins )
def getRemotePlugins( self ):
""" Get the list of all the remote access protocols defined for this Storage Element
"""
self.log.getSubLogger( 'getRemotePlugins' ).verbose( "Obtaining remote protocols for %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
return S_OK( self.remotePlugins )
def getLocalPlugins( self ):
""" Get the list of all the local access protocols defined for this Storage Element
"""
self.log.getSubLogger( 'getLocalPlugins' ).verbose( "Obtaining local protocols for %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
return S_OK( self.localPlugins )
def getStorageParameters( self, plugin ):
""" Get plugin specific options
:param plugin : plugin we are interested in
"""
log = self.log.getSubLogger( 'getStorageParameters' )
log.verbose( "Obtaining storage parameters for %s plugin %s." % ( self.name,
plugin ) )
res = self.getPlugins()
if not res['OK']:
return res
availablePlugins = res['Value']
if not plugin in availablePlugins:
errStr = "Requested plugin not available for SE."
log.debug( errStr, '%s for %s' % ( plugin, self.name ) )
return S_ERROR( errStr )
for storage in self.storages:
storageParameters = storage.getParameters()
if storageParameters['PluginName'] == plugin:
return S_OK( storageParameters )
errStr = "Requested plugin supported but no object found."
log.debug( errStr, "%s for %s" % ( plugin, self.name ) )
return S_ERROR( errStr )
def negociateProtocolWithOtherSE( self, sourceSE, protocols = None ):
""" Negotiate what protocol could be used for a third party transfer
between the sourceSE and ourselves. If protocols is given,
the chosen protocol has to be among those
:param sourceSE : storageElement instance of the sourceSE
:param protocols: protocol restriction list
:return a list protocols that fits the needs, or None
"""
# We should actually separate source and destination protocols
# For example, an SRM can get as a source an xroot or gsiftp url...
# but with the current implementation, we get only srm
destProtocols = set( [destStorage.protocolParameters['Protocol'] for destStorage in self.storages] )
sourceProtocols = set( [sourceStorage.protocolParameters['Protocol'] for sourceStorage in sourceSE.storages] )
commonProtocols = destProtocols & sourceProtocols
if protocols:
protocols = set( list( protocols ) ) if protocols else set()
commonProtocols = commonProtocols & protocols
return S_OK( list( commonProtocols ) )
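# Illustrative example (assumption, not part of the original source):
#   res = destSE.negociateProtocolWithOtherSE( srcSE, protocols = [ 'srm', 'root' ] )
#   # res['Value'] could be [ 'srm' ] if that is the only protocol exposed by both SEs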
#################################################################################################
#
# These are the basic get functions for lfn manipulation
#
def __getURLPath( self, url ):
""" Get the part of the URL path below the basic storage path.
This path must coincide with the LFN of the file in order to be compliant with the DIRAC conventions.
"""
log = self.log.getSubLogger( '__getURLPath' )
log.verbose( "Getting path from url in %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
res = pfnparse( url )
if not res['OK']:
return res
fullURLPath = '%s/%s' % ( res['Value']['Path'], res['Value']['FileName'] )
# Check all available storages and check whether the url is for that protocol
urlPath = ''
for storage in self.storages:
res = storage.isNativeURL( url )
if res['OK']:
if res['Value']:
parameters = storage.getParameters()
saPath = parameters['Path']
if not saPath:
# If the sa path doesn't exist then the url path is the entire string
urlPath = fullURLPath
else:
if re.search( saPath, fullURLPath ):
# Remove the sa path from the fullURLPath
urlPath = fullURLPath.replace( saPath, '' )
if urlPath:
return S_OK( urlPath )
# This should never happen. DANGER!!
errStr = "Failed to get the url path for any of the protocols!!"
log.debug( errStr )
return S_ERROR( errStr )
def getLFNFromURL( self, urls ):
""" Get the LFN from the PFNS .
:param lfn : input lfn or lfns (list/dict)
"""
result = checkArgumentFormat( urls )
if result['OK']:
urlDict = result['Value']
else:
errStr = "Supplied urls must be string, list of strings or a dictionary."
self.log.getSubLogger( 'getLFNFromURL' ).debug( errStr )
return DError( errno.EINVAL, errStr )
retDict = { "Successful" : {}, "Failed" : {} }
for url in urlDict:
res = self.__getURLPath( url )
if res["OK"]:
retDict["Successful"][url] = res["Value"]
else:
retDict["Failed"][url] = res["Message"]
return S_OK( retDict )
###########################################################################################
#
# This is the generic wrapper for file operations
#
def getURL( self, lfn, protocol = False, replicaDict = None ):
""" execute 'getTransportURL' operation.
:param str lfn: string, list or dictionary of lfns
:param protocol: if no protocol is specified, we will request self.turlProtocols
:param replicaDict: optional results from the File Catalog replica query
"""
self.log.getSubLogger( 'getURL' ).verbose( "Getting accessUrl %s for lfn in %s." % ( "(%s)" % protocol if protocol else "", self.name ) )
if not protocol:
protocols = self.turlProtocols
elif type( protocol ) is ListType:
protocols = protocol
elif type( protocol ) == type( '' ):
protocols = [protocol]
self.methodName = "getTransportURL"
result = self.__executeMethod( lfn, protocols = protocols )
return result
def __isLocalSE( self ):
""" Test if the Storage Element is local in the current context
"""
self.log.getSubLogger( 'LocalSE' ).verbose( "Determining whether %s is a local SE." % self.name )
import DIRAC
localSEs = getSEsForSite( DIRAC.siteName() )['Value']
if self.name in localSEs:
return S_OK( True )
else:
return S_OK( False )
def __getFileCatalog( self ):
if not self.__fileCatalog:
self.__fileCatalog = FileCatalog( vo = self.vo )
return self.__fileCatalog
def __generateURLDict( self, lfns, storage, replicaDict = {} ):
""" Generates a dictionary (url : lfn ), where the url are constructed
from the lfn using the constructURLFromLFN method of the storage plugins.
:param: lfns : dictionary {lfn:whatever}
:returns dictionary {constructed url : lfn}
"""
log = self.log.getSubLogger( "__generateURLDict" )
log.verbose( "generating url dict for %s lfn in %s." % ( len( lfns ), self.name ) )
urlDict = {} # url : lfn
failed = {} # lfn : string with errors
for lfn in lfns:
if self.useCatalogURL:
# Is this self.name alias proof?
url = replicaDict.get( lfn, {} ).get( self.name, '' )
if url:
urlDict[url] = lfn
continue
else:
fc = self.__getFileCatalog()
result = fc.getReplicas( lfn )
if not result['OK']:
failed[lfn] = result['Message']
url = result['Value']['Successful'].get( lfn, {} ).get( self.name, '' )
if not url:
failed[lfn] = 'Failed to get catalog replica'
else:
# Update the URL according to the current SE description
result = returnSingleResult( storage.updateURL( url ) )
if not result['OK']:
failed[lfn] = result['Message']
else:
urlDict[result['Value']] = lfn
else:
result = storage.constructURLFromLFN( lfn, withWSUrl = True )
if not result['OK']:
errStr = result['Message']
log.debug( errStr, 'for %s' % ( lfn ) )
failed[lfn] = "%s %s" % ( failed[lfn], errStr ) if lfn in failed else errStr
else:
urlDict[result['Value']] = lfn
res = S_OK( {'Successful': urlDict, 'Failed' : failed} )
# res['Failed'] = failed
return res
def __executeMethod( self, lfn, *args, **kwargs ):
""" Forward the call to each storage in turn until one works.
The method to be executed is stored in self.methodName
:param lfn : string, list or dictionary
:param *args : variable amount of non-keyword arguments. SHOULD BE EMPTY
:param **kwargs : keyword arguments
:returns S_OK( { 'Failed': {lfn : reason} , 'Successful': {lfn : value} } )
The Failed dict contains the lfn only if the operation failed on all the storages
The Successful dict contains the value returned by the successful storages.
"""
removedArgs = {}
log = self.log.getSubLogger( '__executeMethod' )
log.verbose( "preparing the execution of %s" % ( self.methodName ) )
# args should normally be empty to avoid problems...
if len( args ):
log.verbose( "args should be empty!%s" % args )
# because there is normally only one kw argument, I can move it from args to kwargs
methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} ).keys()
if len( methDefaultArgs ):
kwargs[methDefaultArgs[0] ] = args[0]
args = args[1:]
log.verbose( "put it in kwargs, but dirty and might be dangerous!args %s kwargs %s" % ( args, kwargs ) )
# We check the deprecated arguments
for depArg in StorageElementItem.__deprecatedArguments:
if depArg in kwargs:
log.verbose( "%s is not an allowed argument anymore. Please change your code!" % depArg )
removedArgs[depArg] = kwargs[depArg]
del kwargs[depArg]
# Set default argument if any
methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} )
for argName in methDefaultArgs:
if argName not in kwargs:
log.debug( "default argument %s for %s not present.\
Setting value %s." % ( argName, self.methodName, methDefaultArgs[argName] ) )
kwargs[argName] = methDefaultArgs[argName]
res = checkArgumentFormat( lfn )
if not res['OK']:
errStr = "Supplied lfns must be string, list of strings or a dictionary."
log.debug( errStr )
return res
lfnDict = res['Value']
log.verbose( "Attempting to perform '%s' operation with %s lfns." % ( self.methodName, len( lfnDict ) ) )
res = self.isValid( operation = self.methodName )
if not res['OK']:
return res
else:
if not self.valid:
return S_ERROR( self.errorReason )
successful = {}
failed = {}
localSE = self.__isLocalSE()['Value']
# Try all of the storages one by one
for storage in self.storages:
# Determine whether to use this storage object
storageParameters = storage.getParameters()
if not storageParameters:
log.debug( "Failed to get storage parameters.", "%s %s" % ( self.name, res['Message'] ) )
continue
pluginName = storageParameters['PluginName']
if not lfnDict:
log.debug( "No lfns to be attempted for %s protocol." % pluginName )
continue
if not ( pluginName in self.remotePlugins ) and not localSE and not storage.pluginName == "Proxy":
# If the SE is not local then we can't use local protocols
log.debug( "Local protocol not appropriate for remote use: %s." % pluginName )
continue
log.verbose( "Generating %s protocol URLs for %s." % ( len( lfnDict ), pluginName ) )
replicaDict = kwargs.pop( 'replicaDict', {} )
if storage.pluginName != "Proxy":
res = self.__generateURLDict( lfnDict, storage, replicaDict = replicaDict )
urlDict = res['Value']['Successful'] # url : lfn
failed.update( res['Value']['Failed'] )
else:
urlDict = dict( [ ( lfn, lfn ) for lfn in lfnDict ] )
if not len( urlDict ):
log.verbose( "__executeMethod No urls generated for protocol %s." % pluginName )
else:
log.verbose( "Attempting to perform '%s' for %s physical files" % ( self.methodName, len( urlDict ) ) )
fcn = None
if hasattr( storage, self.methodName ) and callable( getattr( storage, self.methodName ) ):
fcn = getattr( storage, self.methodName )
if not fcn:
return DError( DErrno.ENOMETH, "SE.__executeMethod: unable to invoke %s, it isn't a member function of storage" % self.methodName )
urlsToUse = {} # url : the value of the lfn dictionary for the lfn of this url
for url in urlDict:
urlsToUse[url] = lfnDict[urlDict[url]]
startDate = datetime.datetime.utcnow()
startTime = time.time()
res = fcn( urlsToUse, *args, **kwargs )
elapsedTime = time.time() - startTime
self.addAccountingOperation( urlsToUse, startDate, elapsedTime, storageParameters, res )
if not res['OK']:
errStr = "Completely failed to perform %s." % self.methodName
log.debug( errStr, 'with plugin %s: %s' % ( pluginName, res['Message'] ) )
for lfn in urlDict.values():
if lfn not in failed:
failed[lfn] = ''
failed[lfn] = "%s %s" % ( failed[lfn], res['Message'] ) if failed[lfn] else res['Message']
else:
for url, lfn in urlDict.items():
if url not in res['Value']['Successful']:
if lfn not in failed:
failed[lfn] = ''
if url in res['Value']['Failed']:
self.log.debug( res['Value']['Failed'][url] )
failed[lfn] = "%s %s" % ( failed[lfn], res['Value']['Failed'][url] ) if failed[lfn] else res['Value']['Failed'][url]
else:
errStr = 'No error returned from plug-in'
failed[lfn] = "%s %s" % ( failed[lfn], errStr ) if failed[lfn] else errStr
else:
successful[lfn] = res['Value']['Successful'][url]
if lfn in failed:
failed.pop( lfn )
lfnDict.pop( lfn )
gDataStoreClient.commit()
return S_OK( { 'Failed': failed, 'Successful': successful } )
def __getattr__( self, name ):
""" Forwards the equivalent Storage calls to __executeMethod"""
# We take either the equivalent name, or the name itself
self.methodName = StorageElementItem.__equivalentMethodNames.get( name, None )
if self.methodName:
return self.__executeMethod
raise AttributeError( "StorageElement does not have a method '%s'" % name )
def addAccountingOperation( self, lfns, startDate, elapsedTime, storageParameters, callRes ):
"""
Generates a DataOperation accounting record if it needs to be, and adds it to the DataStore client cache
:param lfns : list of lfns on which we attempted the operation
:param startDate : datetime, start of the operation
:param elapsedTime : time (seconds) the operation took
:param storageParameters : the parameters of the plugins used to perform the operation
:param callRes : the return of the method call, S_OK or S_ERROR
The operation is generated with the OperationType "se.methodName"
The TransferSize and TransferTotal for directory methods actually take into
account the files inside the directory, and not the number of directories given
as parameter
"""
if self.methodName not in ( self.readMethods + self.writeMethods + self.removeMethods ):
return
baseAccountingDict = {}
baseAccountingDict['OperationType'] = 'se.%s' % self.methodName
baseAccountingDict['User'] = getProxyInfo().get( 'Value', {} ).get( 'username', 'unknown' )
baseAccountingDict['RegistrationTime'] = 0.0
baseAccountingDict['RegistrationOK'] = 0
baseAccountingDict['RegistrationTotal'] = 0
# if it is a get method, then source and destination of the transfer should be inverted
if self.methodName in ( 'putFile', 'getFile' ):
baseAccountingDict['Destination'] = siteName()
baseAccountingDict[ 'Source'] = self.name
else:
baseAccountingDict['Destination'] = self.name
baseAccountingDict['Source'] = siteName()
baseAccountingDict['TransferTotal'] = 0
baseAccountingDict['TransferOK'] = 0
baseAccountingDict['TransferSize'] = 0
baseAccountingDict['TransferTime'] = 0.0
baseAccountingDict['FinalStatus'] = 'Successful'
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( baseAccountingDict )
oDataOperation.setStartTime( startDate )
oDataOperation.setEndTime( startDate + datetime.timedelta( seconds = elapsedTime ) )
oDataOperation.setValueByKey( 'TransferTime', elapsedTime )
oDataOperation.setValueByKey( 'Protocol', storageParameters.get( 'Protocol', 'unknown' ) )
if not callRes['OK']:
# Everything failed
oDataOperation.setValueByKey( 'TransferTotal', len( lfns ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
else:
succ = callRes.get( 'Value', {} ).get( 'Successful', {} )
failed = callRes.get( 'Value', {} ).get( 'Failed', {} )
totalSize = 0
# We don't take len(lfns) in order to make two
# separate entries in case of few failures
totalSucc = len( succ )
if self.methodName in ( 'putFile', 'getFile' ):
# putFile and getFile return for each entry
# in the successful dir the size of the corresponding file
totalSize = sum( succ.values() )
elif self.methodName in ( 'putDirectory', 'getDirectory' ):
# putDirectory and getDirectory return for each dir name
# a dictionary with the keys 'Files' and 'Size'
totalSize = sum( val.get( 'Size', 0 ) for val in succ.values() if isinstance( val, dict ) )
totalSucc = sum( val.get( 'Files', 0 ) for val in succ.values() if isinstance( val, dict ) )
oDataOperation.setValueByKey( 'TransferOK', len( succ ) )
oDataOperation.setValueByKey( 'TransferSize', totalSize )
oDataOperation.setValueByKey( 'TransferTotal', totalSucc )
oDataOperation.setValueByKey( 'TransferOK', totalSucc )
if callRes['Value']['Failed']:
oDataOperationFailed = copy.deepcopy( oDataOperation )
oDataOperationFailed.setValueByKey( 'TransferTotal', len( failed ) )
oDataOperationFailed.setValueByKey( 'TransferOK', 0 )
oDataOperationFailed.setValueByKey( 'TransferSize', 0 )
oDataOperationFailed.setValueByKey( 'FinalStatus', 'Failed' )
accRes = gDataStoreClient.addRegister( oDataOperationFailed )
if not accRes['OK']:
self.log.error( "Could not send failed accounting report", accRes['Message'] )
accRes = gDataStoreClient.addRegister( oDataOperation )
if not accRes['OK']:
self.log.error( "Could not send accounting report", accRes['Message'] )
StorageElement = StorageElementCache()
| gpl-3.0 | 3,227,499,265,825,615,000 | 40.34965 | 141 | 0.621681 | false |
lukasmartinelli/py14 | py14/scope.py | 1 | 2384 | import ast
from contextlib import contextmanager
def add_scope_context(node):
"""Provide to scope context to all nodes"""
return ScopeTransformer().visit(node)
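# Illustrative usage (not part of the original file):
#   tree = ast.parse("def f():\n    x = 1\n    return x\n")
#   add_scope_context(tree)
#   # every visited node now carries a `scopes` attribute (a ScopeList)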
class ScopeMixin(object):
"""
Adds a scope property with the current scope (function, module)
a node is part of.
"""
scopes = []
@contextmanager
def enter_scope(self, node):
if self._is_scopable_node(node):
self.scopes.append(node)
yield
self.scopes.pop()
else:
yield
@property
def scope(self):
try:
return self.scopes[-1]
except IndexError:
return None
def _is_scopable_node(self, node):
scopes = [ast.Module, ast.FunctionDef, ast.For, ast.If, ast.With]
return len([s for s in scopes if isinstance(node, s)]) > 0
class ScopeList(list):
"""
Wraps around a list of scopes and provides a find method for finding
the definition of a variable
"""
def find(self, lookup):
"""Find definition of variable lookup."""
def is_match(var):
return ((isinstance(var, ast.alias) and var.name == lookup) or
(isinstance(var, ast.Name) and var.id == lookup))
def find_definition(scope, var_attr="vars"):
for var in getattr(scope, var_attr):
if is_match(var):
return var
for scope in self:
defn = find_definition(scope)
if not defn and hasattr(scope, "body_vars"):
defn = find_definition(scope, "body_vars")
if not defn and hasattr(scope, "orelse_vars"):
defn = find_definition(scope, "orelse_vars")
if defn:
return defn
def find_import(self, lookup):
for scope in reversed(self):
if hasattr(scope, "imports"):
for imp in scope.imports:
if imp.name == lookup:
return imp
class ScopeTransformer(ast.NodeTransformer, ScopeMixin):
"""
Adds a scope attribute to each node.
The scope contains the current scope (function, module, for loop)
a node is part of.
"""
def visit(self, node):
with self.enter_scope(node):
node.scopes = ScopeList(self.scopes)
return super(ScopeTransformer, self).visit(node)
| mit | 1,515,808,432,834,027,300 | 28.432099 | 74 | 0.570889 | false |
deter-project/magi | magi/tests/023_multicastNetworkTestServer.py | 1 | 1888 | #!/usr/bin/env python
import unittest2
import logging
import time
from magi.messaging.magimessage import MAGIMessage
from magi.messaging.transportMulticast import MulticastTransport
from magi.testbed import testbed
from magi.messaging.api import Messenger
class TransportTest(unittest2.TestCase):
"""
Testing of basics in MulticastTransport class
"""
def setUp(self):
#TODO: Test needs to be fixed
return
self.messenger = Messenger("testmessenger")
self.conn = MulticastTransport('239.255.1.1', 18808, testbed.controlip)
self.messenger.addTransport(self.conn, True)
self.messenger.join('multicastgroup', 'tester')
self.msgid = 1234
def sendMsg(self):
self.msgid += 1
msg = MAGIMessage()
msg.msgid = self.msgid
msg.contenttype = MAGIMessage.NONE
msg.src = "servernode"
msg.srcdock = "serverdock"
msg.dstgroups = ['multicastgroup']
msg.data = "success"
msg._routed = [self.conn.fileno()]
self.messenger.thread.sendDirect(msg)
while self.messenger.thread.pollMap[self.conn.fileno()].outmessages:
time.sleep(0.2) #waiting for message to be sent
def test_BasicRequest(self):
""" Testing multicast transport - Server """
#TODO: Test needs to be fixed
return
msg = self.messenger.nextMessage(block=True)
self.assertEqual(msg.src, "clientnode", "Source error, Expected: clientnode, Received: " + msg.src)
self.assertEqual(msg.srcdock, "clientdock", "Dock error, Expected: clientdock, Received: " + msg.srcdock)
self.assertEqual(msg.data, "testing", "Data error, Expected: testing, Received: " + msg.data)
self.sendMsg()
if __name__ == '__main__':
hdlr = logging.StreamHandler()
hdlr.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s', '%m-%d %H:%M:%S'))
root = logging.getLogger()
root.handlers = []
root.addHandler(hdlr)
root.setLevel(logging.DEBUG)
unittest2.main(verbosity=2)
| gpl-2.0 | -2,350,573,303,916,748,000 | 29.95082 | 110 | 0.723517 | false |
faroit/loudness | python/tests/test_FrameGenerator.py | 1 | 1471 | import numpy as np
import loudness as ln
fs = 32000
N = 10000
x = np.arange(0, N)
# Input SignalBank
bufSize = 32
nEars = 2
nChannels = 1
inputBank = ln.SignalBank()
inputBank.initialize(nEars, nChannels, bufSize, int(fs))
# Frame generator
frameSize = 2048
hopSize = 32
startAtWindowCentre = True
frameGen = ln.FrameGenerator(frameSize, hopSize, startAtWindowCentre)
frameGen.initialize(inputBank)
outputBank = frameGen.getOutput()
nBlocks = int(x.size / bufSize)
if startAtWindowCentre:
nProcessedBlocks = int(nBlocks - 0.5 * frameSize / hopSize + 1)
else:
nProcessedBlocks = int(nBlocks - frameSize / hopSize + 1)
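# With startAtWindowCentre the first frame is centred on sample zero, so only half a frame
# (frameSize / 2 samples) has to be buffered before the first complete frame appears;
# otherwise a whole frame must be accumulated first.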
frames = np.zeros((nEars, nProcessedBlocks, frameSize))
frameIndex = 0
for block in range(nBlocks):
# Update input bank
idx = block * bufSize
inputBank.setSignal(0, 0, x[idx:idx + bufSize])
inputBank.setSignal(1, 0, x[idx:idx + bufSize])
# process it
frameGen.process(inputBank)
# get output
if(outputBank.getTrig()):
frames[:, frameIndex, :] = outputBank.getSignals().reshape((2, frameSize))
frameIndex += 1
# Check frames are correct
if startAtWindowCentre:
x = np.hstack((np.zeros(int(np.ceil((frameSize - 1) / 2.0))), x))
for ear in range(nEars):
for i, frame in enumerate(frames[ear]):
start = i * hopSize
if all(frame == x[start:start + frameSize]):
print("Frame number %d correct" % i)
else:
print("Frame number %d incorrect" % i)
| gpl-3.0 | -7,065,117,697,718,872,000 | 25.745455 | 82 | 0.674371 | false |
robinkraft/cloudless | src/cloudless/train/predict.py | 1 | 7335 | import os
import caffe
import numpy as np
import plyvel
import skimage
from caffe_pb2 import Datum
import constants
def predict(image_path):
"""
Takes a single image, and makes a prediction whether it has a cloud or not.
"""
print "Generating prediction for %s..." % image_path
net, transformer = _initialize_caffe()
im = caffe.io.load_image(image_path)
prob = _predict_image(im, net, transformer)
print "Probability this image has a cloud: {0:.2f}%".format(prob)
def test_validation():
"""
Takes validation images and runs them through a trained model to see how
well they do. Generates statistics like precision and recall, F1, and a confusion matrix,
in order to gauge progress.
"""
print "Generating predictions for validation images..."
validation_data = _load_validation_data()
target_details = _run_through_caffe(validation_data)
statistics = _calculate_positives_negatives(target_details)
accuracy = _calculate_accuracy(statistics)
precision = _calculate_precision(statistics)
recall = _calculate_recall(statistics)
f1 = _calculate_f1(precision, recall)
# TODO: Write these out to a file as well as the screen.
results = ""
results += "\n"
results += "\nStatistics on validation dataset using threshold %f:" % constants.THRESHOLD
results += "\n\tAccuracy: {0:.2f}%".format(accuracy)
results += "\n\tPrecision: %.2f" % precision
results += "\n\tRecall: %.2f" % recall
results += "\n\tF1 Score: %.2f" % f1
results += "\n"
results += _print_confusion_matrix(statistics)
print results
with open(constants.OUTPUT_LOG_PREFIX + ".statistics.txt", "w") as f:
f.write(results)
def _load_validation_data():
"""
Loads all of our validation data from our leveldb database, producing unrolled numpy input
vectors ready to test along with their correct, expected target values.
"""
print "\tLoading validation data..."
input_vectors = []
expected_targets = []
db = plyvel.DB(constants.VALIDATION_FILE)
for key, value in db:
datum = Datum()
datum.ParseFromString(value)
data = np.fromstring(datum.data, dtype=np.uint8)
data = np.reshape(data, (3, constants.HEIGHT, constants.WIDTH))
# Move the color channel to the end to match what Caffe wants.
data = np.swapaxes(data, 0, 2) # Swap channel with width.
data = np.swapaxes(data, 0, 1) # Swap width with height, to yield final h x w x channel.
input_vectors.append(data)
expected_targets.append(datum.label)
db.close()
print "\t\tValidation data has %d images" % len(input_vectors)
return {
"input_vectors": np.asarray(input_vectors),
"expected_targets": np.asarray(expected_targets)
}
def _initialize_caffe():
"""
Initializes Caffe to prepare to run some data through the model for inference.
"""
caffe.set_mode_gpu()
net = caffe.Net(constants.DEPLOY_FILE, constants.WEIGHTS_FINETUNED, caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
# PIL.Image loads the data with the channel last.
# TODO: Think through whether these should be BGR during training and validation.
transformer.set_transpose("data", (2, 0, 1))
# Mean pixel.
transformer.set_mean("data", np.load(constants.TRAINING_MEAN_PICKLE).mean(1).mean(1))
# The reference model operates on images in [0, 255] range instead of [0, 1].
transformer.set_raw_scale("data", 255)
# The reference model has channels in BGR order instead of RGB.
transformer.set_channel_swap("data", (2, 1, 0))
net.blobs["data"].reshape(1, 3, constants.INFERENCE_HEIGHT, constants.INFERENCE_WIDTH)
return (net, transformer)
def _run_through_caffe(validation_data):
"""
Runs our validation images through Caffe.
"""
print "\tInitializing Caffe..."
net, transformer = _initialize_caffe()
print "\tComputing probabilities using Caffe..."
results = []
for idx in range(len(validation_data["input_vectors"])):
im = validation_data["input_vectors"][idx]
prob = _predict_image(im, net, transformer)
expected_target = validation_data["expected_targets"][idx]
predicted_target = 0
if prob >= constants.THRESHOLD:
predicted_target = 1
results.append({
"expected_target": expected_target,
"predicted_target": predicted_target
})
return results
def _predict_image(im, net, transformer):
"""
Given a caffe.io.load_image, returns the probability that it contains a cloud.
"""
net.blobs["data"].data[...] = transformer.preprocess("data", im)
out = net.forward()
probs = out["prob"][0]
prob_cloud = probs[1] * 100.0
return prob_cloud
def _calculate_positives_negatives(target_details):
"""
Takes expected and actual target values, generating true and false positives and negatives,
including the actual correct # of positive and negative values.
"""
true_positive = 0
true_negative = 0
false_negative = 0
false_positive = 0
actual_positive = 0
actual_negative = 0
for idx in range(len(target_details)):
predicted_target = target_details[idx]["predicted_target"]
expected_target = target_details[idx]["expected_target"]
if expected_target == 1:
actual_positive = actual_positive + 1
else:
actual_negative = actual_negative + 1
if predicted_target == 1 and expected_target == 1:
true_positive = true_positive + 1
elif predicted_target == 0 and expected_target == 0:
true_negative = true_negative + 1
elif predicted_target == 1 and expected_target == 0:
false_positive = false_positive + 1
elif predicted_target == 0 and expected_target == 1:
false_negative = false_negative + 1
return {
"true_positive": float(true_positive),
"false_positive": float(false_positive),
"actual_positive": float(actual_positive),
"true_negative": float(true_negative),
"false_negative": float(false_negative),
"actual_negative": float(actual_negative),
}
def _calculate_accuracy(s):
top = (s["true_positive"] + s["true_negative"])
bottom = (s["actual_positive"] + s["actual_negative"])
return (top / bottom) * 100.0
def _calculate_precision(s):
return s["true_positive"] / (s["true_positive"] + s["false_positive"])
def _calculate_recall(s):
return s["true_positive"] / (s["true_positive"] + s["false_negative"])
def _calculate_f1(precision, recall):
return 2.0 * ((precision * recall) / (precision + recall))
def _print_confusion_matrix(s):
results = ""
results += "\nConfusion matrix:"
results += "\n\t\t\t\tPositive\t\tNegative"
results += "\nPositive (%d)\t\t\tTrue Positive (%d)\tFalse Positive (%d)" % \
(s["actual_positive"], s["true_positive"], s["false_positive"])
results += "\nNegative (%d)\t\t\tFalse Negative (%d)\tTrue Negative (%d)" % \
(s["actual_negative"], s["false_negative"], s["true_negative"])
return results
| apache-2.0 | -1,540,303,570,774,137,900 | 33.763033 | 96 | 0.647307 | false |