repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
sergiorb/askkit | askkit/settings.py | 1 | 12452 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Django settings for askkit project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if 'SECRET_KEY' in os.environ:
SECRET_KEY = os.environ['SECRET_KEY']
else:
SECRET_KEY = 'mysecretkey'
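# NOTE: the hard-coded fallback above is only a development placeholder. A real
# key can be generated with Django's own helper, e.g. (illustrative command):
#   python -c "from django.utils.crypto import get_random_string; print(get_random_string(50))"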
# SECURITY WARNING: don't run with debug turned on in production!
if 'DJANGO_DEBUG' in os.environ:
DEBUG = True
TEMPLATE_DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = False
# SSL settings
if not DEBUG:
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
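# The hardening above only applies when DEBUG is off. SECURE_PROXY_SSL_HEADER
# assumes requests arrive through a TLS-terminating proxy/load balancer that sets
# X-Forwarded-Proto (e.g. Elastic Beanstalk's ELB); it should not be trusted on a
# server that is reachable directly.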
ADMINS = (('Sergio', '[email protected]'),)
ALLOWED_HOSTS = ['localhost', 'askkit-dev-env.elasticbeanstalk.com', 'askkit-prod-env.elasticbeanstalk.com', 'askkit.net', 'www.askkit.net',]
# Application definition
INSTALLED_APPS = (
#'admin_tools',
#'admin_tools.theming',
#'admin_tools.menu',
#'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# The Django sites framework is required
'django.contrib.sites',
'core',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.facebook',
'users',
'questions',
#'debug_toolbar',
'crispy_forms',
#'rest_framework',
#'betterforms',
'datetimewidget',
'redactor',
'imagekit',
'captcha',
'django_ses',
'storages',
'admin_honeypot',
'compressor',
'djangosecure',
'sanitizer',
)
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'core.middleware.LocaleMiddleware',
'htmlmin.middleware.HtmlMinifyMiddleware',
'htmlmin.middleware.MarkRequestMiddleware',
)
ROOT_URLCONF = 'askkit.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# `allauth` needs this from django
'django.core.context_processors.request',
# `allauth` specific context processors
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'core.context_processors.common_timezones',
'core.context_processors.debug',
'core.context_processors.get_adsense_user',
'core.context_processors.get_adsense_main',
'core.context_processors.get_adsense_yes',
'core.context_processors.get_analytics_id',
#'core.context_processors.current_timezone',
'django.template.context_processors.i18n',
],
},
},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'askkit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
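# On AWS Elastic Beanstalk the RDS_* variables are injected automatically when an
# RDS instance is attached to the environment; otherwise the SQLite fallback above
# is used. Illustrative (assumed) values for pointing at a local MySQL instead:
#   export RDS_DB_NAME=askkit RDS_USERNAME=askkit RDS_PASSWORD=secret \
#          RDS_HOSTNAME=127.0.0.1 RDS_PORT=3306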
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
#LANGUAGE_COOKIE_NAME = 'askkit_language'
LANGUAGES = (
('en', _('English')),
#('es', _('Spanish')),
#('it', _('Italian')),
#('fr', _('French')),
#('de', _('German')),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "files", "static"),
)
STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "files", "media")
############################################################################################
### COMPRESS CONFIG ########################################################################
############################################################################################
COMPRESS_STORAGE = 'custom_storages.StaticStorage'
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = os.path.join(BASE_DIR, "files")
############################################################################################
### AMAZON S3 STORAGES CONFIG ##############################################################
############################################################################################
### AWS4-HMAC-SHA256 ERROR WORKAROUND ###################################
os.environ['S3_USE_SIGV4'] = 'True'
if 'AWS_ACCESS_KEY_ID' in os.environ:
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_HOST = 's3.eu-central-1.amazonaws.com'
S3_URL = 'https://%s.%s' % (AWS_STORAGE_BUCKET_NAME, AWS_S3_HOST)
MEDIA_URL = S3_URL + '/media/'
STATIC_URL = S3_URL + '/static/'
### django compress setting
COMPRESS_URL = S3_URL + '/'
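# With the AWS_* variables present, collected static files and uploaded media are
# served from the S3 bucket through the custom_storages backends, and
# django-compressor output goes to the same bucket; without them the local
# /static/ and /media/ URLs defined earlier stay in effect.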
############################################################################################
### django-allauth config ##################################################################
############################################################################################
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_ON_GET = True
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', ],
'METHOD': 'oauth2' # instead of 'js_sdk'
}
}
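# Note: only the Twitter provider is enabled in INSTALLED_APPS above, so the
# 'facebook' entry here is inert until allauth's facebook provider app is
# re-enabled.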
if 'AWS_SES_ACCESS_KEY_ID' in os.environ:
EMAIL_BACKEND = 'django_ses.SESBackend'
DEFAULT_FROM_EMAIL = os.environ['DEFAULT_FROM_EMAIL']
AWS_SES_ACCESS_KEY_ID = os.environ['AWS_SES_ACCESS_KEY_ID']
AWS_SES_SECRET_ACCESS_KEY = os.environ['AWS_SES_SECRET_ACCESS_KEY']
AWS_SES_REGION_NAME = 'eu-west-1'
AWS_SES_REGION_ENDPOINT = 'email.eu-west-1.amazonaws.com'
else:
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = BASE_DIR+'/faked-emails'
############################################################################################
### CRISPY FORMS CONFIG ####################################################################
############################################################################################
CRISPY_TEMPLATE_PACK = 'bootstrap3'
############################################################################################
### REST FRAMEWORK #########################################################################
############################################################################################
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
############################################################################################
### REDACTOR ##############################################################################
############################################################################################
#REDACTOR_UPLOAD_HANDLER = 'redactor.handlers.DateDirectoryUploader'
#REDACTOR_AUTH_DECORATOR = 'django.contrib.auth.decorators.login_required'
#REDACTOR_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
REDACTOR_OPTIONS = {'lang': 'en', 'django_lang': True,}
############################################################################################
### REPLIES LIMIT ##########################################################################
############################################################################################
MAX_REPLIES_REGISTERED = 4
############################################################################################
### HTML MINIFY ############################################################################
############################################################################################
if DEBUG:
HTML_MINIFY = False
else:
HTML_MINIFY = True
############################################################################################
### RECAPTCHA ##############################################################################
############################################################################################
if 'RECAPTCHA_PUBLIC_KEY' in os.environ:
RECAPTCHA_PUBLIC_KEY = os.environ['RECAPTCHA_PUBLIC_KEY']
RECAPTCHA_PRIVATE_KEY = os.environ['RECAPTCHA_PRIVATE_KEY']
else:
RECAPTCHA_PUBLIC_KEY = ''
RECAPTCHA_PRIVATE_KEY = ''
NOCAPTCHA = True
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
############################################################################################
### ADSENSE SETTINGS #######################################################################
############################################################################################
if 'ADSENSE_YES' in os.environ:
ADSENSE_YES = True
ADSENSE_USER = os.environ['ADSENSE_USER']
ADSENSE_MAIN = os.environ['ADSENSE_MAIN']
else:
ADSENSE_YES = False
ADSENSE_USER = ''
ADSENSE_MAIN = ''
############################################################################################
### ANALYTICS SETTINGS #####################################################################
############################################################################################
G_ANALYTICS_ID = None
if 'G_ANALYTICS_ID' in os.environ:
G_ANALYTICS_ID = os.environ['G_ANALYTICS_ID']
| apache-2.0 | 6,907,194,725,354,861,000 | 31.854881 | 141 | 0.509075 | false |
lucadealfaro/cis-crowd | models/db.py | 1 | 8747 | # -*- coding: utf-8 -*-
import urllib
from gluon.custom_import import track_changes; track_changes(True) # for reloading modules
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
db = DAL('google:datastore', adapter_args={'use_ndb':True})
## store sessions and tickets there
session.connect(request, response, db=db)
## or store session in Memcache, Redis, etc.
## from gluon.contrib.memdb import MEMDB
## from google.appengine.api.memcache import Client
## session.connect(request, response, db = MEMDB(Client()))
## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
from gluon.tools import Auth, Crud, Service, PluginManager, prettydate
auth = Auth(db)
crud, service, plugins = Crud(db), Service(), PluginManager()
# Logging in via Google Accounts
from gluon.contrib.login_methods.gae_google_account import GaeGoogleAccount
auth.settings.login_form=GaeGoogleAccount()
# No logging of auth events.
auth.settings.logging_enabled = False
# Adds a timezone field to the auth table.
from pytz.gae import pytz
from plugin_timezone import tz_nice_detector_widget
my_tz_nice_detector_widget = lambda field, value : tz_nice_detector_widget(field, value, autodetect=True)
auth.settings.extra_fields['auth_user']= [
Field('user_timezone', 'string', widget=my_tz_nice_detector_widget),
]
## create all tables needed by auth if not custom tables
auth.define_tables(username=False)
auth.settings.table_user.first_name.readable = auth.settings.table_user.first_name.writable = True
auth.settings.table_user.last_name.readable = auth.settings.table_user.last_name.writable = True
auth.settings.table_user.user_timezone.label = T('Time zone')
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' or 'smtp.gmail.com:587'
mail.settings.sender = '[email protected]'
mail.settings.login = 'username:password'
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
##### This tells web2py to use GAE logins.
if request.env.web2py_runtime_gae:
from gluon.contrib.login_methods.gae_google_account import GaeGoogleAccount
auth.settings.login_form = GaeGoogleAccount()
auth.settings.actions_disabled.append('request_reset_password')
auth.settings.actions_disabled.append('reset_password')
auth.settings.actions_disabled.append('retrieve_password')
auth.settings.actions_disabled.append('email_reset_password')
auth.settings.actions_disabled.append('change_password')
auth.settings.actions_disabled.append('retrieve_username')
auth.settings.actions_disabled.append('verify_email')
auth.settings.actions_disabled.append('register')
# auth.settings.actions_disabled.append('profile')
db.auth_user.email.writable = False
#### How to get an email address.
def get_user_email():
"""Note that this function always returns a lowercase email address."""
if request.env.web2py_runtime_gae:
from google.appengine.api import users as googleusers
u = googleusers.get_current_user()
if u is None:
return None
else:
return u.email().lower()
else:
if auth.user is None:
return None
else:
return auth.user.email.lower()
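# Illustrative (hypothetical) use from a controller:
# email = get_user_email()
# if email is None:
#     redirect(URL('default', 'user', args='login'))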
## How to get an original email address (with original capitalization).
def get_user_system_email():
"""Use this for sending emails."""
if request.env.web2py_runtime_gae:
from google.appengine.api import users as googleusers
u = googleusers.get_current_user()
if u is None:
return None
else:
return u.email()
else:
if auth.user is None:
return None
else:
return auth.user.email
## How to get a user id (Google user id, in production).
def get_user_id():
"""Note that this function always returns a lowercase email address."""
if request.env.web2py_runtime_gae:
from google.appengine.api import users as googleusers
u = googleusers.get_current_user()
if u is None:
return None
else:
return u.user_id()
else:
if auth.user is None:
return None
else:
return auth.user.email
# Stores these in the current object.
from gluon import current
current.user_email = get_user_email()
current.user_system_email = get_user_system_email()
current.user_id = get_user_id()
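# Modules imported outside the request cycle can now read these values via
# `from gluon import current` (e.g. current.user_email), just like current.db
# and current.logger which are attached further below.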
######################
# Logging
import logging, logging.handlers
class GAEHandler(logging.Handler):
"""
Logging handler for GAE DataStore
"""
def emit(self, record):
from google.appengine.ext import db
class Log(db.Model):
name = db.StringProperty()
level = db.StringProperty()
module = db.StringProperty()
func_name = db.StringProperty()
line_no = db.IntegerProperty()
thread = db.IntegerProperty()
thread_name = db.StringProperty()
process = db.IntegerProperty()
message = db.StringProperty(multiline=True)
args = db.StringProperty(multiline=True)
date = db.DateTimeProperty(auto_now_add=True)
log = Log()
log.name = record.name
log.level = record.levelname
log.module = record.module
log.func_name = record.funcName
log.line_no = record.lineno
log.thread = record.thread
log.thread_name = record.threadName
log.process = record.process
log.message = record.msg
log.args = str(record.args)
log.put()
def get_configured_logger(name):
logger = logging.getLogger(name)
if (len(logger.handlers) == 0):
# This logger has no handlers, so we can assume it hasn't yet been configured
# (Configure logger)
# Create default handler
if request.env.web2py_runtime_gae:
# Create GAEHandler
handler = GAEHandler()
handler.setLevel(logging.WARNING)
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
else:
# Create RotatingFileHandler
import os
formatter="%(asctime)s %(levelname)s %(process)s %(thread)s %(funcName)s():%(lineno)d %(message)s"
handler = logging.handlers.RotatingFileHandler(os.path.join(request.folder,'private/app.log'),maxBytes=1024,backupCount=2)
handler.setFormatter(logging.Formatter(formatter))
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Test entry:
# logger.debug(name + ' logger created')
else:
pass
# Test entry:
# logger.debug(name + ' already exists')
return logger
# Assign application logger to a global var
if request.env.web2py_runtime_gae:
logger = logging
else:
logger = get_configured_logger(request.application)
# Makes the db and logger available also to modules.
current.db = db
current.logger = logger
# Let's log the user.
logger.info("User: %r Email: %r Id: %r" %
(current.user_email, current.user_system_email, current.user_id))
request_scheme = 'http'
if request.is_https:
request_scheme = 'https'
request_host = request_scheme + '://' + request.env.http_host
logger.info("Request host: %r" % request_host)
current.request_host = request_host
| bsd-3-clause | -8,621,622,117,824,228,000 | 35.144628 | 134 | 0.654396 | false |
SUSE-Cloud/glance | glance/tests/unit/v1/test_api.py | 1 | 119257 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- coding: utf-8 -*-
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import hashlib
import json
import StringIO
from oslo.config import cfg
import routes
import six
import webob
import glance.api
import glance.api.common
from glance.api.v1 import filters
from glance.api.v1 import images
from glance.api.v1 import router
from glance.common import exception
import glance.common.config
import glance.context
from glance.db.sqlalchemy import api as db_api
from glance.db.sqlalchemy import models as db_models
from glance.openstack.common import timeutils
from glance.openstack.common import uuidutils
import glance.store.filesystem
from glance.tests.unit import base
from glance.tests import utils as test_utils
import glance.tests.unit.utils as unit_test_utils
CONF = cfg.CONF
_gen_uuid = uuidutils.generate_uuid
UUID1 = _gen_uuid()
UUID2 = _gen_uuid()
class TestGlanceAPI(base.IsolatedUnitTest):
def setUp(self):
"""Establish a clean test environment"""
super(TestGlanceAPI, self).setUp()
self.mapper = routes.Mapper()
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper))
self.FIXTURES = [
{'id': UUID1,
'name': 'fake image #1',
'status': 'active',
'disk_format': 'ami',
'container_format': 'ami',
'is_public': False,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': None,
'size': 13,
'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1),
'metadata': {}}],
'properties': {'type': 'kernel'}},
{'id': UUID2,
'name': 'fake image #2',
'status': 'active',
'disk_format': 'vhd',
'container_format': 'ovf',
'is_public': True,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': 'abc123',
'size': 19,
'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2),
'metadata': {}}],
'properties': {}}]
self.context = glance.context.RequestContext(is_admin=True)
db_api.setup_db_env()
db_api.get_engine()
self.destroy_fixtures()
self.create_fixtures()
def tearDown(self):
"""Clear the test environment"""
super(TestGlanceAPI, self).tearDown()
self.destroy_fixtures()
def create_fixtures(self):
for fixture in self.FIXTURES:
db_api.image_create(self.context, fixture)
# We write a fake image file to the filesystem
with open("%s/%s" % (self.test_dir, fixture['id']), 'wb') as image:
image.write("chunk00000remainder")
image.flush()
def destroy_fixtures(self):
# Easiest to just drop the models and re-create them...
db_models.unregister_models(db_api._ENGINE)
db_models.register_models(db_api._ENGINE)
def _do_test_defaulted_format(self, format_key, format_value):
fixture_headers = {'x-image-meta-name': 'defaulted',
'x-image-meta-location': 'http://localhost:0/image',
format_key: format_value}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals(format_value, res_body['disk_format'])
self.assertEquals(format_value, res_body['container_format'])
def test_defaulted_amazon_format(self):
for key in ('x-image-meta-disk-format',
'x-image-meta-container-format'):
for value in ('aki', 'ari', 'ami'):
self._do_test_defaulted_format(key, value)
def test_bad_disk_format(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'invalid',
'x-image-meta-container-format': 'ami',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid disk format' in res.body, res.body)
def test_configured_disk_format_good(self):
self.config(disk_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'foo',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_configured_disk_format_bad(self):
self.config(disk_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'bar',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid disk format' in res.body, res.body)
def test_configured_container_format_good(self):
self.config(container_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'raw',
'x-image-meta-container-format': 'foo',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_configured_container_format_bad(self):
self.config(container_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'raw',
'x-image-meta-container-format': 'bar',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid container format' in res.body, res.body)
def test_container_and_disk_amazon_format_differs(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'aki',
'x-image-meta-container-format': 'ami'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
expected = ("Invalid mix of disk and container formats. "
"When setting a disk or container format to one of "
"'aki', 'ari', or 'ami', "
"the container and disk formats must match.")
self.assertEquals(res.status_int, 400)
self.assertTrue(expected in res.body, res.body)
def test_create_with_location_no_container_format(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid container format' in res.body)
def test_bad_container_format(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'invalid',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid container format' in res.body)
def test_bad_image_size(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://example.com/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-size': 'invalid',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Incoming image size' in res.body)
def test_bad_image_name(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'X' * 256,
'x-image-meta-location': 'http://example.com/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_no_location_no_image_as_body(self):
"""Tests creates a queued image for no body and no loc header"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
image_id = res_body['id']
# Test that we are able to edit the Location field
# per LP Bug #911599
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-location'] = 'http://localhost:0/images/123'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_body = json.loads(res.body)['image']
# Once the location is set, the image should be activated
# see LP Bug #939484
self.assertEquals('active', res_body['status'])
self.assertFalse('location' in res_body) # location never shown
def test_add_image_no_location_no_content_type(self):
"""Tests creates a queued image for no body and no loc header"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = "chunk00000remainder"
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_size_header_too_big(self):
"""Tests raises BadRequest for supplied image size that is too big"""
fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1,
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_size_chunked_data_too_big(self):
self.config(image_size_cap=512)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'ami',
'x-image-meta-disk_format': 'ami',
'transfer-encoding': 'chunked',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body_file = StringIO.StringIO('X' * (CONF.image_size_cap + 1))
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_add_image_size_data_too_big(self):
self.config(image_size_cap=512)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'ami',
'x-image-meta-disk_format': 'ami',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (CONF.image_size_cap + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_size_header_exceed_quota(self):
quota = 500
self.config(user_storage_quota=quota)
fixture_headers = {'x-image-meta-size': quota + 1,
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'bare',
'x-image-meta-disk_format': 'qcow2',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.body = 'X' * (quota + 1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_add_image_size_data_exceed_quota(self):
quota = 500
self.config(user_storage_quota=quota)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'bare',
'x-image-meta-disk_format': 'qcow2',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (quota + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_add_image_size_data_exceed_quota_readd(self):
quota = 500
self.config(user_storage_quota=quota)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'bare',
'x-image-meta-disk_format': 'qcow2',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (quota + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
used_size = sum([f['size'] for f in self.FIXTURES])
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (quota - used_size)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def _add_check_no_url_info(self):
fixture_headers = {'x-image-meta-disk-format': 'ami',
'x-image-meta-container-format': 'ami',
'x-image-meta-size': '0',
'x-image-meta-name': 'empty image'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
res_body = json.loads(res.body)['image']
self.assertFalse('locations' in res_body)
self.assertFalse('direct_url' in res_body)
image_id = res_body['id']
# HEAD empty image
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertFalse('x-image-meta-locations' in res.headers)
self.assertFalse('x-image-meta-direct_url' in res.headers)
def test_add_check_no_url_info_ml(self):
self.config(show_multiple_locations=True)
self._add_check_no_url_info()
def test_add_check_no_url_info_direct_url(self):
self.config(show_image_direct_url=True)
self._add_check_no_url_info()
def test_add_check_no_url_info_both_on(self):
self.config(show_image_direct_url=True)
self.config(show_multiple_locations=True)
self._add_check_no_url_info()
def test_add_check_no_url_info_both_off(self):
self._add_check_no_url_info()
def test_add_image_zero_size(self):
"""Tests creating an active image with explicitly zero size"""
fixture_headers = {'x-image-meta-disk-format': 'ami',
'x-image-meta-container-format': 'ami',
'x-image-meta-size': '0',
'x-image-meta-name': 'empty image'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('active', res_body['status'])
image_id = res_body['id']
# GET empty image
req = webob.Request.blank("/images/%s" % image_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res.body), 0)
def _do_test_add_image_attribute_mismatch(self, attributes):
fixture_headers = {
'x-image-meta-name': 'fake image #3',
}
fixture_headers.update(attributes)
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "XXXX"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_checksum_mismatch(self):
attributes = {
'x-image-meta-checksum': 'asdf',
}
self._do_test_add_image_attribute_mismatch(attributes)
def test_add_image_size_mismatch(self):
attributes = {
'x-image-meta-size': str(len("XXXX") + 1),
}
self._do_test_add_image_attribute_mismatch(attributes)
def test_add_image_checksum_and_size_mismatch(self):
attributes = {
'x-image-meta-checksum': 'asdf',
'x-image-meta-size': str(len("XXXX") + 1),
}
self._do_test_add_image_attribute_mismatch(attributes)
def test_add_image_bad_store(self):
"""Tests raises BadRequest for invalid store header"""
fixture_headers = {'x-image-meta-store': 'bad',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_basic_file_store(self):
"""Tests to add a basic image in the file store"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
# Test that the Location: header is set to the URI to
# edit the newly-created image, as required by APP.
# See LP Bug #719825
self.assertTrue('location' in res.headers,
"'location' not in response headers.\n"
"res.headerlist = %r" % res.headerlist)
res_body = json.loads(res.body)['image']
self.assertTrue('/images/%s' % res_body['id']
in res.headers['location'])
self.assertEquals('active', res_body['status'])
image_id = res_body['id']
# Test that we are NOT able to edit the Location field
# per LP Bug #911599
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-location'] = 'http://example.com/images/123'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_unauthorized(self):
rules = {"add_image": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_add_publicize_image_unauthorized(self):
rules = {"add_image": '@', "modify_image": '@',
"publicize_image": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-is-public': 'true',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_add_publicize_image_authorized(self):
rules = {"add_image": '@', "modify_image": '@',
"publicize_image": '@'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-is-public': 'true',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_add_copy_from_image_unauthorized(self):
rules = {"add_image": '@', "copy_from": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://glance.com/i.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_add_copy_from_image_authorized(self):
rules = {"add_image": '@', "copy_from": '@'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://glance.com/i.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_add_copy_from_with_nonempty_body(self):
"""Tests creates an image from copy-from and nonempty body"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://a/b/c.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.headers['Content-Type'] = 'application/octet-stream'
req.method = 'POST'
req.body = "chunk00000remainder"
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_location_with_nonempty_body(self):
"""Tests creates an image from location and nonempty body"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-location': 'http://a/b/c.tar.gz',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.headers['Content-Type'] = 'application/octet-stream'
req.method = 'POST'
req.body = "chunk00000remainder"
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_location_with_conflict_image_size(self):
"""Tests creates an image from location and conflict image size"""
self.stubs.Set(glance.api.v1.images, 'get_size_from_backend',
lambda *args, **kwargs: 2)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-location': 'http://a/b/c.tar.gz',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F',
'x-image-meta-size': '1'}
req = webob.Request.blank("/images")
req.headers['Content-Type'] = 'application/octet-stream'
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 409)
def test_add_copy_from_with_location(self):
"""Tests creates an image from copy-from and location"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://a/b/c.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F',
'x-image-meta-location': 'http://a/b/c.tar.gz'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def _do_test_post_image_content_missing_format(self, missing):
"""Tests creation of an image with missing format"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
header = 'x-image-meta-' + missing.replace('_', '-')
del fixture_headers[header]
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_add_copy_from_with_restricted_sources(self):
"""Tests creates an image from copy-from with restricted sources"""
header_template = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
schemas = ["file:///etc/passwd",
"swift+config:///xxx",
"filesystem:///etc/passwd"]
for schema in schemas:
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in six.iteritems(header_template):
req.headers[k] = v
req.headers['x-glance-api-copy-from'] = schema
res = req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_post_image_content_missing_disk_format(self):
"""Tests creation of an image with missing disk format"""
self._do_test_post_image_content_missing_format('disk_format')
def test_post_image_content_missing_container_type(self):
"""Tests creation of an image with missing container format"""
self._do_test_post_image_content_missing_format('container_format')
def _do_test_put_image_content_missing_format(self, missing):
"""Tests delayed activation of an image with missing format"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
header = 'x-image-meta-' + missing.replace('_', '-')
del fixture_headers[header]
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
image_id = res_body['id']
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_put_image_content_missing_disk_format(self):
"""Tests delayed activation of image with missing disk format"""
self._do_test_put_image_content_missing_format('disk_format')
def test_put_image_content_missing_container_type(self):
"""Tests delayed activation of image with missing container format"""
self._do_test_put_image_content_missing_format('container_format')
def test_update_deleted_image(self):
"""Tests that exception raised trying to update a deleted image"""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
fixture = {'name': 'test_del_img'}
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
self.assertTrue('Forbidden to update deleted image' in res.body)
def test_delete_deleted_image(self):
"""Tests that exception raised trying to delete a deleted image"""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the status is deleted
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("deleted", res.headers['x-image-meta-status'])
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
msg = "Image %s not found." % UUID2
self.assertTrue(msg in res.body)
# Verify the status is still deleted
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("deleted", res.headers['x-image-meta-status'])
def test_delete_pending_delete_image(self):
"""
Tests that correct response returned when deleting
a pending_delete image
"""
# First deletion
self.config(delayed_delete=True, scrubber_datadir='/tmp/scrubber')
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the status is pending_delete
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("pending_delete", res.headers['x-image-meta-status'])
# Second deletion
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
self.assertTrue('Forbidden to delete a pending_delete image'
in res.body)
# Verify the status is still pending_delete
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("pending_delete", res.headers['x-image-meta-status'])
def test_register_and_upload(self):
"""
Test that the process of registering an image with
some metadata, then uploading an image file with some
more metadata doesn't mark the original metadata deleted
:see LP Bug#901534
"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3',
'x-image-meta-property-key1': 'value1'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertTrue('id' in res_body)
image_id = res_body['id']
self.assertTrue('/images/%s' % image_id in res.headers['location'])
# Verify the status is queued
self.assertTrue('status' in res_body)
self.assertEqual('queued', res_body['status'])
# Check properties are not deleted
self.assertTrue('properties' in res_body)
self.assertTrue('key1' in res_body['properties'])
self.assertEqual('value1', res_body['properties']['key1'])
# Now upload the image file along with some more
# metadata and verify original metadata properties
# are not marked deleted
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/octet-stream'
req.headers['x-image-meta-property-key2'] = 'value2'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the image is now active and the original property is still present
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertTrue('x-image-meta-property-key1' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
self.assertEqual("active", res.headers['x-image-meta-status'])
def test_disable_purge_props(self):
"""
Test the special x-glance-registry-purge-props header controls
the purge property behaviour of the registry.
:see LP Bug#901534
"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3',
'x-image-meta-property-key1': 'value1'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertTrue('id' in res_body)
image_id = res_body['id']
self.assertTrue('/images/%s' % image_id in res.headers['location'])
# Verify the status is active
self.assertTrue('status' in res_body)
self.assertEqual('active', res_body['status'])
# Check properties are not deleted
self.assertTrue('properties' in res_body)
self.assertTrue('key1' in res_body['properties'])
self.assertEqual('value1', res_body['properties']['key1'])
# Now update the image, setting new properties without
# passing the x-glance-registry-purge-props header and
# verify that original properties are marked deleted.
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-property-key2'] = 'value2'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the original property no longer in headers
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertTrue('x-image-meta-property-key2' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
self.assertFalse('x-image-meta-property-key1' in res.headers,
"Found property in headers that was not expected. "
"Got headers: %r" % res.headers)
# Now update the image, setting new properties and
# passing the x-glance-registry-purge-props header with
# a value of "false" and verify that second property
# still appears in headers.
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-property-key3'] = 'value3'
req.headers['x-glance-registry-purge-props'] = 'false'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the second and third property in headers
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertTrue('x-image-meta-property-key2' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
self.assertTrue('x-image-meta-property-key3' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
def test_publicize_image_unauthorized(self):
"""Create a non-public image then fail to make public"""
rules = {"add_image": '@', "publicize_image": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-is-public': 'false',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'PUT'
req.headers['x-image-meta-is-public'] = 'true'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_update_image_size_header_too_big(self):
"""Tests raises BadRequest for supplied image size that is too big"""
fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'PUT'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_update_image_size_data_too_big(self):
self.config(image_size_cap=512)
fixture_headers = {'content-type': 'application/octet-stream'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'PUT'
req.body = 'X' * (CONF.image_size_cap + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_update_image_size_chunked_data_too_big(self):
self.config(image_size_cap=512)
# Create new image that has no data
req = webob.Request.blank("/images")
req.method = 'POST'
req.headers['x-image-meta-name'] = 'something'
req.headers['x-image-meta-container_format'] = 'ami'
req.headers['x-image-meta-disk_format'] = 'ami'
res = req.get_response(self.api)
image_id = json.loads(res.body)['image']['id']
fixture_headers = {
'content-type': 'application/octet-stream',
'transfer-encoding': 'chunked',
}
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.body_file = StringIO.StringIO('X' * (CONF.image_size_cap + 1))
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_update_non_existing_image(self):
self.config(image_size_cap=100)
req = webob.Request.blank("images/%s" % _gen_uuid)
req.method = 'PUT'
req.body = 'test'
req.headers['x-image-meta-name'] = 'test'
req.headers['x-image-meta-container_format'] = 'ami'
req.headers['x-image-meta-disk_format'] = 'ami'
req.headers['x-image-meta-is_public'] = 'False'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 404)
def test_update_public_image(self):
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-is-public': 'true',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'PUT'
req.headers['x-image-meta-name'] = 'updated public image'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
def test_get_index_sort_name_asc(self):
"""
Tests that the /images registry API returns list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'asdf',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/images?sort_key=name&sort_dir=asc')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 3)
self.assertEquals(images[0]['id'], UUID3)
self.assertEquals(images[1]['id'], UUID2)
self.assertEquals(images[2]['id'], UUID4)
def test_get_details_filter_changes_since(self):
"""
        Tests that the /images/detail registry API only returns images
        changed since the given changes-since timestamp
"""
dt1 = timeutils.utcnow() - datetime.timedelta(1)
iso1 = timeutils.isotime(dt1)
date_only1 = dt1.strftime('%Y-%m-%d')
date_only2 = dt1.strftime('%Y%m%d')
date_only3 = dt1.strftime('%Y-%m%d')
dt2 = timeutils.utcnow() + datetime.timedelta(1)
iso2 = timeutils.isotime(dt2)
image_ts = timeutils.utcnow() + datetime.timedelta(2)
hour_before = image_ts.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00')
hour_after = image_ts.strftime('%Y-%m-%dT%H:%M:%S-01:00')
dt4 = timeutils.utcnow() + datetime.timedelta(3)
iso4 = timeutils.isotime(dt4)
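        # dt1 lies one day in the past; dt2, image_ts and dt4 lie progressively
        # further in the future, so each changes-since filter below matches a
        # different subset of the fixtures.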
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'fake image #3',
'size': 18,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
db_api.image_destroy(self.context, UUID3)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'fake image #4',
'size': 20,
'checksum': None,
'created_at': image_ts,
'updated_at': image_ts}
db_api.image_create(self.context, extra_fixture)
# Check a standard list, 4 images in db (2 deleted)
req = webob.Request.blank('/images/detail')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 2)
self.assertEqual(images[0]['id'], UUID4)
self.assertEqual(images[1]['id'], UUID2)
# Expect 3 images (1 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' % iso1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 3)
self.assertEqual(images[0]['id'], UUID4)
self.assertEqual(images[1]['id'], UUID3) # deleted
self.assertEqual(images[2]['id'], UUID2)
# Expect 1 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' % iso2)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 1)
self.assertEqual(images[0]['id'], UUID4)
# Expect 1 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' %
hour_before)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 1)
self.assertEqual(images[0]['id'], UUID4)
# Expect 0 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' %
hour_after)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 0)
# Expect 0 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' % iso4)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 0)
for param in [date_only1, date_only2, date_only3]:
# Expect 3 images (1 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' %
param)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 3)
self.assertEqual(images[0]['id'], UUID4)
self.assertEqual(images[1]['id'], UUID3) # deleted
self.assertEqual(images[2]['id'], UUID2)
# Bad request (empty changes-since param)
req = webob.Request.blank('/images/detail?changes-since=')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_get_images_bad_urls(self):
"""Check that routes collections are not on (LP bug 1185828)"""
req = webob.Request.blank('/images/detail.xxx')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
req = webob.Request.blank('/images.xxx')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
req = webob.Request.blank('/images/new')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
req = webob.Request.blank("/images/%s/members" % UUID1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank("/images/%s/members.xxx" % UUID1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_get_images_detailed_unauthorized(self):
rules = {"get_images": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/detail')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_get_images_unauthorized(self):
rules = {"get_images": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/detail')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_store_location_not_revealed(self):
"""
Test that the internal store location is NOT revealed
through the API server
"""
# Check index and details...
for url in ('/images', '/images/detail'):
req = webob.Request.blank(url)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
num_locations = sum([1 for record in images
if 'location' in record.keys()])
self.assertEquals(0, num_locations, images)
# Check GET
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertFalse('X-Image-Meta-Location' in res.headers)
# Check HEAD
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertFalse('X-Image-Meta-Location' in res.headers)
# Check PUT
req = webob.Request.blank("/images/%s" % UUID2)
req.body = res.body
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_body = json.loads(res.body)
self.assertFalse('location' in res_body['image'])
# Check POST
req = webob.Request.blank("/images")
headers = {'x-image-meta-location': 'http://localhost',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
for k, v in headers.iteritems():
req.headers[k] = v
req.method = 'POST'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
res_body = json.loads(res.body)
self.assertFalse('location' in res_body['image'])
def test_image_is_checksummed(self):
"""Test that the image contents are checksummed properly"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
image_contents = "chunk00000remainder"
image_checksum = hashlib.md5(image_contents).hexdigest()
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = image_contents
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals(image_checksum, res_body['checksum'],
"Mismatched checksum. Expected %s, got %s" %
(image_checksum, res_body['checksum']))
def test_etag_equals_checksum_header(self):
"""Test that the ETag header matches the x-image-meta-checksum"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
image_contents = "chunk00000remainder"
image_checksum = hashlib.md5(image_contents).hexdigest()
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = image_contents
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
image = json.loads(res.body)['image']
# HEAD the image and check the ETag equals the checksum header...
expected_headers = {'x-image-meta-checksum': image_checksum,
'etag': image_checksum}
req = webob.Request.blank("/images/%s" % image['id'])
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
for key in expected_headers.keys():
self.assertTrue(key in res.headers,
"required header '%s' missing from "
"returned headers" % key)
for key, value in expected_headers.iteritems():
self.assertEquals(value, res.headers[key])
def test_bad_checksum_prevents_image_creation(self):
"""Test that the image contents are checksummed properly"""
image_contents = "chunk00000remainder"
bad_checksum = hashlib.md5("invalid").hexdigest()
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3',
'x-image-meta-checksum': bad_checksum,
'x-image-meta-is-public': 'true'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = image_contents
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
        # Verify only the one pre-existing image is returned (nothing new was created)
req = webob.Request.blank("/images")
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
images = json.loads(res.body)['images']
self.assertEqual(len(images), 1)
def test_image_meta(self):
"""Test for HEAD /images/<ID>"""
expected_headers = {'x-image-meta-id': UUID2,
'x-image-meta-name': 'fake image #2'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
for key, value in expected_headers.iteritems():
self.assertEquals(value, res.headers[key])
def test_image_meta_unauthorized(self):
rules = {"get_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_show_image_basic(self):
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, 'application/octet-stream')
self.assertEqual('chunk00000remainder', res.body)
def test_show_non_exists_image(self):
req = webob.Request.blank("/images/%s" % _gen_uuid())
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_show_image_unauthorized(self):
rules = {"get_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
def test_show_image_unauthorized_download(self):
rules = {"download_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
def test_delete_image(self):
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.body, '')
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404,
res.body)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.headers['x-image-meta-deleted'], 'True')
self.assertEquals(res.headers['x-image-meta-status'], 'deleted')
def test_delete_non_exists_image(self):
req = webob.Request.blank("/images/%s" % _gen_uuid())
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_delete_not_allowed(self):
# Verify we can get the image data
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.headers['X-Auth-Token'] = 'user:tenant:'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res.body), 19)
# Verify we cannot delete the image
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
# Verify the image data is still there
req.method = 'GET'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res.body), 19)
def test_delete_queued_image(self):
"""Delete an image in a queued state
Bug #747799 demonstrated that trying to DELETE an image
that had had its save process killed manually results in failure
because the location attribute is None.
Bug #1048851 demonstrated that the status was not properly
being updated to 'deleted' from 'queued'.
"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
# Now try to delete the image...
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s' % res_body['id'])
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.headers['x-image-meta-deleted'], 'True')
self.assertEquals(res.headers['x-image-meta-status'], 'deleted')
def test_delete_queued_image_delayed_delete(self):
"""Delete an image in a queued state when delayed_delete is on
Bug #1048851 demonstrated that the status was not properly
being updated to 'deleted' from 'queued'.
"""
self.config(delayed_delete=True)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
# Now try to delete the image...
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s' % res_body['id'])
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.headers['x-image-meta-deleted'], 'True')
self.assertEquals(res.headers['x-image-meta-status'], 'deleted')
def test_delete_protected_image(self):
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-name': 'fake image #3',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-protected': 'True'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
# Now try to delete the image...
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_delete_image_unauthorized(self):
rules = {"delete_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_get_details_invalid_marker(self):
"""
Tests that the /images/detail registry API returns a 400
when an invalid marker is provided
"""
req = webob.Request.blank('/images/detail?marker=%s' % _gen_uuid())
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_get_image_members(self):
"""
Tests members listing for existing images
"""
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
memb_list = json.loads(res.body)
num_members = len(memb_list['members'])
self.assertEquals(num_members, 0)
def test_get_image_members_allowed_by_policy(self):
rules = {"get_members": '@'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
memb_list = json.loads(res.body)
num_members = len(memb_list['members'])
self.assertEquals(num_members, 0)
def test_get_image_members_forbidden_by_policy(self):
rules = {"get_members": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
def test_get_image_members_not_existing(self):
"""
Tests proper exception is raised if attempt to get members of
non-existing image
"""
req = webob.Request.blank('/images/%s/members' % _gen_uuid())
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_add_member(self):
"""
Tests adding image members
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
req = webob.Request.blank('/images/%s/members/test' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_get_member_images(self):
"""
Tests image listing for members
"""
req = webob.Request.blank('/shared-images/pattieblack')
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
memb_list = json.loads(res.body)
num_members = len(memb_list['shared_images'])
self.assertEquals(num_members, 0)
def test_replace_members(self):
"""
        Tests replacing image members without authorization returns a 401
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=False)
fixture = dict(member_id='pattieblack')
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image_memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 401)
def test_active_image_immutable_props_for_user(self):
"""
Tests user cannot update immutable props of active image
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=False)
fixture_header_list = [{'x-image-meta-checksum': '1234'},
{'x-image-meta-size': '12345'}]
for fixture_header in fixture_header_list:
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'PUT'
for k, v in fixture_header.iteritems():
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
orig_value = res.headers[k]
req = webob.Request.blank('/images/%s' % UUID2)
req.headers[k] = v
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
prop = k[len('x-image-meta-'):]
self.assertNotEqual(res.body.find("Forbidden to modify \'%s\' "
"of active "
"image" % prop), -1)
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(orig_value, res.headers[k])
def test_props_of_active_image_mutable_for_admin(self):
"""
Tests admin can update 'immutable' props of active image
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
fixture_header_list = [{'x-image-meta-checksum': '1234'},
{'x-image-meta-size': '12345'}]
for fixture_header in fixture_header_list:
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'PUT'
for k, v in fixture_header.iteritems():
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
orig_value = res.headers[k]
req = webob.Request.blank('/images/%s' % UUID2)
req.headers[k] = v
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(v, res.headers[k])
def test_replace_members_non_existing_image(self):
"""
        Tests replacing members of a non-existing image returns a 404
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
fixture = dict(member_id='pattieblack')
req = webob.Request.blank('/images/%s/members' % _gen_uuid())
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image_memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_replace_members_bad_request(self):
"""
Tests replacing image members raises bad request if body is wrong
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
fixture = dict(member_id='pattieblack')
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image_memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_replace_members_positive(self):
"""
Tests replacing image members
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
fixture = [dict(member_id='pattieblack', can_share=False)]
# Replace
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_replace_members_forbidden_by_policy(self):
rules = {"modify_member": '!'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
req = webob.Request.blank('/images/%s/members' % UUID1)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
def test_replace_members_allowed_by_policy(self):
rules = {"modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
req = webob.Request.blank('/images/%s/members' % UUID1)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
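    # NOTE: the following definition shadows the test_add_member defined earlier
    # in this class, so only this unauthorized variant is actually collected and run.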
def test_add_member(self):
"""
        Tests adding an image member without authorization returns a 401
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=False)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 401)
def test_add_member_non_existing_image(self):
"""
        Tests adding a member to a non-existing image returns a 404
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
test_uri = '/images/%s/members/pattieblack'
req = webob.Request.blank(test_uri % _gen_uuid())
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_add_member_positive(self):
"""
Tests adding image members
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_add_member_with_body(self):
"""
Tests adding image members
"""
fixture = dict(can_share=True)
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
req.body = json.dumps(dict(member=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_add_member_forbidden_by_policy(self):
rules = {"modify_member": '!'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
def test_add_member_allowed_by_policy(self):
rules = {"modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
def test_get_members_of_deleted_image_raises_404(self):
"""
Tests members listing for deleted image raises 404.
"""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_delete_member_of_deleted_image_raises_404(self):
"""
Tests deleting members of deleted image raises 404.
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_update_members_of_deleted_image_raises_404(self):
"""
Tests update members of deleted image raises 404.
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_create_member_to_deleted_image_raises_404(self):
"""
Tests adding members to deleted image raises 404.
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_delete_member(self):
"""
        Tests deleting an image member without authorization returns a 401
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=False)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 401)
def test_delete_member_on_non_existing_image(self):
"""
        Tests deleting a member of a non-existing image returns a 404
"""
test_router = router.API(self.mapper)
api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
test_uri = '/images/%s/members/pattieblack'
req = webob.Request.blank(test_uri % _gen_uuid())
req.method = 'DELETE'
res = req.get_response(api)
self.assertEquals(res.status_int, 404)
def test_delete_non_exist_member(self):
"""
        Test deleting a non-existent image member returns a 404
"""
test_router = router.API(self.mapper)
api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/test_user' % UUID2)
req.method = 'DELETE'
res = req.get_response(api)
self.assertEquals(res.status_int, 404)
def test_delete_image_member(self):
test_rserver = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_rserver, is_admin=True)
# Add member to image:
fixture = dict(can_share=True)
test_uri = '/images/%s/members/test_add_member_positive'
req = webob.Request.blank(test_uri % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(member=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
# Delete member
test_uri = '/images/%s/members/test_add_member_positive'
req = webob.Request.blank(test_uri % UUID2)
req.headers['X-Auth-Token'] = 'test1:test1:'
req.method = 'DELETE'
req.content_type = 'application/json'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
self.assertTrue('Forbidden' in res.body)
def test_delete_member_allowed_by_policy(self):
rules = {"delete_member": '@', "modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_member_forbidden_by_policy(self):
rules = {"delete_member": '!', "modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
class TestImageSerializer(base.IsolatedUnitTest):
def setUp(self):
"""Establish a clean test environment"""
super(TestImageSerializer, self).setUp()
self.receiving_user = 'fake_user'
self.receiving_tenant = 2
self.context = glance.context.RequestContext(
is_admin=True,
user=self.receiving_user,
tenant=self.receiving_tenant)
self.serializer = images.ImageSerializer()
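        # The fixture streams the image body in three chunks that concatenate
        # to the 19-byte payload 'chunk67891123456789'.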
def image_iter():
for x in ['chunk', '678911234', '56789']:
yield x
self.FIXTURE = {
'image_iterator': image_iter(),
'image_meta': {
'id': UUID2,
'name': 'fake image #2',
'status': 'active',
'disk_format': 'vhd',
'container_format': 'ovf',
'is_public': True,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': '06ff575a2856444fbe93100157ed74ab92eb7eff',
'size': 19,
'owner': _gen_uuid(),
'location': "file:///tmp/glance-tests/2",
'properties': {},
}
}
def test_meta(self):
exp_headers = {'x-image-meta-id': UUID2,
'x-image-meta-location': 'file:///tmp/glance-tests/2',
'ETag': self.FIXTURE['image_meta']['checksum'],
'x-image-meta-name': 'fake image #2'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
req.remote_addr = "1.2.3.4"
req.context = self.context
response = webob.Response(request=req)
self.serializer.meta(response, self.FIXTURE)
for key, value in exp_headers.iteritems():
self.assertEquals(value, response.headers[key])
def test_meta_utf8(self):
# We get unicode strings from JSON, and therefore all strings in the
# metadata will actually be unicode when handled internally. But we
# want to output utf-8.
FIXTURE = {
'image_meta': {
'id': unicode(UUID2),
'name': u'fake image #2 with utf-8 éàè',
'status': u'active',
'disk_format': u'vhd',
'container_format': u'ovf',
'is_public': True,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': u'06ff575a2856444fbe93100157ed74ab92eb7eff',
'size': 19,
'owner': unicode(_gen_uuid()),
'location': u"file:///tmp/glance-tests/2",
'properties': {
u'prop_éé': u'ça marche',
u'prop_çé': u'çé',
}
}
}
exp_headers = {'x-image-meta-id': UUID2.encode('utf-8'),
'x-image-meta-location': 'file:///tmp/glance-tests/2',
'ETag': '06ff575a2856444fbe93100157ed74ab92eb7eff',
'x-image-meta-size': '19', # str, not int
'x-image-meta-name': 'fake image #2 with utf-8 éàè',
'x-image-meta-property-prop_éé': 'ça marche',
'x-image-meta-property-prop_çé': u'çé'.encode('utf-8')}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
req.remote_addr = "1.2.3.4"
req.context = self.context
response = webob.Response(request=req)
self.serializer.meta(response, FIXTURE)
self.assertNotEqual(type(FIXTURE['image_meta']['name']),
type(response.headers['x-image-meta-name']))
self.assertEqual(response.headers['x-image-meta-name'].decode('utf-8'),
FIXTURE['image_meta']['name'])
for key, value in exp_headers.iteritems():
self.assertEquals(value, response.headers[key])
FIXTURE['image_meta']['properties'][u'prop_bad'] = 'çé'
self.assertRaises(UnicodeDecodeError,
self.serializer.meta, response, FIXTURE)
def test_show(self):
exp_headers = {'x-image-meta-id': UUID2,
'x-image-meta-location': 'file:///tmp/glance-tests/2',
'ETag': self.FIXTURE['image_meta']['checksum'],
'x-image-meta-name': 'fake image #2'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.context = self.context
response = webob.Response(request=req)
self.serializer.show(response, self.FIXTURE)
for key, value in exp_headers.iteritems():
self.assertEquals(value, response.headers[key])
self.assertEqual(response.body, 'chunk67891123456789')
def test_show_notify(self):
"""Make sure an eventlet posthook for notify_image_sent is added."""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.context = self.context
response = webob.Response(request=req)
response.request.environ['eventlet.posthooks'] = []
self.serializer.show(response, self.FIXTURE)
        # just make sure the app_iter is called
for chunk in response.app_iter:
pass
self.assertNotEqual(response.request.environ['eventlet.posthooks'], [])
def test_image_send_notification(self):
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.remote_addr = '1.2.3.4'
req.context = self.context
image_meta = self.FIXTURE['image_meta']
called = {"notified": False}
expected_payload = {
'bytes_sent': 19,
'image_id': UUID2,
'owner_id': image_meta['owner'],
'receiver_tenant_id': self.receiving_tenant,
'receiver_user_id': self.receiving_user,
'destination_ip': '1.2.3.4',
}
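        # bytes_sent matches the expected size here, so the notifier's 'info'
        # method is used; the mismatch/error path is covered in the next test.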
def fake_info(_event_type, _payload):
self.assertEqual(_payload, expected_payload)
called['notified'] = True
self.stubs.Set(self.serializer.notifier, 'info', fake_info)
glance.api.common.image_send_notification(19, 19, image_meta, req,
self.serializer.notifier)
self.assertTrue(called['notified'])
def test_image_send_notification_error(self):
"""Ensure image.send notification is sent on error."""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.remote_addr = '1.2.3.4'
req.context = self.context
image_meta = self.FIXTURE['image_meta']
called = {"notified": False}
expected_payload = {
'bytes_sent': 17,
'image_id': UUID2,
'owner_id': image_meta['owner'],
'receiver_tenant_id': self.receiving_tenant,
'receiver_user_id': self.receiving_user,
'destination_ip': '1.2.3.4',
}
def fake_error(_event_type, _payload):
self.assertEqual(_payload, expected_payload)
called['notified'] = True
self.stubs.Set(self.serializer.notifier, 'error', fake_error)
        # expected and actually sent bytes differ
glance.api.common.image_send_notification(17, 19, image_meta, req,
self.serializer.notifier)
self.assertTrue(called['notified'])
def test_redact_location(self):
"""Ensure location redaction does not change original metadata"""
image_meta = {'size': 3, 'id': '123', 'location': 'http://localhost'}
redacted_image_meta = {'size': 3, 'id': '123'}
copy_image_meta = copy.deepcopy(image_meta)
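        # redact_loc must not mutate its argument, so compare against a deep
        # copy taken beforehand.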
tmp_image_meta = glance.api.v1.images.redact_loc(image_meta)
self.assertEqual(image_meta, copy_image_meta)
self.assertEqual(tmp_image_meta, redacted_image_meta)
def test_noop_redact_location(self):
"""Check no-op location redaction does not change original metadata"""
image_meta = {'size': 3, 'id': '123'}
redacted_image_meta = {'size': 3, 'id': '123'}
copy_image_meta = copy.deepcopy(image_meta)
tmp_image_meta = glance.api.v1.images.redact_loc(image_meta)
self.assertEqual(image_meta, copy_image_meta)
self.assertEqual(tmp_image_meta, redacted_image_meta)
self.assertEqual(image_meta, redacted_image_meta)
class TestFilterValidator(base.IsolatedUnitTest):
def test_filter_validator(self):
self.assertFalse(glance.api.v1.filters.validate('size_max', -1))
self.assertTrue(glance.api.v1.filters.validate('size_max', 1))
self.assertTrue(glance.api.v1.filters.validate('protected', 'True'))
self.assertTrue(glance.api.v1.filters.validate('protected', 'FALSE'))
self.assertFalse(glance.api.v1.filters.validate('protected', '-1'))
class TestAPIProtectedProps(base.IsolatedUnitTest):
def setUp(self):
"""Establish a clean test environment"""
super(TestAPIProtectedProps, self).setUp()
self.mapper = routes.Mapper()
# turn on property protections
self.set_property_protections()
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper))
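        # FakeAuthMiddleware parses 'x-auth-token' values of the form
        # user:tenant:role; the property-protection tests below switch roles
        # by changing that header.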
db_api.setup_db_env()
db_api.get_engine()
db_models.unregister_models(db_api._ENGINE)
db_models.register_models(db_api._ENGINE)
def tearDown(self):
"""Clear the test environment"""
super(TestAPIProtectedProps, self).tearDown()
self.destroy_fixtures()
def destroy_fixtures(self):
# Easiest to just drop the models and re-create them...
db_models.unregister_models(db_api._ENGINE)
db_models.register_models(db_api._ENGINE)
def _create_admin_image(self, props={}):
request = unit_test_utils.get_fake_request(path='/images')
headers = {'x-image-meta-disk-format': 'ami',
'x-image-meta-container-format': 'ami',
'x-image-meta-name': 'foo',
'x-image-meta-size': '0',
'x-auth-token': 'user:tenant:admin'}
headers.update(props)
for k, v in headers.iteritems():
request.headers[k] = v
created_image = request.get_response(self.api)
res_body = json.loads(created_image.body)['image']
image_id = res_body['id']
return image_id
def test_prop_protection_with_create_and_permitted_role(self):
"""
        As admin role, create an image and verify permitted role 'member' can
create a protected property
"""
image_id = self._create_admin_image()
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'x-image-meta-property-x_owner_foo': 'bar'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['x_owner_foo'], 'bar')
def test_prop_protection_with_create_and_unpermitted_role(self):
"""
As admin role, create an image and verify unpermitted role
'fake_member' can *not* create a protected property
"""
image_id = self._create_admin_image()
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:fake_member',
'x-image-meta-property-x_owner_foo': 'bar'}
for k, v in headers.iteritems():
another_request.headers[k] = v
another_request.get_response(self.api)
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, webob.exc.HTTPForbidden.code)
self.assertIn("Property '%s' is protected" %
"x_owner_foo", output.body)
def test_prop_protection_with_show_and_permitted_role(self):
"""
As admin role, create an image with a protected property, and verify
permitted role 'member' can read that protected property via HEAD
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:member'}
for k, v in headers.iteritems():
another_request.headers[k] = v
res2 = another_request.get_response(self.api)
self.assertEqual(res2.headers['x-image-meta-property-x_owner_foo'],
'bar')
def test_prop_protection_with_show_and_unpermitted_role(self):
"""
As admin role, create an image with a protected property, and verify
        unpermitted role 'fake_role' can *not* read that protected property via
HEAD
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:fake_role'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers)
def test_prop_protection_with_get_and_permitted_role(self):
"""
As admin role, create an image with a protected property, and verify
permitted role 'member' can read that protected property via GET
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:member'}
for k, v in headers.iteritems():
another_request.headers[k] = v
res2 = another_request.get_response(self.api)
self.assertEqual(res2.headers['x-image-meta-property-x_owner_foo'],
'bar')
def test_prop_protection_with_get_and_unpermitted_role(self):
"""
As admin role, create an image with a protected property, and verify
        unpermitted role 'fake_role' can *not* read that protected property via
GET
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:fake_role'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers)
def test_prop_protection_with_detail_and_permitted_role(self):
"""
As admin role, create an image with a protected property, and verify
permitted role 'member' can read that protected property via
/images/detail
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/detail')
headers = {'x-auth-token': 'user:tenant:member'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
res_body = json.loads(output.body)['images'][0]
self.assertEqual(res_body['properties']['x_owner_foo'], 'bar')
def test_prop_protection_with_detail_and_unpermitted_role(self):
"""
As admin role, create an image with a protected property, and verify
        unpermitted role 'fake_role' can *not* read that protected property via
/images/detail
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/detail')
headers = {'x-auth-token': 'user:tenant:fake_role'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
res_body = json.loads(output.body)['images'][0]
self.assertNotIn('x-image-meta-property-x_owner_foo',
res_body['properties'])
def test_prop_protection_with_update_and_permitted_role(self):
"""
As admin role, create an image with protected property, and verify
permitted role 'member' can update that protected property
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'x-image-meta-property-x_owner_foo': 'baz'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['x_owner_foo'], 'baz')
def test_prop_protection_with_update_and_unpermitted_role(self):
"""
As admin role, create an image with protected property, and verify
unpermitted role 'fake_role' can *not* update that protected property
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:fake_role',
'x-image-meta-property-x_owner_foo': 'baz'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, webob.exc.HTTPForbidden.code)
self.assertIn("Property '%s' is protected" %
"x_owner_foo", output.body)
def test_prop_protection_update_without_read(self):
"""
Test protected property cannot be updated without read permission
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_update_only_prop': 'foo'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_update_only_prop': 'bar'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, webob.exc.HTTPForbidden.code)
self.assertIn("Property '%s' is protected" %
"spl_update_only_prop", output.body)
def test_prop_protection_update_noop(self):
"""
Test protected property update is allowed as long as the user has read
access and the value is unchanged
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_read_prop': 'foo'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_read_prop': 'foo'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['spl_read_prop'], 'foo')
self.assertEquals(output.status_int, 200)
def test_prop_protection_with_delete_and_permitted_role(self):
"""
As admin role, create an image with protected property, and verify
        permitted role 'member' can delete that protected property
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties'], {})
def test_prop_protection_with_delete_and_unpermitted_read(self):
"""
Test protected property cannot be deleted without read permission
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:fake_role',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, 200)
self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers)
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:admin'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertEqual(output.headers['x-image-meta-property-x_owner_foo'],
'bar')
def test_prop_protection_with_delete_and_unpermitted_delete(self):
"""
Test protected property cannot be deleted without delete permission
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_update_prop': 'foo'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, 403)
self.assertIn("Property '%s' is protected" %
"spl_update_prop", output.body)
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:admin'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertEqual(
output.headers['x-image-meta-property-spl_update_prop'], 'foo')
def test_read_protected_props_leak_with_update(self):
"""
        Verify that updating properties does not disclose properties we do not
        have read permission for
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_update_prop': '0',
'x-image-meta-property-foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_update_prop': '1',
'X-Glance-Registry-Purge-Props': 'False'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['spl_update_prop'], '1')
self.assertNotIn('foo', res_body['properties'])
def test_update_protected_props_mix_no_read(self):
"""
Create an image with two props - one only readable by admin, and one
        readable/updatable by member. Verify member can successfully update
their property while the admin owned one is ignored transparently
"""
image_id = self._create_admin_image(
{'x-image-meta-property-admin_foo': 'bar',
'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'x-image-meta-property-x_owner_foo': 'baz'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['x_owner_foo'], 'baz')
self.assertNotIn('admin_foo', res_body['properties'])
def test_update_protected_props_mix_read(self):
"""
Create an image with two props - one readable/updatable by admin, but
also readable by spl_role. The other is readable/updatable by
spl_role. Verify spl_role can successfully update their property but
not the admin owned one
"""
custom_props = {
'x-image-meta-property-spl_read_only_prop': '1',
'x-image-meta-property-spl_update_prop': '2'
}
image_id = self._create_admin_image(custom_props)
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
        # verify spl_role can update its own prop
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_read_only_prop': '1',
'x-image-meta-property-spl_update_prop': '1'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(output.status_int, 200)
self.assertEqual(res_body['properties']['spl_read_only_prop'], '1')
self.assertEqual(res_body['properties']['spl_update_prop'], '1')
# verify spl_role can not update admin controlled prop
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_read_only_prop': '2',
'x-image-meta-property-spl_update_prop': '1'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 403)
def test_delete_protected_props_mix_no_read(self):
"""
Create an image with two props - one only readable by admin, and one
        readable/deletable by member. Verify member can successfully delete
their property while the admin owned one is ignored transparently
"""
image_id = self._create_admin_image(
{'x-image-meta-property-admin_foo': 'bar',
'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertNotIn('x_owner_foo', res_body['properties'])
self.assertNotIn('admin_foo', res_body['properties'])
def test_delete_protected_props_mix_read(self):
"""
Create an image with two props - one readable/deletable by admin, but
also readable by spl_role. The other is readable/deletable by
        spl_role. Verify spl_role is forbidden to purge the properties in this
        scenario, since it cannot delete the read-only property.
"""
custom_props = {
'x-image-meta-property-spl_read_only_prop': '1',
'x-image-meta-property-spl_delete_prop': '2'
}
image_id = self._create_admin_image(custom_props)
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 403)
| apache-2.0 | -5,985,045,226,187,337,000 | 39.847893 | 79 | 0.570034 | false |
Sid1057/obstacle_detector | sample_extra_plane_transformer.py | 1 | 1461 | #!/usr/bin/python3
import cv2
import numpy as np
from obstacle_detector.perspective import inv_persp_new
from obstacle_detector.distance_calculator import spline_dist
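# Demo: warp the region in front of the camera into a top-down (bird's-eye)
# view using inverse perspective mapping and display it alongside the original
# frame.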
def video_test(input_video_path=None):
cx = 603
cy = 297
roi_width = 25
roi_length = 90
px_height_of_roi_length = 352
#int(
# spline_dist.get_rails_px_height_by_distance(roi_length))
#print(px_height_of_roi_length)
cap = cv2.VideoCapture(
input_video_path \
if input_video_path is not None \
else input('enter video path: '))
ret, frame = cap.read()
while(ret):
ret, frame = cap.read()
transformed_plane, pts1, M = inv_persp_new(
frame, (cx, cy), (roi_width, roi_length),
px_height_of_roi_length, 200)
extra_transformed_plane, pts1, M = inv_persp_new(
frame, (cx, cy), (roi_width, roi_length),
px_height_of_roi_length, 200,
extra_width=200 * 2)
cv2.imshow(
'plane of the way',
transformed_plane)
cv2.imshow(
'plane',
extra_transformed_plane)
cv2.imshow(
'original frame',
frame)
k = cv2.waitKey(1) & 0xff
if k == 27:
break
elif k == ord('s'):
cv2.imwrite('screen.png', extra_transformed_plane)
cap.release()
cv2.destroyAllWindows()
video_test('../../video/6.mp4')
| mit | 90,747,233,213,776,740 | 23.35 | 65 | 0.557153 | false |
zzjkf2009/Midterm_Astar | opencv/platforms/ios/build_framework.py | 1 | 10978 | #!/usr/bin/env python
"""
The script builds OpenCV.framework for iOS.
The built framework is universal: it can be used to build an app and run it on either the iOS simulator or a real device.
Usage:
./build_framework.py <outputdir>
By CMake conventions (and especially if you work with the OpenCV repository),
the output dir should not be a subdirectory of the OpenCV source tree.
The script will create <outputdir>, if it is missing, and a few of its subdirectories:
<outputdir>
build/
iPhoneOS-*/
[cmake-generated build tree for an iOS device target]
iPhoneSimulator-*/
[cmake-generated build tree for iOS simulator]
opencv2.framework/
[the framework content]
The script should handle minor OpenCV updates efficiently
- it does not recompile the library from scratch each time.
However, the opencv2.framework directory is erased and recreated on each run.
Adding the --dynamic parameter builds opencv2.framework as an App Store dynamic framework. Only iOS 8+ versions are supported.
"""
from __future__ import print_function
import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing
from subprocess import check_call, check_output, CalledProcessError
def execute(cmd, cwd = None):
print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr)
retcode = check_call(cmd, cwd = cwd)
if retcode != 0:
raise Exception("Child returned:", retcode)
def getXCodeMajor():
ret = check_output(["xcodebuild", "-version"])
m = re.match(r'XCode\s+(\d)\..*', ret, flags=re.IGNORECASE)
if m:
return int(m.group(1))
return 0
class Builder:
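    # Orchestrates the per-target cmake/xcodebuild runs and assembles the final
    # opencv2.framework: a static framework by default, or a dynamic one when
    # constructed with dynamic=True (the --dynamic command-line flag).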
def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, targets):
self.opencv = os.path.abspath(opencv)
self.contrib = None
if contrib:
modpath = os.path.join(contrib, "modules")
if os.path.isdir(modpath):
self.contrib = os.path.abspath(modpath)
else:
print("Note: contrib repository is bad - modules subfolder not found", file=sys.stderr)
self.dynamic = dynamic
self.bitcodedisabled = bitcodedisabled
self.exclude = exclude
self.targets = targets
def getBD(self, parent, t):
if len(t[0]) == 1:
res = os.path.join(parent, 'build-%s-%s' % (t[0][0].lower(), t[1].lower()))
else:
res = os.path.join(parent, 'build-%s' % t[1].lower())
if not os.path.isdir(res):
os.makedirs(res)
return os.path.abspath(res)
def _build(self, outdir):
outdir = os.path.abspath(outdir)
if not os.path.isdir(outdir):
os.makedirs(outdir)
mainWD = os.path.join(outdir, "build")
dirs = []
xcode_ver = getXCodeMajor()
if self.dynamic:
alltargets = self.targets
else:
# if we are building a static library, we must build each architecture separately
alltargets = []
for t in self.targets:
for at in t[0]:
current = ( [at], t[1] )
alltargets.append(current)
for t in alltargets:
mainBD = self.getBD(mainWD, t)
dirs.append(mainBD)
cmake_flags = []
if self.contrib:
cmake_flags.append("-DOPENCV_EXTRA_MODULES_PATH=%s" % self.contrib)
if xcode_ver >= 7 and t[1] == 'iPhoneOS' and self.bitcodedisabled == False:
cmake_flags.append("-DCMAKE_C_FLAGS=-fembed-bitcode")
cmake_flags.append("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
self.buildOne(t[0], t[1], mainBD, cmake_flags)
if self.dynamic == False:
self.mergeLibs(mainBD)
self.makeFramework(outdir, dirs)
def build(self, outdir):
try:
self._build(outdir)
except Exception as e:
print("="*60, file=sys.stderr)
print("ERROR: %s" % e, file=sys.stderr)
print("="*60, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
sys.exit(1)
def getToolchain(self, arch, target):
return None
def getCMakeArgs(self, arch, target):
args = [
"cmake",
"-GXcode",
"-DAPPLE_FRAMEWORK=ON",
"-DCMAKE_INSTALL_PREFIX=install",
"-DCMAKE_BUILD_TYPE=Release",
] + ([
"-DBUILD_SHARED_LIBS=ON",
"-DCMAKE_MACOSX_BUNDLE=ON",
"-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED=NO",
] if self.dynamic else [])
if len(self.exclude) > 0:
args += ["-DBUILD_opencv_world=OFF"] if not self.dynamic else []
args += ["-DBUILD_opencv_%s=OFF" % m for m in self.exclude]
return args
def getBuildCommand(self, archs, target):
buildcmd = [
"xcodebuild",
]
if self.dynamic:
buildcmd += [
"IPHONEOS_DEPLOYMENT_TARGET=8.0",
"ONLY_ACTIVE_ARCH=NO",
]
for arch in archs:
buildcmd.append("-arch")
buildcmd.append(arch.lower())
else:
arch = ";".join(archs)
buildcmd += [
"IPHONEOS_DEPLOYMENT_TARGET=6.0",
"ARCHS=%s" % arch,
]
buildcmd += [
"-sdk", target.lower(),
"-configuration", "Release",
"-parallelizeTargets",
"-jobs", str(multiprocessing.cpu_count()),
] + (["-target","ALL_BUILD"] if self.dynamic else [])
return buildcmd
def getInfoPlist(self, builddirs):
return os.path.join(builddirs[0], "ios", "Info.plist")
def buildOne(self, arch, target, builddir, cmakeargs = []):
# Run cmake
toolchain = self.getToolchain(arch, target)
cmakecmd = self.getCMakeArgs(arch, target) + \
(["-DCMAKE_TOOLCHAIN_FILE=%s" % toolchain] if toolchain is not None else [])
if target.lower().startswith("iphoneos"):
cmakecmd.append("-DENABLE_NEON=ON")
cmakecmd.append(self.opencv)
cmakecmd.extend(cmakeargs)
execute(cmakecmd, cwd = builddir)
# Clean and build
clean_dir = os.path.join(builddir, "install")
if os.path.isdir(clean_dir):
shutil.rmtree(clean_dir)
buildcmd = self.getBuildCommand(arch, target)
execute(buildcmd + ["-target", "ALL_BUILD", "build"], cwd = builddir)
execute(["cmake", "-P", "cmake_install.cmake"], cwd = builddir)
def mergeLibs(self, builddir):
res = os.path.join(builddir, "lib", "Release", "libopencv_merged.a")
libs = glob.glob(os.path.join(builddir, "install", "lib", "*.a"))
libs3 = glob.glob(os.path.join(builddir, "install", "share", "OpenCV", "3rdparty", "lib", "*.a"))
print("Merging libraries:\n\t%s" % "\n\t".join(libs + libs3), file=sys.stderr)
execute(["libtool", "-static", "-o", res] + libs + libs3)
def makeFramework(self, outdir, builddirs):
name = "opencv2"
# set the current dir to the dst root
framework_dir = os.path.join(outdir, "%s.framework" % name)
if os.path.isdir(framework_dir):
shutil.rmtree(framework_dir)
os.makedirs(framework_dir)
if self.dynamic:
dstdir = framework_dir
libname = "opencv2.framework/opencv2"
else:
dstdir = os.path.join(framework_dir, "Versions", "A")
libname = "libopencv_merged.a"
# copy headers from one of build folders
shutil.copytree(os.path.join(builddirs[0], "install", "include", "opencv2"), os.path.join(dstdir, "Headers"))
# make universal static lib
libs = [os.path.join(d, "lib", "Release", libname) for d in builddirs]
lipocmd = ["lipo", "-create"]
lipocmd.extend(libs)
lipocmd.extend(["-o", os.path.join(dstdir, name)])
print("Creating universal library from:\n\t%s" % "\n\t".join(libs), file=sys.stderr)
execute(lipocmd)
# dynamic framework has different structure, just copy the Plist directly
if self.dynamic:
resdir = dstdir
shutil.copyfile(self.getInfoPlist(builddirs), os.path.join(resdir, "Info.plist"))
else:
# copy Info.plist
resdir = os.path.join(dstdir, "Resources")
os.makedirs(resdir)
shutil.copyfile(self.getInfoPlist(builddirs), os.path.join(resdir, "Info.plist"))
# make symbolic links
links = [
(["A"], ["Versions", "Current"]),
(["Versions", "Current", "Headers"], ["Headers"]),
(["Versions", "Current", "Resources"], ["Resources"]),
(["Versions", "Current", name], [name])
]
for l in links:
s = os.path.join(*l[0])
d = os.path.join(framework_dir, *l[1])
os.symlink(s, d)
class iOSBuilder(Builder):
def getToolchain(self, arch, target):
toolchain = os.path.join(self.opencv, "platforms", "ios", "cmake", "Toolchains", "Toolchain-%s_Xcode.cmake" % target)
return toolchain
def getCMakeArgs(self, arch, target):
arch = ";".join(arch)
args = Builder.getCMakeArgs(self, arch, target)
args = args + [
'-DIOS_ARCH=%s' % arch
]
return args
if __name__ == "__main__":
folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for iOS.')
parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework')
parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)')
parser.add_argument('--disable-bitcode', default=False, dest='bitcodedisabled', action='store_true', help='disable bitcode (enabled by default)')
args = parser.parse_args()
b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without,
[
(["armv7", "arm64"], "iPhoneOS"),
] if os.environ.get('BUILD_PRECOMMIT', None) else
[
(["armv7", "armv7s", "arm64"], "iPhoneOS"),
(["i386", "x86_64"], "iPhoneSimulator"),
])
b.build(args.out)
| mit | -2,232,820,552,741,659,100 | 37.250871 | 159 | 0.578338 | false |
arteria/django-ar-organizations | organizations/backends/tokens.py | 1 | 1567 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.crypto import constant_time_compare
from django.utils.http import base36_to_int
REGISTRATION_TIMEOUT_DAYS = getattr(settings, 'REGISTRATION_TIMEOUT_DAYS', 15)
class RegistrationTokenGenerator(PasswordResetTokenGenerator):
"""
Very similar to the password reset token generator, but should
allow slightly greater time for timeout, so it only updates one
method, replacing PASSWORD_RESET_TIMEOUT_DAYS from the global
settings with REGISTRATION_TIMEOUT_DAYS from application
settings.
Has the additional interface method:
-- make_token(user): Returns a token that can be used once to do a
password reset for the given user.
"""
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > REGISTRATION_TIMEOUT_DAYS:
return False
return True
| bsd-2-clause | -1,714,996,012,051,425,000 | 31.645833 | 87 | 0.653478 | false |
mrcodehang/cqut-chat-server | configs/__init__.py | 1 | 1131 |
configs = {}
configs['app_key'] = 'cbe36a100c9977c74c296a6777e920ec'
configs['enviroment'] = 'development'
configs['appid'] = '12353'
configs['content'] = '[CQUT-CHAT] Your verification code is: '
def save_config(key, value):
configs[key] = value
def get_config(key):
return configs.get(key)
username_invalid = { 'code': 1, 'msg': 'Username must be 4 to 16 characters, with no spaces or quotes' }
password_invalid = { 'code': 2, 'msg': 'Password must be 6 to 16 characters, with no quotes' }
username_unique = { 'code': 3, 'msg': 'Username is already taken' }
mob_number_unique = { 'code': 4, 'msg': 'Mobile number is already taken' }
mob_number_invalid = { 'code': 5, 'msg': 'Mobile number format is invalid' }
vcode_invalid = { 'code': 6, 'msg': 'Verification code is incorrect, please request a new one' }
account_is_none = { 'code': 7, 'msg': 'Login account (mobile number/username) cannot be empty' }
token_invalid = { 'code': 8, 'msg': 'Token is incorrect or has expired' }
__all__ = [save_config, get_config,
username_invalid, password_invalid,
username_unique, mob_number_unique,
vcode_invalid, account_is_none, token_invalid
] | mpl-2.0 | 8,151,344,901,222,302,000 | 31.551724 | 66 | 0.625663 | false |
DigiThinkIT/stem | test/integ/process.py | 1 | 4793 | """
Tests the stem.process functions with various use cases.
"""
import shutil
import subprocess
import tempfile
import time
import unittest
import stem.prereq
import stem.process
import stem.socket
import stem.util.system
import stem.version
import test.runner
try:
# added in python 3.3
from unittest.mock import patch
except ImportError:
from mock import patch
class TestProcess(unittest.TestCase):
def setUp(self):
self.data_directory = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.data_directory)
def test_launch_tor_with_config(self):
"""
Exercises launch_tor_with_config.
"""
if test.runner.only_run_once(self, 'test_launch_tor_with_config'):
return
# Launch tor without a torrc, but with a control port. Confirms that this
# works by checking that we're still able to access the new instance.
runner = test.runner.get_runner()
tor_process = stem.process.launch_tor_with_config(
tor_cmd = runner.get_tor_command(),
config = {
'SocksPort': '2777',
'ControlPort': '2778',
'DataDirectory': self.data_directory,
},
completion_percent = 5
)
control_socket = None
try:
control_socket = stem.socket.ControlPort(port = 2778)
stem.connection.authenticate(control_socket, chroot_path = runner.get_chroot())
# exercises the socket
control_socket.send('GETCONF ControlPort')
getconf_response = control_socket.recv()
self.assertEquals('ControlPort=2778', str(getconf_response))
finally:
if control_socket:
control_socket.close()
tor_process.kill()
tor_process.wait()
def test_launch_tor_with_timeout(self):
"""
Runs launch_tor where it times out before completing.
"""
if test.runner.only_run_once(self, 'test_launch_tor_with_timeout'):
return
runner = test.runner.get_runner()
start_time = time.time()
config = {'SocksPort': '2777', 'DataDirectory': self.data_directory}
self.assertRaises(OSError, stem.process.launch_tor_with_config, config, runner.get_tor_command(), 100, None, 2)
runtime = time.time() - start_time
if not (runtime > 2 and runtime < 3):
self.fail('Test should have taken 2-3 seconds, took %i instead' % runtime)
@patch('os.getpid')
def test_take_ownership_via_pid(self, getpid_mock):
"""
Checks that the tor process quits after we do if we set take_ownership. To
test this we spawn a process and trick tor into thinking that it is us.
"""
if not stem.util.system.is_available('sleep'):
test.runner.skip(self, "('sleep' command is unavailable)")
return
elif test.runner.only_run_once(self, 'test_take_ownership_via_pid'):
return
elif test.runner.require_version(self, stem.version.Requirement.TAKEOWNERSHIP):
return
sleep_process = subprocess.Popen(['sleep', '60'])
getpid_mock.return_value = str(sleep_process.pid)
tor_process = stem.process.launch_tor_with_config(
tor_cmd = test.runner.get_runner().get_tor_command(),
config = {
'SocksPort': '2777',
'ControlPort': '2778',
'DataDirectory': self.data_directory,
},
completion_percent = 5,
take_ownership = True,
)
# Kill the sleep command. Tor should quit shortly after.
sleep_process.kill()
sleep_process.communicate()
# tor polls for the process every fifteen seconds so this may take a
# while...
for seconds_waited in xrange(30):
if tor_process.poll() == 0:
return # tor exited
time.sleep(1)
self.fail("tor didn't quit after the process that owned it terminated")
def test_take_ownership_via_controller(self):
"""
Checks that the tor process quits after the controller that owns it
    connects, then disconnects.
"""
if test.runner.only_run_once(self, 'test_take_ownership_via_controller'):
return
elif test.runner.require_version(self, stem.version.Requirement.TAKEOWNERSHIP):
return
tor_process = stem.process.launch_tor_with_config(
tor_cmd = test.runner.get_runner().get_tor_command(),
config = {
'SocksPort': '2777',
'ControlPort': '2778',
'DataDirectory': self.data_directory,
},
completion_percent = 5,
take_ownership = True,
)
# We're the controlling process. Just need to connect then disconnect.
controller = stem.control.Controller.from_port(port = 2778)
controller.authenticate()
controller.close()
# give tor a few seconds to quit
for seconds_waited in xrange(5):
if tor_process.poll() == 0:
return # tor exited
time.sleep(1)
self.fail("tor didn't quit after the controller that owned it disconnected")
| lgpl-3.0 | -1,561,313,229,695,803,600 | 27.700599 | 115 | 0.662007 | false |
josdaza/deep-toolbox | TensorFlow/seq2seq/Main_simple.py | 1 | 4953 | import numpy as np
import tensorflow as tf
import helpers
tf.reset_default_graph()
PAD = 0
EOS = 1
VOCAB_SIZE = 10
EMBEDDINGS_SIZE = 20
ENC_HIDDEN_UNITS = 20
DEC_HIDDEN_UNITS = 20
# Fake Function to Emulate a series of encoder and decoder sequences
# Given encoder_inputs [5, 6, 7], decoder_targets would be [5, 6, 7, 1],
# where 1 is for EOS, and decoder_inputs would be [1, 5, 6, 7]
# decoder_inputs are lagged by 1 step, passing previous token as input at current step.
def next_feed():
batch = next(batches)
encoder_inputs_, _ = helpers.batch(batch)
decoder_targets_, _ = helpers.batch(
[(sequence) + [EOS] for sequence in batch]
)
decoder_inputs_, _ = helpers.batch(
[[EOS] + (sequence) for sequence in batch]
)
return {
encoder_inputs: encoder_inputs_,
decoder_inputs: decoder_inputs_,
decoder_targets: decoder_targets_,
}
if __name__ == "__main__":
sess = tf.InteractiveSession()
# Loading Data [Toy example]
batch_ = [[6], [3, 4], [9, 8, 7]]
batch_, batch_length_ = helpers.batch(batch_)
print('batch_encoded:\n' + str(batch_))
din_, dlen_ = helpers.batch(np.ones(shape=(3, 1), dtype=np.int32),max_sequence_length=4)
print('decoder inputs:\n' + str(din_))
# Random Initialization of Embeddings --> here we share them for Encoder and Decoder (could be different)
embeddings = tf.Variable(tf.random_uniform([VOCAB_SIZE, EMBEDDINGS_SIZE], -1.0, 1.0), dtype=tf.float32)
# ----- ENCODER -----
encoder_inputs = tf.placeholder(shape=(None, None),
dtype=tf.int32, name='encoder_inputs')# [encoder_max_time, batch_size]
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
encoder_cell = tf.contrib.rnn.LSTMCell(ENC_HIDDEN_UNITS)
# RNN
encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(
encoder_cell, encoder_inputs_embedded,
dtype=tf.float32, time_major=True)
# We are only interested in the Encoder Final State (Thought Vector!),
# to feed (condition) the decoder with it
del encoder_outputs
# ----- DECODER -----
decoder_targets = tf.placeholder(shape=(None, None),
dtype=tf.int32, name='decoder_targets') # [decoder_max_time, batch_size]
decoder_inputs = tf.placeholder(shape=(None, None),
dtype=tf.int32, name='decoder_inputs') # [decoder_max_time, batch_size]
decoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, decoder_inputs)
decoder_cell = tf.contrib.rnn.LSTMCell(DEC_HIDDEN_UNITS)
# RNN
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(
decoder_cell, decoder_inputs_embedded,
initial_state=encoder_final_state,
dtype=tf.float32, time_major=True, scope="plain_decoder")
# Projection Layer [max_time, batch_size, hidden_units] --> [max_time, batch_size, VOCAB_SIZE]
decoder_logits = tf.contrib.layers.linear(decoder_outputs, VOCAB_SIZE) # shape=[?, ?, VOCAB_SIZE]
decoder_prediction = tf.argmax(decoder_logits, axis=2) # Predict Output over vocabulary
# ----- LOSS & OPTIMIZATION -----
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(decoder_targets, depth=VOCAB_SIZE, dtype=tf.float32),
logits=decoder_logits)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
# Test Forward Pass [To check if Everything is wired correctly!]
pred_ = sess.run(decoder_prediction,
feed_dict={
encoder_inputs: batch_,
decoder_inputs: din_,
})
print('decoder predictions:\n' + str(pred_))
# ----- TRAINING -----
batch_size = 100
batches = helpers.random_sequences(length_from=3, length_to=8,
vocab_lower=2, vocab_upper=10,
batch_size=batch_size)
print('head of the batch:')
for seq in next(batches)[:10]:
print(seq)
loss_track = []
max_batches = 3001
batches_in_epoch = 1000
try:
for batch in range(max_batches):
fd = next_feed()
_, l = sess.run([train_op, loss], fd)
loss_track.append(l)
if batch == 0 or batch % batches_in_epoch == 0:
print('batch {}'.format(batch))
print(' minibatch loss: {}'.format(sess.run(loss, fd)))
predict_ = sess.run(decoder_prediction, fd)
for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].T, predict_.T)):
print(' sample {}:'.format(i + 1))
print(' input > {}'.format(inp))
print(' predicted > {}'.format(pred))
if i >= 2:
break
print()
except KeyboardInterrupt:
print('training interrupted')
| mit | -3,960,815,499,132,609,000 | 38.943548 | 109 | 0.608924 | false |
jaklinger/nesta_dataflow | collect_data/utils/uae/business_directory/dcc.py | 1 | 3272 | '''
dcc
----
'''
from bs4 import BeautifulSoup
import logging
import requests
import time
# Local imports
from utils.common.browser import SelfClosingBrowser
from utils.common.datapipeline import DataPipeline
def get_field_from_box(field, box):
    '''Return the text of the named field from a result box; raise ValueError if the field is missing.'''
for row in box.find("ul"):
# Accept rows containing spans
try:
spans = row.find_all("span")
except AttributeError:
continue
# Match the first span to the field name
if spans[0].text != field:
continue
# Return the field data
return spans[1].text
raise ValueError("Could not find field "+field)
def get_response_from_url(url, max_tries=3):
'''
Returns response if no ConnectionError exception
'''
n_tries = 0
while True:
n_tries += 1
# Try to get the URL
try:
r = requests.get(url)
return r
# Allow connection error, then retry
except requests.exceptions.ConnectionError as err:
if n_tries == max_tries:
raise err
logging.warning("Connection error to %s", (url))
time.sleep(10)
def run(config):
    '''Scrape the configured directory listing and write company records through DataPipeline.'''
# Fire up a browser at the top url
top_url = config["parameters"]["src"]
cat_pages = {}
with SelfClosingBrowser(top_url=top_url) as b:
# Scrape pages until no page found
found_page = True
while found_page:
print("Found page")
# Get the category web pages
html_list = b.find_element_by_class_name("dcci_cat")
list_items = html_list.find_elements_by_tag_name("li")
for item in list_items:
link = item.find_element_by_tag_name("a")
cat_pages[link.text] = link.get_attribute('href')
# Click the next page and get the table
found_page = b.find_and_click_link("Next »")
# Process each category's URL to find companies
data = {}
for cat, url in cat_pages.items():
r = get_response_from_url(url)
# No bad statuses
if r.status_code != 200:
continue
# Loop over text boxes in the soup
soup = BeautifulSoup(r.text, "lxml")
boxes = soup.find_all("div", class_="result_box")
for box in boxes:
# Get the company name
title_box = box.find("div", class_="title")
title_link = title_box.find("a")
company_name = title_link.text
# Get the website
company_url = get_field_from_box("Website", box)
city = get_field_from_box("City", box)
# Check whether this URL has been processed before
if company_name not in data:
data[company_name] = dict(category=[cat], url=company_url,
city=city, company_name=company_name)
else:
data[company_name]["category"].append(cat)
logging.info("\tGot %s rows", len(data))
# Write data
logging.info("\tWriting to table")
with DataPipeline(config) as dp:
for _,row in data.items():
row["category"] = ";".join(row["category"]) # Pretty hacky
dp.insert(row)
| mit | 3,339,632,406,249,094,700 | 31.386139 | 79 | 0.558239 | false |
vasily-v-ryabov/pywinauto-64 | pywinauto/unittests/test_HwndWrapper.py | 1 | 19988 | # encoding: utf-8
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from __future__ import print_function
from __future__ import unicode_literals
"Tests for HwndWrapper"
import time
import pprint
import pdb
import warnings
import ctypes
import locale
import sys
sys.path.append(".")
from pywinauto.application import Application
from pywinauto.controls.HwndWrapper import HwndWrapper
from pywinauto import win32structures, win32defines
from pywinauto.findwindows import WindowNotFoundError
from pywinauto.sysinfo import is_x64_Python, is_x64_OS
__revision__ = "$Revision: 234 $"
try:
from pywinauto.controls.HwndWrapper import *
except ImportError:
# allow it to be imported in a dev environment
import sys
pywinauto_imp = "\\".join(__file__.split('\\')[:-3])
print("sdfdsf", pywinauto_imp)
sys.path.append(pywinauto_imp)
from pywinauto.controls.HwndWrapper import *
import unittest
class HwndWrapperTests(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
self.app = Application()
if is_x64_Python() or not is_x64_OS():
self.app.start_(r"C:\Windows\System32\calc.exe")
else:
self.app.start_(r"C:\Windows\SysWOW64\calc.exe")
self.dlg = self.app.Calculator
self.dlg.MenuSelect('View->Scientific\tAlt+2')
self.ctrl = HwndWrapper(self.dlg.Button2.handle) # Backspace
def tearDown(self):
"Close the application after tests"
# close the application
#self.dlg.TypeKeys("%{F4}")
#self.dlg.Close()
self.app.kill_()
def testInvalidHandle(self):
"Test that an exception is raised with an invalid window handle"
self.assertRaises(InvalidWindowHandle, HwndWrapper, -1)
#def testText(self):
# "Test getting the window Text of the dialog"
# self.assertEquals(self.dlg.WindowText(), "Untitled - Notepad")
def testFriendlyClassName(self):
"Test getting the friendly classname of the dialog"
self.assertEquals(self.ctrl.FriendlyClassName(), "Button")
def testClass(self):
"Test getting the classname of the dialog"
self.assertEquals(self.ctrl.Class(), "Button")
def testWindowText(self):
"Test getting the window Text of the dialog"
self.assertEquals(self.ctrl.WindowText(), '\uf013') #"Backspace")
def testStyle(self):
self.dlg.Style()
self.assertEquals(self.ctrl.Style(),
win32defines.WS_CHILD |
win32defines.WS_VISIBLE |
win32defines.BS_PUSHBUTTON |
win32defines.BS_TEXT)
def testExStyle(self):
self.assertEquals(self.ctrl.ExStyle(),
win32defines.WS_EX_NOPARENTNOTIFY |
win32defines.WS_EX_LEFT |
win32defines.WS_EX_LTRREADING |
win32defines.WS_EX_RIGHTSCROLLBAR)
"""self.assertEquals(self.dlg.ExStyle(),
win32defines.WS_EX_WINDOWEDGE |
win32defines.WS_EX_LEFT |
win32defines.WS_EX_LTRREADING |
win32defines.WS_EX_RIGHTSCROLLBAR |
win32defines.WS_EX_CONTROLPARENT |
win32defines.WS_EX_APPWINDOW)"""
def testControlID(self):
self.assertEquals(self.ctrl.ControlID(), 83)
self.dlg.ControlID()
def testUserData(self):
self.ctrl.UserData()
self.dlg.UserData()
def testContextHelpID(self):
self.ctrl.ContextHelpID()
self.dlg.ContextHelpID()
def testIsVisible(self):
self.assertEqual(self.ctrl.IsVisible(), True)
self.assertEqual(self.dlg.IsVisible(), True)
def testIsUnicode(self):
self.assertEqual(self.ctrl.IsUnicode(), True)
self.assertEqual(self.dlg.IsUnicode(), True)
def testIsEnabled(self):
self.assertEqual(self.ctrl.IsEnabled(), True)
self.assertEqual(self.dlg.IsEnabled(), True)
self.assertEqual(self.dlg.ChildWindow(
title = '%', enabled_only = False).IsEnabled(), False)
def testCloseClick_bug(self):
self.dlg.MenuSelect('Help->About Calculator')
self.app.AboutCalculator.CloseButton.CloseClick()
Timings.closeclick_dialog_close_wait = .5
try:
self.app.AboutCalculator.CloseClick()
except timings.TimeoutError:
pass
self.app.AboutCalculator.Close()
#self.assertEquals(self.app.StatisticsBox.Exists(), False)
def testRectangle(self):
"Test getting the rectangle of the dialog"
rect = self.dlg.Rectangle()
self.assertNotEqual(rect.top, None)
self.assertNotEqual(rect.left, None)
self.assertNotEqual(rect.bottom, None)
self.assertNotEqual(rect.right, None)
self.assertEqual(rect.height(), 310)
self.assertEqual(rect.width(), 413)
def testClientRect(self):
rect = self.dlg.Rectangle()
cli = self.dlg.ClientRect()
self.assertEqual(cli.left , 0)
self.assertEqual(cli.top , 0)
assert(cli.width() < rect.width())
assert(cli.height() < rect.height())
def testFont(self):
self.assertNotEqual(self.dlg.Font(), self.ctrl.Font())
def ProcessID(self):
self.assertEqual(self.ctrl.ProcessID(), self.dlg.ProcessID)
self.assertNotEqual(self.ctrl.ProcessID(), 0)
def testHasStyle(self):
self.assertEqual(self.ctrl.HasStyle(win32defines.WS_CHILD), True)
self.assertEqual(self.dlg.HasStyle(win32defines.WS_CHILD), False)
self.assertEqual(self.ctrl.HasStyle(win32defines.WS_SYSMENU), False)
self.assertEqual(self.dlg.HasStyle(win32defines.WS_SYSMENU), True)
def testHasExStyle(self):
self.assertEqual(self.ctrl.HasExStyle(win32defines.WS_EX_NOPARENTNOTIFY), True)
self.assertEqual(self.dlg.HasExStyle(win32defines.WS_EX_NOPARENTNOTIFY), False)
self.assertEqual(self.ctrl.HasExStyle(win32defines.WS_EX_APPWINDOW), False)
#self.assertEqual(self.dlg.HasExStyle(win32defines.WS_EX_APPWINDOW), True)
def testIsDialog(self):
self.assertEqual(self.ctrl.IsDialog(), False)
self.assertEqual(self.dlg.IsDialog(), True)
def testMenuItems(self):
self.assertEqual(self.ctrl.MenuItems(), [])
self.assertEqual(self.dlg.MenuItems()[1]['Text'], '&Edit')
def testParent(self):
self.assertEqual(self.ctrl.Parent().Parent().Parent(), self.dlg.handle)
def testTopLevelParent(self):
self.assertEqual(self.ctrl.TopLevelParent(), self.dlg.handle)
self.assertEqual(self.dlg.TopLevelParent(), self.dlg.handle)
def testTexts(self):
self.assertEqual(self.dlg.Texts(), ['Calculator'])
self.assertEqual(self.ctrl.Texts(), ['\uf013']) #u'Backspace'])
self.assertEqual(self.dlg.ChildWindow(class_name='Static', ctrl_index=5).Texts(), ['0'])
def testClientRects(self):
self.assertEqual(self.ctrl.ClientRects()[0], self.ctrl.ClientRect())
self.assertEqual(self.dlg.ClientRects()[0], self.dlg.ClientRect())
def testFonts(self):
self.assertEqual(self.ctrl.Fonts()[0], self.ctrl.Font())
self.assertEqual(self.dlg.Fonts()[0], self.dlg.Font())
def testChildren(self):
self.assertEqual(self.ctrl.Children(), [])
self.assertNotEqual(self.dlg.Children(), [])
def testIsChild(self):
self.assertEqual(self.ctrl.IsChild(self.dlg.WrapperObject()), True)
self.assertEqual(self.dlg.IsChild(self.ctrl), False)
def testSendMessage(self):
vk = self.dlg.SendMessage(win32defines.WM_GETDLGCODE)
self.assertEqual(0, vk)
code = self.dlg.Inv.SendMessage(win32defines.WM_GETDLGCODE)
        self.assertEqual(0, code)
def testSendMessageTimeout(self):
vk = self.dlg.SendMessageTimeout(win32defines.WM_GETDLGCODE)
self.assertEqual(0, vk)
code = self.dlg.Inv.SendMessageTimeout(win32defines.WM_GETDLGCODE)
        self.assertEqual(0, code)
def testPostMessage(self):
self.assertNotEquals(0, self.dlg.PostMessage(win32defines.WM_PAINT))
self.assertNotEquals(0, self.dlg.Inv.PostMessage(win32defines.WM_PAINT))
# def testNotifyMenuSelect(self):
# "Call NotifyMenuSelect to ensure it does not raise"
# self.ctrl.NotifyMenuSelect(1234)
# self.dlg.NotifyMenuSelect(1234)
def testNotifyParent(self):
"Call NotifyParent to ensure it does not raise"
self.ctrl.NotifyParent(1234)
#self.dlg.NotifyParent(1234)
def testGetProperties(self):
"Test getting the properties for the HwndWrapped control"
props = self.dlg.GetProperties()
self.assertEquals(
self.dlg.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.dlg.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.dlg, prop_name)(), props[prop_name])
# def testCaptureAsImage(self):
# pass
def testEquals(self):
self.assertNotEqual(self.ctrl, self.dlg.handle)
self.assertEqual(self.ctrl, self.ctrl.handle)
self.assertEqual(self.ctrl, self.ctrl)
# def testVerifyActionable(self):
# self.assertRaises()
# def testVerifyEnabled(self):
# self.assertRaises()
# def testVerifyVisible(self):
# self.assertRaises()
def testMoveWindow_same(self):
"Test calling movewindow without any parameters"
prevRect = self.dlg.Rectangle()
self.dlg.MoveWindow()
self.assertEquals(prevRect, self.dlg.Rectangle())
def testMoveWindow(self):
"Test moving the window"
dlgClientRect = self.dlg.Rectangle() #.ClientAreaRect()
prev_rect = self.ctrl.Rectangle() - dlgClientRect
new_rect = win32structures.RECT(prev_rect)
new_rect.left -= 1
new_rect.top -= 1
new_rect.right += 2
new_rect.bottom += 2
self.ctrl.MoveWindow(
new_rect.left,
new_rect.top,
new_rect.width(),
new_rect.height(),
)
time.sleep(0.1)
print('prev_rect = ', prev_rect)
print('new_rect = ', new_rect)
print('dlgClientRect = ', dlgClientRect)
print('self.ctrl.Rectangle() = ', self.ctrl.Rectangle())
self.assertEquals(
self.ctrl.Rectangle(),
new_rect + dlgClientRect)
self.ctrl.MoveWindow(prev_rect)
self.assertEquals(
self.ctrl.Rectangle(),
prev_rect + dlgClientRect)
def testMaximize(self):
self.dlg.Maximize()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWMAXIMIZED)
self.dlg.Restore()
def testMinimize(self):
self.dlg.Minimize()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWMINIMIZED)
self.dlg.Restore()
def testRestore(self):
self.dlg.Maximize()
self.dlg.Restore()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWNORMAL)
self.dlg.Minimize()
self.dlg.Restore()
self.assertEquals(self.dlg.GetShowState(), win32defines.SW_SHOWNORMAL)
def testGetFocus(self):
self.assertNotEqual(self.dlg.GetFocus(), None)
self.assertEqual(self.dlg.GetFocus(), self.ctrl.GetFocus())
self.dlg.Radians.SetFocus()
self.assertEqual(self.dlg.GetFocus(), self.dlg.Radians.handle)
def testSetFocus(self):
self.assertNotEqual(self.dlg.GetFocus(), self.dlg.Radians.handle)
self.dlg.Radians.SetFocus()
self.assertEqual(self.dlg.GetFocus(), self.dlg.Radians.handle)
def testMenuSelect(self):
"Test selecting a menu item"
if not self.dlg.MenuItem("View -> Digit grouping").IsChecked():
self.dlg.MenuSelect("View -> Digit grouping")
self.dlg.TypeKeys("1234567")
self.dlg.MenuSelect("Edit->Copy\tCtrl+C")
self.dlg.CE.Click()
self.assertEquals(self.dlg.ChildWindow(class_name='Static', ctrl_index=5).Texts()[0], "0")
self.dlg.MenuSelect("Edit->Paste\tCtrl+V")
self.assertEquals(self.dlg.ChildWindow(class_name='Static', ctrl_index=5).Texts()[0], "1 234 567")
def testClose(self):
"Test the Close() method of windows"
# open about dialog
self.dlg.MenuSelect('Help->About Calculator')
# make sure it is open and visible
self.assertTrue(self.app.Window_(title='About Calculator').IsVisible(), True)
# close it
self.app.Window_(title='About Calculator', class_name='#32770').Close(1)
# make sure that it is not visible
try:
#self.assertRaises(WindowNotFoundError, self.app.Window_(title='About Calculator', class_name='#32770').WrapperObject())
# vvryabov: TimeoutError is caught by assertRaises, so the second raise is not caught correctly
self.app.Window_(title='About Calculator', class_name='#32770').WrapperObject()
except WindowNotFoundError:
print('WindowNotFoundError exception is raised as expected. OK.')
# make sure the main calculator dialog is still open
self.assertEquals(self.dlg.IsVisible(), True)
class HwndWrapperMouseTests(unittest.TestCase):
"Unit tests for mouse actions of the HwndWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
self.app = Application()
if is_x64_Python() or not is_x64_OS():
self.app.start_(r"C:\Windows\System32\notepad.exe")
else:
self.app.start_(r"C:\Windows\SysWOW64\notepad.exe")
# Get the old font
self.app.UntitledNotepad.MenuSelect("Format->Font")
self.old_font = self.app.Font.FontComboBox.SelectedIndex()
self.old_font_style = self.app.Font.FontStyleCombo.SelectedIndex()
# ensure we have the correct settings for this test
self.app.Font.FontStyleCombo.Select(0)
self.app.Font.FontComboBox.Select("Lucida Console")
self.app.Font.OK.Click()
self.dlg = self.app.Window_(title='Untitled - Notepad', class_name='Notepad')
self.ctrl = HwndWrapper(self.dlg.Edit.handle)
self.dlg.edit.SetEditText("Here is some text\r\n and some more")
def tearDown(self):
"Close the application after tests"
# Set the old font again
self.app.UntitledNotepad.MenuSelect("Format->Font")
self.app.Font.FontComboBox.Select(self.old_font)
self.app.Font.FontStyleCombo.Select(self.old_font_style)
self.app.Font.OK.Click()
self.app.Font.WaitNot('visible')
# close the application
try:
self.dlg.Close(0.5)
if self.app.Notepad["Do&n't Save"].Exists():
self.app.Notepad["Do&n't Save"].Click()
self.app.Notepad["Do&n't Save"].WaitNot('visible')
except: # timings.TimeoutError:
pass
finally:
self.app.kill_()
#def testText(self):
# "Test getting the window Text of the dialog"
# self.assertEquals(self.dlg.WindowText(), "Untitled - Notepad")
def testClick(self):
self.ctrl.Click(coords = (50, 10))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (6,6))
def testClickInput(self):
self.ctrl.ClickInput(coords = (50, 10))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (6,6))
def testDoubleClick(self):
self.ctrl.DoubleClick(coords = (60, 30))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (24,29))
def testDoubleClickInput(self):
self.ctrl.DoubleClickInput(coords = (60, 30))
self.assertEquals(self.dlg.Edit.SelectionIndices(), (24,29))
def testMenuSelectNotepad_bug(self):
"In notepad - MenuSelect Edit->Paste did not work"
text = b'Here are some unicode characters \xef\xfc\r\n'
app2 = Application.start("notepad")
app2.UntitledNotepad.Edit.SetEditText(text)
app2.UntitledNotepad.MenuSelect("Edit->Select All")
app2.UntitledNotepad.MenuSelect("Edit->Copy")
self.dlg.MenuSelect("Edit->Select All")
self.dlg.MenuSelect("Edit->Paste")
self.dlg.MenuSelect("Edit->Paste")
self.dlg.MenuSelect("Edit->Paste")
app2.UntitledNotepad.MenuSelect("File->Exit")
app2.Window_(title='Notepad', class_name='#32770')["Don't save"].Click()
self.assertEquals(self.dlg.Edit.TextBlock().encode(locale.getpreferredencoding()), text*3)
#
# def testRightClick(self):
# pass
#
# def testPressMouse(self):
# pass
#
# def testReleaseMouse(self):
# pass
#
# def testMoveMouse(self):
# pass
#
# def testDragMouse(self):
# pass
#
# def testSetWindowText(self):
# pass
#
# def testTypeKeys(self):
# pass
#
# def testDebugMessage(self):
# pass
#
# def testDrawOutline(self):
# pass
#
class GetDialogPropsFromHandleTest(unittest.TestCase):
"Unit tests for mouse actions of the HwndWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
self.app = Application()
if is_x64_Python() or not is_x64_OS():
self.app.start_(r"C:\Windows\System32\notepad.exe")
else:
self.app.start_(r"C:\Windows\SysWOW64\notepad.exe")
self.dlg = self.app.UntitledNotepad
self.ctrl = HwndWrapper(self.dlg.Edit.handle)
def tearDown(self):
"Close the application after tests"
# close the application
#self.dlg.TypeKeys("%{F4}")
self.dlg.Close(0.5)
self.app.kill_()
def test_GetDialogPropsFromHandle(self):
"Test some small stuff regarding GetDialogPropsFromHandle"
props_from_handle = GetDialogPropsFromHandle(self.dlg.handle)
props_from_dialog = GetDialogPropsFromHandle(self.dlg)
props_from_ctrl = GetDialogPropsFromHandle(self.ctrl)
self.assertEquals(props_from_handle, props_from_dialog)
##====================================================================
#def _unittests():
# "do some basic testing"
# from pywinauto.findwindows import find_windows
# import sys
#
# if len(sys.argv) < 2:
# handle = win32functions.GetDesktopWindow()
# else:
# try:
# handle = int(eval(sys.argv[1]))
#
# except ValueError:
#
# handle = find_windows(
# title_re = "^" + sys.argv[1],
# class_name = "#32770",
# visible_only = False)
#
# if not handle:
# print "dialog not found"
# sys.exit()
#
# props = GetDialogPropsFromHandle(handle)
# print len(props)
# #pprint(GetDialogPropsFromHandle(handle))
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | -8,459,652,665,241,181,000 | 31.183575 | 132 | 0.641449 | false |
ge0rgi/cinder | cinder/volume/drivers/netapp/dataontap/fc_cmode.py | 1 | 5379 | # Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) - 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetAppCmodeFibreChannelDriver(driver.BaseVD,
driver.ManageableVD):
"""NetApp C-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.add_fc_zone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.remove_fc_zone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
return self.library.update_consistencygroup(group, add_volumes=None,
remove_volumes=None)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
def failover_host(self, context, volumes, secondary_id=None):
return self.library.failover_host(
context, volumes, secondary_id=secondary_id)
| apache-2.0 | -4,857,598,255,072,615,000 | 38.844444 | 78 | 0.671314 | false |
StackStorm/mistral | mistral/tests/unit/engine/test_task_publish.py | 1 | 2807 | # Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workbooks as wb_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set the value; otherwise, in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
SIMPLE_WORKBOOK = """
---
version: '2.0'
name: wb1
workflows:
wf1:
type: direct
tasks:
t1:
action: std.echo output="Task 1"
publish:
v1: <% $.t1.get($foobar) %>
on-success:
- t2
t2:
action: std.echo output="Task 2"
on-success:
- t3
t3:
action: std.echo output="Task 3"
"""
class TaskPublishTest(base.EngineTestCase):
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
'Task 1', # Mock task1 success.
'Task 2', # Mock task2 success.
'Task 3' # Mock task3 success.
]
)
)
def test_publish_failure(self):
wb_service.create_workbook_v2(SIMPLE_WORKBOOK)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb1.wf1')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(1, len(task_execs))
task_1_ex = self._assert_single_item(task_execs, name='t1')
# Task 1 should have failed.
self.assertEqual(states.ERROR, task_1_ex.state)
self.assertIn('Can not evaluate YAQL expression', task_1_ex.state_info)
# Action execution of task 1 should have succeeded.
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id
)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
| apache-2.0 | -4,910,604,444,682,694,000 | 28.861702 | 79 | 0.625223 | false |
mlwithtf/mlwithtf | chapter_10/models.py | 1 | 1545 | import tensorflow as tf
def compute_loss(logits, labels):
labels = tf.squeeze(tf.cast(labels, tf.int32))
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
cross_entropy_loss= tf.reduce_mean(cross_entropy)
reg_loss = tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
return cross_entropy_loss + reg_loss, cross_entropy_loss, reg_loss
def compute_accuracy(logits, labels):
labels = tf.squeeze(tf.cast(labels, tf.int32))
batch_predictions = tf.cast(tf.argmax(logits, 1), tf.int32)
predicted_correctly = tf.equal(batch_predictions, labels)
accuracy = tf.reduce_mean(tf.cast(predicted_correctly, tf.float32))
return accuracy
def get_learning_rate(global_step, initial_value, decay_steps, decay_rate):
learning_rate = tf.train.exponential_decay(initial_value, global_step, decay_steps, decay_rate, staircase=True)
return learning_rate
def train(total_loss, learning_rate, global_step):
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step)
return train_op
def average_gradients(gradients):
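    # Assumes `gradients` is a list of per-tower lists of (gradient, variable)
    # pairs (e.g. one optimizer.compute_gradients() result per GPU tower);
    # returns a single list of (averaged_gradient, variable) pairs.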
average_grads = []
for grad_and_vars in zip(*gradients):
grads = []
for g, _ in grad_and_vars:
grads.append(tf.expand_dims(g, 0))
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
| apache-2.0 | -2,445,733,855,455,899,000 | 32.586957 | 115 | 0.68932 | false |
devilry/devilry-django | devilry/devilry_admin/cradminextensions/listfilter/listfilter_relateduser.py | 1 | 3399 | from django.conf import settings
from django.db import models
from django.db.models.functions import Lower, Concat
from django.utils.translation import gettext_lazy, pgettext_lazy
from cradmin_legacy.viewhelpers import listfilter
from cradmin_legacy.viewhelpers.listfilter.basefilters.single import abstractselect
from devilry.devilry_admin.cradminextensions.listfilter import listfilter_tags
class OrderRelatedStudentsFilter(listfilter.django.single.select.AbstractOrderBy):
def get_ordering_options(self):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
shortname_ascending_label = gettext_lazy('Email')
shortname_descending_label = gettext_lazy('Email descending')
else:
shortname_ascending_label = gettext_lazy('Username')
shortname_descending_label = gettext_lazy('Username descending')
# NOTE: We use Concat below to get sorting that works even when the user
# does not have a fullname, and we use Lower to sort ignoring case.
return [
('', {
'label': gettext_lazy('Name'),
'order_by': [Lower(Concat(
'user__fullname', 'user__shortname', output_field=models.CharField()))],
}),
('name_descending', {
'label': gettext_lazy('Name descending'),
'order_by': [Lower(Concat(
'user__fullname', 'user__shortname', output_field=models.CharField())).desc()],
}),
('lastname_ascending', {
'label': gettext_lazy('Last name'),
'order_by': [Lower('user__lastname')],
}),
('lastname_descending', {
'label': gettext_lazy('Last name descending'),
'order_by': [Lower('user__lastname').desc()],
}),
('shortname_ascending', {
'label': shortname_ascending_label,
'order_by': ['user__shortname'],
}),
('shortname_descending', {
'label': shortname_descending_label,
'order_by': ['-user__shortname'],
}),
]
def get_slug(self):
return 'orderby'
def get_label(self):
return pgettext_lazy('orderby', 'Sort')
class IsActiveFilter(listfilter.django.single.select.Boolean):
def get_slug(self):
return 'active'
def get_label(self):
return pgettext_lazy('listfilter relateduser', 'Is active?')
class Search(listfilter.django.single.textinput.Search):
def get_modelfields(self):
return [
'user__fullname',
'user__shortname',
'periodtag__tag',
]
def get_label_is_screenreader_only(self):
return True
def get_slug(self):
return 'search'
def get_label(self):
return gettext_lazy('Search')
def get_placeholder(self):
return gettext_lazy('Search listed objects ...')
class TagSelectFilter(listfilter_tags.AbstractTagSelectFilter):
def filter(self, queryobject):
cleaned_value = self.get_cleaned_value() or ''
if cleaned_value == self.get_notag_value():
queryobject = queryobject.filter(periodtag__isnull=True)
elif cleaned_value != '':
queryobject = queryobject.filter(periodtag__id=cleaned_value)
return queryobject
| bsd-3-clause | -3,571,475,830,365,769,700 | 35.159574 | 99 | 0.597823 | false |
tonyqiu1019/BulletAPI | models/api/models.py | 1 | 1264 | from django.db import models
# the bullet object
class Bullet(models.Model):
# required fields
content = models.CharField(max_length=512)
ret_time = models.DateTimeField(blank=True, null=True)
post_time = models.DateTimeField(blank=True, null=True)
# optional fields
info = models.ForeignKey('Info', on_delete=models.SET_NULL,
blank=True, null=True)
color = models.CharField(max_length=6, blank=True, default="ffffff")
font_size = models.PositiveSmallIntegerField(blank=True, default=12)
num_repeat = models.PositiveSmallIntegerField(blank=True, default=1)
display_mode = models.CharField(max_length=1, blank=True, choices=(
('f', 'fixed'),
('s', 'scroll'),
), default='s')
def __unicode__(self):
ret = self.content[:10]+'...' if len(self.content) > 13 else self.content
return u'%s' % (ret,)
class Meta:
ordering = ['ret_time', 'post_time']
# the user info about a bullet
class Info(models.Model):
fingerprint = models.CharField(max_length=64, unique=True)
user_agent = models.CharField(max_length=1024, blank=True)
is_banned = models.BooleanField(blank=True, default=False)
def __unicode__(self):
return u'%s' % (self.fingerprint,)
| mit | 3,125,209,969,669,040,000 | 35.114286 | 81 | 0.662184 | false |
siko/xyz-get | src/ext/ipnli.py | 1 | 2052 | #!/usr/bin/env python
# -- coding:utf-8 --
import os, re, sys, time, datetime
import urllib,urllib2,threading,requests
from bs4 import BeautifulSoup
downurls = []
threads=[]
f=open('log.txt', 'w+')
class downloader(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.info=[]
def run(self):
for i in self.info:
            print 'Downloading %s\r\n' % i['url']
try:
# urllib.urlretrieve(i['url'], i['name'].decode('utf8'))
urllib.urlretrieve(i['url'], i['name'])
except Exception as e:
f.write('url:' + i['url'] + '\r\n' + str(e) + '\r\n')
def createurls(channel,begin,end):
# print channel
page = requests.get(channel)
soup = BeautifulSoup(page.text)
articles = soup.findAll('article')[begin:end]
for art in articles:
filename = art.find('h1').find('a').contents[2].replace(' ','').replace('\n','')
audiourl = channel+art.find('a',class_='button fa-download')['href']
downurls.append([filename,audiourl])
def downfiles():
i=0
for g in downurls:
name=g[0] + ".mp3"
path=g[1]
print 'name=',name,'path=',path
if i%6==0:
t=downloader()
threads.append(t)
t.info.append({'url':path, 'name':name})
i=i+1
if __name__ == '__main__':
    channel = int(input('Will download from IPN (http://ipn.li/). Please choose a show - 1. IT Gonglun 2. Kernel Panic 3. Taiyi Laile 4. Weizhidao: '))
channels = {
1: 'itgonglun/',
2: 'kernelpanic/',
3: 'taiyilaile/',
4: 'weizhidao/',
}
channelurl = 'http://ipn.li/'+channels.get(channel,'itgonglun/')
    begin = int(input('Enter the starting episode number: '))
    end = int(input('Enter the ending episode number: '))
createurls(channelurl,begin,end)
downfiles()
print 'threads length is : %d' % len(threads)
for t in threads:
t.start()
time.sleep(1)
f.flush()
for t in threads:
t.join() | mit | -2,164,372,479,213,248,500 | 25.808219 | 91 | 0.542434 | false |
z23han/Wrangling-MongoDB | Lesson_5_Analyzing_Data/14-Using_push/push.py | 1 | 2297 | #!/usr/bin/env python
"""
$push is similar to $addToSet. The difference is that rather than accumulating only unique values
it aggregates all values into an array.
Using an aggregation query, count the number of tweets for each user. In the same $group stage,
use $push to accumulate all the tweet texts for each user. Limit your output to the 5 users
with the most tweets.
Your result documents should include only the fields:
"_id" (screen name of user),
"count" (number of tweets found for the user),
"tweet_texts" (a list of the tweet texts found for the user).
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
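    # stage 1: $group by screen name, counting tweets and pushing each tweet's
    # text into an array; stage 2: $sort by count descending; stage 3: $limit
    # to the five users with the most tweets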
pipeline = [
{"$group": {"_id": "$user.screen_name",
"count": {"$sum": 1},
"tweet_texts": {
"$push": "$text"
}}},
{"$sort": {"count": -1}},
{"$limit": 5}
]
return pipeline
def aggregate(db, pipeline):
result = db.tweets.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('twitter')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
#assert len(result["result"]) == 5
#assert result["result"][0]["count"] > result["result"][4]["count"]
import pprint
pprint.pprint(result)
| agpl-3.0 | -3,282,531,151,248,877,000 | 38.603448 | 101 | 0.684806 | false |
sorenh/python-django-greenfan | greenfan/management/commands/provision-users.py | 1 | 1260 | #
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import urlparse
from subprocess import Popen
from time import sleep, time
from django.core.management.base import BaseCommand
from django.template import Context, Template
from fabric.api import env as fabric_env
from fabric.api import run, local, sudo, put
from greenfan import utils
from greenfan.models import Configuration, Job, Server
def run_cmd(args):
proc = Popen(args)
return proc.communicate()
class Command(BaseCommand):
def handle(self, job_id, **options):
job = Job.objects.get(id=job_id)
job.redirect_output()
job.provision_users()
| apache-2.0 | 8,509,709,529,045,808,000 | 29 | 76 | 0.734127 | false |
flyte/xbee-helper | setup.py | 1 | 1972 | # -*- coding: utf-8 -*-
"""
Setuptools script for the xbee-helper project.
"""
import os
from textwrap import fill, dedent
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
def required(fname):
return open(
os.path.join(
os.path.dirname(__file__), fname
)
).read().split('\n')
setup(
name="xbee-helper",
version="0.0.7",
packages=find_packages(
exclude=[
"*.tests",
"*.tests.*",
"tests.*",
"tests",
"*.ez_setup",
"*.ez_setup.*",
"ez_setup.*",
"ez_setup",
"*.examples",
"*.examples.*",
"examples.*",
"examples"
]
),
scripts=[],
entry_points={},
include_package_data=True,
setup_requires='pytest-runner',
tests_require='pytest',
install_requires=required('requirements.txt'),
test_suite='pytest',
zip_safe=False,
# Metadata for upload to PyPI
author='Ellis Percival',
author_email="[email protected]",
description=fill(dedent("""\
This project offers a high level API to an XBee device running an
up-to-date version of the ZigBee firmware. It builds upon the existing
XBee project by abstracting more functionality into methods.
""")),
classifiers=[
"Programming Language :: Python",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Communications",
"Topic :: Home Automation",
"Topic :: Software Development :: Embedded Systems",
"Topic :: System :: Networking"
],
license="MIT",
keywords="",
url="https://github.com/flyte/xbee-helper"
)
| mit | -8,647,648,249,815,953,000 | 25.648649 | 78 | 0.567951 | false |
aaiijmrtt/WORDVECTORS | shiftreducer.py | 1 | 4286 | import neuralnet, numpy
SHIFT = -1
REDUCELEFT = 0
REDUCERIGHT = 1
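# Oracle transition: given the current stack and queue indices, the arcs built
# so far and the gold dependencies, return the gold dependency that can be
# reduced next, or (SHIFT, SHIFT, SHIFT) when no reduction is possible yet.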
def transition(stack, queue, arcs, dependencies):
if len(stack) < 2:
return (SHIFT, SHIFT, SHIFT)
for dependency in dependencies:
if stack[-1] == dependency[0] and stack[-2] == dependency[1]:
return dependency
for dependency in dependencies:
if stack[-2] == dependency[0] and stack[-1] == dependency[1]:
flag = True
for dependence in dependencies:
if dependence[0] == stack[-1] and dependence not in arcs:
flag = False
if flag:
return dependency
return (SHIFT, SHIFT, SHIFT)
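# Replay the gold parse with the oracle above to build training pairs of
# (stack/queue embedding window -> transition) and fit the neural network on them.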
def trainoracle(inputs, outputs, oracle, labels):
vectorizednonlinearity = neuralnet.VECTORIZEDNONLINEARITY
embeddingsize = inputs[0].shape[0]
stack = [inputs[0]]
queue = inputs[1: ]
stackindices = [0]
queueindices = range(1, len(inputs))
arcs = list()
ins = list()
outs = list()
while len(stack) > 1 or len(queue) > 0:
besttransition = transition(stackindices, queueindices, arcs, outputs)
if len(stack) > 1 and len(queue) > 0:
ins.append(numpy.concatenate([stack[-2], stack[-1], queue[0]]))
elif len(stack) > 1:
ins.append(numpy.concatenate([stack[-2], stack[-1], numpy.zeros((embeddingsize, 1), dtype = float)]))
else:
ins.append(numpy.concatenate([numpy.zeros((embeddingsize, 1), dtype = float), stack[-1], queue[0]]))
outs.append(numpy.zeros((2 * (labels + 1) + 1, 1), dtype = float))
outs[-1][besttransition[2]][0] = 1.0
if besttransition == (SHIFT, SHIFT, SHIFT):
stack.append(queue.pop(0))
stackindices.append(queueindices.pop(0))
else:
arcs.append(besttransition)
# stack[stackindices.index(besttransition[0])] = neuralnet.forwardpass(vectorin, oracle['weights'], oracle['biases'], vectorizednonlinearity)[1]
del stack[stackindices.index(besttransition[1])]
del stackindices[stackindices.index(besttransition[1])]
for i in range(len(inputs)):
oracle['weights'], oracle['biases'] = neuralnet.train(ins, outs, oracle['weights'], oracle['biases'], alpha = [0.05, 0.05], gamma = [0.5, 0.5], history = i, hiddeninitializer = [numpy.zeros((embeddingsize, 1), dtype = float), numpy.zeros((2 * labels + 3, 1), dtype = float)])
return oracle
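# Greedy decoding: at every step score SHIFT and both reduce directions with
# the trained network, apply the best-scoring transition until the stack and
# queue are exhausted, and return the predicted arcs (plus a final root arc).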
def shiftreduce(inputs, oracle):
vectorizednonlinearity = neuralnet.VECTORIZEDNONLINEARITY
embeddingsize = inputs[0].shape[0]
classes = oracle['biases'][1].shape[0]
stack = [inputs[0]]
queue = inputs[1: ]
stackindices = [0]
queueindices = range(1, len(inputs))
arcs = list()
hidden = [numpy.zeros((embeddingsize, 1), dtype = float), numpy.zeros((classes, 1), dtype = float)]
while len(stack) > 1 or len(queue) > 0:
bestscore = float("-inf")
besttransition = None
bestcombination = None
bestlabel = None
if len(stack) > 1:
if len(queue) > 0:
vectorin = numpy.concatenate([stack[-2], stack[-1], queue[0]])
else:
vectorin = numpy.concatenate([stack[-2], stack[-1], numpy.zeros((embeddingsize, 1), dtype = float)])
activations = neuralnet.forwardpass(vectorin, oracle['weights'], oracle['biases'], vectorizednonlinearity, hidden)
if numpy.max(activations[-1][0: -1]) > bestscore:
bestscore = numpy.max(activations[2][0: -1])
bestcombination = activations[1]
bestlabel = numpy.argmax(activations[2][0: -1])
besttransition = REDUCELEFT if bestlabel < classes // 2 else REDUCERIGHT
besthidden = activations[1: ]
if len(queue) > 0:
if len(stack) > 1:
vectorin = numpy.concatenate([stack[-2], stack[-1], queue[0]])
else:
vectorin = numpy.concatenate([numpy.zeros((embeddingsize, 1), dtype = float), stack[-1], queue[0]])
activations = neuralnet.forwardpass(vectorin, oracle['weights'], oracle['biases'], vectorizednonlinearity, hidden)
if activations[-1][-1][0] > bestscore:
bestscore = activations[2][-1][0]
bestcombination = None
bestlabel = SHIFT
besttransition = SHIFT
besthidden = activations[1: ]
hidden = besthidden
if besttransition == SHIFT:
stack.append(queue.pop(0))
stackindices.append(queueindices.pop(0))
else:
arcs.append((stackindices[-1 - besttransition] + 1, stackindices[-2 + besttransition] + 1, bestlabel))
del stack[-2 + besttransition]
del stackindices[-2 + besttransition]
# stack[-1] = bestcombination
arcs.append((0, stackindices[0] + 1, REDUCERIGHT))
return arcs
| mit | 8,538,488,397,566,068,000 | 40.211538 | 277 | 0.685254 | false |
jittat/ku-eng-direct-admission | scripts/import_final_results.py | 1 | 1966 | import codecs
import sys
if len(sys.argv)!=2:
print "Usage: import_final_results [results.csv]"
quit()
file_name = sys.argv[1]
from django.conf import settings
from django_bootstrap import bootstrap
bootstrap(__file__)
from result.models import AdmissionResult
from application.models import Applicant, SubmissionInfo, PersonalInfo, Major
applicants = []
def read_results():
f = codecs.open(file_name, encoding="utf-8", mode="r")
lines = f.readlines()
for l in lines[1:]:
items = l.strip().split(',')
app = {'national_id': items[0],
'major': items[2] }
applicants.append(app)
def standardize_major_number(major):
return ('0' * (3 - len(major))) + major
def import_results():
print 'Importing results...'
majors = Major.get_all_majors()
major_dict = dict([(m.number, m) for m in majors])
not_found_list = []
app_order = 1
for a in applicants:
personal_infos = (PersonalInfo.objects
.filter(national_id=a['national_id'])
.select_related(depth=1))
if len(personal_infos)==0:
print "NOT-FOUND:", a['national_id']
not_found_list.append(a['national_id'])
continue
for pinfo in personal_infos:
applicant = pinfo.applicant
try:
aresult = applicant.admission_result
except:
aresult = AdmissionResult.new_for_applicant(applicant)
major_number = standardize_major_number(a['major'])
major = major_dict[major_number]
aresult.is_final_admitted = True
aresult.final_admitted_major = major
aresult.save()
print a['national_id']
print '-------NOT-FOUND-------'
for nid in not_found_list:
print nid
def main():
read_results()
import_results()
if __name__ == '__main__':
main()
| agpl-3.0 | 2,766,383,835,464,029,000 | 24.532468 | 77 | 0.573245 | false |
glaslos/waechter | samples/hello_job_lock.py | 1 | 1122 | # Waechter - Job Scheduling Helper
# Copyright (C) 2016 Lukas Rist
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import sleep
import waechter.scheduler
class HelloJobLock(waechter.scheduler.BaseJob):
def __init__(self, interval=None):
super(HelloJobLock, self).__init__(interval)
self.interval = interval if interval else 1
@classmethod
def work(cls):
print('hello work lock')
sleep(1.5)
if __name__ == '__main__':
main_worker = waechter.scheduler.JobScheduler().run()
| gpl-3.0 | -2,088,577,196,151,638 | 32 | 71 | 0.720143 | false |
lesion/indicator-stickynotes | stickynotes/backend.py | 1 | 8331 | # Copyright © 2012-2015 Umang Varma <[email protected]>
#
# This file is part of indicator-stickynotes.
#
# indicator-stickynotes is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# indicator-stickynotes is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# indicator-stickynotes. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
import uuid
import json
from os.path import expanduser
from stickynotes.info import FALLBACK_PROPERTIES
class Note:
def __init__(self, content=None, gui_class=None, noteset=None,
category=None):
self.gui_class = gui_class
self.noteset = noteset
content = content or {}
self.uuid = content.get('uuid')
self.body = content.get('body','')
self.title = content.get('title','')
self.properties = content.get("properties", {})
self.category = category or content.get("cat", "")
if not self.category in self.noteset.categories:
self.category = ""
last_modified = content.get('last_modified')
if last_modified:
self.last_modified = datetime.strptime(last_modified,
"%Y-%m-%dT%H:%M:%S")
else:
self.last_modified = datetime.now()
# Don't create GUI until show is called
self.gui = None
def extract(self):
if not self.uuid:
self.uuid = str(uuid.uuid4())
if self.gui != None:
self.gui.update_note()
self.properties = self.gui.properties()
return {"uuid":self.uuid, "body":self.body,
"last_modified":self.last_modified.strftime(
"%Y-%m-%dT%H:%M:%S"), "properties":self.properties,
"cat": self.category, "title": self.title}
def update(self,body=None,title=None):
if not body == None:
self.body = body
self.last_modified = datetime.now()
if not title == None:
self.title = title
self.last_modified = datetime.now()
def delete(self):
self.noteset.notes.remove(self)
self.noteset.save()
del self
def show(self, *args, **kwargs):
# If GUI has not been created, create it now
if self.gui == None:
self.gui = self.gui_class(note=self)
else:
self.gui.show(*args, **kwargs)
def hide(self):
if self.gui != None:
self.gui.hide()
def set_locked_state(self, locked):
# if gui hasn't been initialized, just change the property
if self.gui == None:
self.properties["locked"] = locked
else:
self.gui.set_locked_state(locked)
def cat_prop(self, prop):
"""Gets a property of the note's category"""
return self.noteset.get_category_property(self.category, prop)
class NoteSet:
def __init__(self, gui_class, data_file, indicator):
self.notes = []
self.properties = {}
self.categories = {}
self.gui_class = gui_class
self.data_file = data_file
self.indicator = indicator
def _loads_updater(self, dnoteset):
"""Parses old versions of the Notes structure and updates them"""
return dnoteset
def loads(self, snoteset):
"""Loads notes into their respective objects"""
notes = self._loads_updater(json.loads(snoteset))
self.properties = notes.get("properties", {})
self.categories = notes.get("categories", {})
self.notes = [Note(note, gui_class=self.gui_class, noteset=self)
for note in notes.get("notes",[])]
def dumps(self):
return json.dumps({"notes":[x.extract() for x in self.notes],
"properties": self.properties, "categories": self.categories})
def save(self, path=''):
output = self.dumps()
with open(path or expanduser(self.data_file),
mode='w', encoding='utf-8') as fsock:
fsock.write(output)
def open(self, path=''):
with open(path or expanduser(self.data_file),
encoding='utf-8') as fsock:
self.loads(fsock.read())
def load_fresh(self):
"""Load empty data"""
self.loads('{}')
self.new()
def merge(self, data):
"""Update notes based on new data"""
jdata = self._loads_updater(json.loads(data))
self.hideall()
# update categories
if "categories" in jdata:
self.categories.update(jdata["categories"])
# make a dictionary of notes so we can modify existing notes
dnotes = {n.uuid : n for n in self.notes}
for newnote in jdata.get("notes", []):
if "uuid" in newnote and newnote["uuid"] in dnotes:
# Update notes that are already in the noteset
orignote = dnotes[newnote["uuid"]]
# make sure it's an 'Update'
if datetime.strptime(newnote["last_modified"], \
"%Y-%m-%dT%H:%M:%S") > orignote.last_modified:
if "body" in newnote:
orignote.body = newnote["body"]
if "properties" in newnote:
orignote.properties = newnote["properties"]
if "cat" in newnote:
orignote.category = newnote["cat"]
else:
# otherwise create a new note
                # use a separate local name so the uuid module is not shadowed
                if "uuid" in newnote:
                    note_uuid = newnote["uuid"]
                else:
                    note_uuid = str(uuid.uuid4())
                dnotes[note_uuid] = Note(newnote, gui_class=self.gui_class,
                                         noteset=self)
# copy notes over from dictionary to list
self.notes = list(dnotes.values())
self.showall(reload_from_backend=True)
def find_category(self, name=""):
# return cid of the first matched category
if name:
try: cid = (cat for cat in self.categories if \
self.categories[cat]["name"] == name).__next__()
# not found
except Exception: cid = None
else:
cid = None
return cid
def new(self, notebody='', category=''):
"""Creates a new note and adds it to the note set"""
cid = self.find_category(name=category)
if category and not cid:
cid = str(uuid.uuid4())
self.categories[cid]={'name':category}
note = Note(gui_class=self.gui_class, noteset=self,
category=cid)
note.body=notebody
note.set_locked_state(not not notebody)
self.notes.append(note)
self.gui_class and note.show() # show if created with gui
return note
def showall(self, *args, **kwargs):
for note in self.notes:
note.show(*args, **kwargs)
self.properties["all_visible"] = True
def hideall(self, *args):
self.save()
for note in self.notes:
note.hide(*args)
self.properties["all_visible"] = False
def get_category_property(self, cat, prop):
"""Get a property of a category or the default"""
if ((not cat) or (not cat in self.categories)) and \
self.properties.get("default_cat", None):
cat = self.properties["default_cat"]
cat_data = self.categories.get(cat, {})
if prop in cat_data:
return cat_data[prop]
# Otherwise, use fallback categories
if prop in FALLBACK_PROPERTIES:
return FALLBACK_PROPERTIES[prop]
else:
raise ValueError("Unknown property")
class dGUI:
"""Dummy GUI"""
def __init__(self, *args, **kwargs):
pass
def show(self):
pass
def hide(self):
pass
def update_note(self):
pass
def properties(self):
return None
| gpl-3.0 | -2,976,877,124,460,908,000 | 34.751073 | 78 | 0.569868 | false |
vietdh85/vh-utility | script/hyip_stop.py | 1 | 1859 | import sys
import os.path
import urllib2
import re
from pyquery import PyQuery as pq
import common
def getId(url):
arr = url.split("/")
id = arr[len(arr) - 2]
return id
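# Follow the referral link and return the bare hostname of the target site
# (scheme and leading "www." stripped); an empty string is returned on failure.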
def getSiteUrl(urlRequest, monitor, rcbUrl):
result = ""
print("REQUEST: {0}".format(urlRequest))
try:
req = urllib2.urlopen(urlRequest, timeout=30)
url = req.geturl()
arr = url.split("/?")
arr1 = arr[0].split("//")
result = arr1[1].replace("www.", "")
result = result.split("/")[0]
except :
print("========== ERROR ===========")
#common.insertUnknowSite(rcbUrl, monitor)
return result
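# Scrape the monitor's "new" listings page, register programs that are not yet
# tracked and remember their RCB detail URLs.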
def getRcb(monitor):
print("hyip_stop.getRcb()")
rcb_url = "http://{0}/new".format(monitor)
d = pq(url=rcb_url)
list = d("a.joinnw")
siteList = []
for item in list:
obj = {}
obj['id'] = getId(item.get("href"))
if common.getSiteMonitorByRefSiteId(monitor, obj['id']) == None:
obj['siteRCBUrl'] = "http://{0}/details/aj/rcb/lid/{1}/".format(monitor, obj['id'])
obj['url'] = getSiteUrl(item.get("href"), monitor, obj['siteRCBUrl'])
obj['siteId'] = ""
if obj['url'] != '':
siteId = common.insertSite(obj)
obj['siteId'] = siteId
siteList.append(obj)
print("{0} - {1} - {2}".format(obj['id'], obj['url'], obj['siteId']))
for item in siteList:
common.insertSiteMonitor(item, monitor)
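# Return True if any row in the sixth column of the RCB listing is marked "paid".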
def checkPaid(siteUrl):
d = pq(url=siteUrl)
tables = d("#content2 table.listbody tr td:nth-child(6) center")
result = False
#print(tables)
for item in tables:
if re.search('paid', item.text_content(), re.IGNORECASE):
result = True
return result
def checkRcb(monitor):
siteMonitors = common.getSiteMonitor(monitor)
for item in siteMonitors:
print(item)
if item[2] == 0:
if checkPaid(item[1]):
common.setPaid(item[0])
def run():
MONITOR = "hyipstop.com"
getRcb(MONITOR)
#checkRcb(MONITOR)
| gpl-3.0 | -9,091,527,717,578,031,000 | 21.39759 | 86 | 0.629371 | false |
mscoutermarsh/exercism_coveralls | assignments/python/space-age/space_age_test.py | 1 | 1640 | try:
from space_age import SpaceAge
except ImportError:
raise SystemExit('Could not find space_age.py. Does it exist?')
import unittest
class SpaceAgeTest(unittest.TestCase):
def test_age_in_seconds(self):
age = SpaceAge(1e6)
self.assertEqual(1e6, age.seconds)
def test_age_in_earth_years(self):
age = SpaceAge(1e9)
self.assertEqual(31.69, age.on_earth())
def test_age_in_mercury_years(self):
age = SpaceAge(2134835688)
self.assertEqual(67.65, age.on_earth())
self.assertEqual(280.88, age.on_mercury())
def test_age_in_venus_years(self):
age = SpaceAge(189839836)
self.assertEqual(6.02, age.on_earth())
self.assertEqual(9.78, age.on_venus())
def test_age_on_mars(self):
age = SpaceAge(2329871239)
self.assertEqual(73.83, age.on_earth())
self.assertEqual(39.25, age.on_mars())
def test_age_on_jupiter(self):
age = SpaceAge(901876382)
self.assertEqual(28.58, age.on_earth())
self.assertEqual(2.41, age.on_jupiter())
def test_age_on_saturn(self):
age = SpaceAge(3e9)
self.assertEqual(95.06, age.on_earth())
self.assertEqual(3.23, age.on_saturn())
def test_age_on_uranus(self):
age = SpaceAge(3210123456)
self.assertEqual(101.72, age.on_earth())
self.assertEqual(1.21, age.on_uranus())
def test_age_on_neptune(self):
age = SpaceAge(8210123456)
self.assertEqual(260.16, age.on_earth())
self.assertEqual(1.58, age.on_neptune())
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -6,469,365,441,323,217,000 | 29.943396 | 67 | 0.615244 | false |
hulifox008/bitbake | lib/bb/msg.py | 1 | 5659 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'msg' implementation
Message handling infrastructure for bitbake
"""
# Copyright (C) 2006 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import logging
import collections
from itertools import groupby
import warnings
import bb
import bb.event
class BBLogFormatter(logging.Formatter):
"""Formatter which ensures that our 'plain' messages (logging.INFO + 1) are used as is"""
DEBUG3 = logging.DEBUG - 2
DEBUG2 = logging.DEBUG - 1
DEBUG = logging.DEBUG
VERBOSE = logging.INFO - 1
NOTE = logging.INFO
PLAIN = logging.INFO + 1
ERROR = logging.ERROR
WARNING = logging.WARNING
CRITICAL = logging.CRITICAL
levelnames = {
DEBUG3 : 'DEBUG',
DEBUG2 : 'DEBUG',
DEBUG : 'DEBUG',
VERBOSE: 'NOTE',
NOTE : 'NOTE',
PLAIN : '',
WARNING : 'WARNING',
ERROR : 'ERROR',
CRITICAL: 'ERROR',
}
def getLevelName(self, levelno):
try:
return self.levelnames[levelno]
except KeyError:
self.levelnames[levelno] = value = 'Level %d' % levelno
return value
def format(self, record):
record.levelname = self.getLevelName(record.levelno)
if record.levelno == self.PLAIN:
return record.getMessage()
else:
return logging.Formatter.format(self, record)
class Loggers(dict):
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
log = logging.getLogger("BitBake.%s" % domain._fields[key])
dict.__setitem__(self, key, log)
return log
class DebugLevel(dict):
def __getitem__(self, key):
if key == "default":
key = domain.Default
return get_debug_level(key)
def _NamedTuple(name, fields):
Tuple = collections.namedtuple(name, " ".join(fields))
return Tuple(*range(len(fields)))
domain = _NamedTuple("Domain", (
"Default",
"Build",
"Cache",
"Collection",
"Data",
"Depends",
"Fetcher",
"Parsing",
"PersistData",
"Provider",
"RunQueue",
"TaskData",
"Util"))
logger = logging.getLogger("BitBake")
loggers = Loggers()
debug_level = DebugLevel()
# Message control functions
#
def set_debug_level(level):
for log in loggers.itervalues():
log.setLevel(logging.NOTSET)
if level:
logger.setLevel(logging.DEBUG - level + 1)
else:
logger.setLevel(logging.INFO)
def get_debug_level(msgdomain = domain.Default):
if not msgdomain:
level = logger.getEffectiveLevel()
else:
level = loggers[msgdomain].getEffectiveLevel()
return max(0, logging.DEBUG - level + 1)
def set_verbose(level):
if level:
logger.setLevel(BBLogFormatter.VERBOSE)
else:
logger.setLevel(BBLogFormatter.INFO)
def set_debug_domains(domainargs):
for (domainarg, iterator) in groupby(domainargs):
for index, msgdomain in enumerate(domain._fields):
if msgdomain == domainarg:
level = len(tuple(iterator))
if level:
loggers[index].setLevel(logging.DEBUG - level + 1)
break
else:
warn(None, "Logging domain %s is not valid, ignoring" % domainarg)
#
# Message handling functions
#
def debug(level, msgdomain, msg):
warnings.warn("bb.msg.debug is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
level = logging.DEBUG - (level - 1)
    if not msgdomain:
        logger.log(level, msg)
    else:
        loggers[msgdomain].log(level, msg)
def plain(msg):
warnings.warn("bb.msg.plain is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
logger.plain(msg)
def note(level, msgdomain, msg):
warnings.warn("bb.msg.note is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
    if level > 1:
        if not msgdomain:
            logger.verbose(msg)
        else:
            loggers[msgdomain].verbose(msg)
    else:
        if not msgdomain:
            logger.info(msg)
        else:
            loggers[msgdomain].info(msg)
def warn(msgdomain, msg):
warnings.warn("bb.msg.warn is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
if not msgdomain:
logger.warn(msg)
else:
loggers[msgdomain].warn(msg)
def error(msgdomain, msg):
warnings.warn("bb.msg.error is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
if not msgdomain:
logger.error(msg)
else:
loggers[msgdomain].error(msg)
def fatal(msgdomain, msg):
if not msgdomain:
logger.critical(msg)
else:
loggers[msgdomain].critical(msg)
sys.exit(1)
| gpl-2.0 | -3,374,870,527,087,330,000 | 27.580808 | 93 | 0.626436 | false |
retux/google-finance-storage | quotes-exporter.py | 1 | 1531 | #!/usr/bin/env python
'''
quotes-exporter: exposes quotes as prometheus exporter api
Usage: quotes-exporter.py <port>
'''
import sys
import time
from prometheus_client import start_http_server, Metric, REGISTRY
from stockwatch import *
def cat_to_string (Symbols):
strSymbols = ' '
o = 0
for i in Symbols:
if o == 0:
strSymbols = i
else:
strSymbols = strSymbols + ',' + i
o += 1
return strSymbols
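# Custom Prometheus collector: collect() runs on every scrape and yields a
# single 'stock_quotes' metric with one gauge sample (last price) per symbol.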
class QuoteCollector(object):
def __init__(self):
self._endpoint = ''
def collect(self):
Symbols = [ 'GOOG', 'CSCO', 'BABA', 'APPL', 'IBM', 'GLOB' ]
#Symbols = [ 'GOOG' ]
strSymbols = cat_to_string(Symbols)
JSp = GoogleFinanceAPI()
if JSp.get(strSymbols):
#JSp.Quotes2Stdout() # // Show a little data, just for testing
JSp.JsonQot2Obj()
metric = Metric('stock_quotes', 'stock quotes last price', 'gauge')
for quote in JSp.QuotesList:
# Convert quotes to metric
metric.add_sample('stock_quotes', value=float(quote.Last), labels={'symbol': quote.Symbol})
yield metric
def main():
"""
	The Symbols list contains the stock symbols to query, as used by the Google Finance API.
	Each element should be of the form 'EXCHANGE:SYMBOL', for example:
[ 'NASDAQ:GOOG', 'NASDAQ:CSCO', 'NYSE:IBM', 'BCBA:YPFD' ]
"""
start_http_server(int(sys.argv[1]))
REGISTRY.register(QuoteCollector())
while True: time.sleep(1)
if __name__ == "__main__":
main()
| gpl-2.0 | 4,092,831,948,622,307,300 | 25.396552 | 103 | 0.601568 | false |
moserand/crosswater | crosswater/catchment_model/convert_hdf.py | 1 | 2967 | """Convert the HDF5 file from one big table into one group per
time step, each containing one table.
"""
import tables
from crosswater.read_config import read_config
from crosswater.tools.time_helper import ProgressDisplay
def make_index(table):
"""Create a completely sorted index (CSI) for `timestep`.
"""
col = table.cols.timestep
if not col.index or not col.index.is_csi:
if col.is_indexed:
print('removing old index')
table.cols.timestep.remove_index()
print('indexing')
indexrows = table.cols.timestep.create_csindex()
print('indexed {} rows'.format(indexrows))
def count_ids(id_col):
"""Count number of unique IDs.
"""
ids = set()
for id_ in id_col:
ids.add(id_)
return len(ids)
def convert(in_file_name, out_file_name, batch_size=2, total=365 * 24):
    """Convert one gigantic table into one table per time step.
"""
prog = ProgressDisplay(total)
filters = tables.Filters(complevel=5, complib='zlib')
in_file = tables.open_file(in_file_name, mode='a')
table = in_file.get_node('/output')
make_index(table)
nrows = table.nrows # pylint: disable=no-member
nids = count_ids(table.cols.catchment) # pylint: disable=no-member
assert nrows == total * nids
out_file = tables.open_file(out_file_name, mode='w')
start = 0
stop = nids
read_start = 0
read_stop = nids * batch_size
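    # read batch_size timesteps worth of rows (nids rows per step) in one go,
    # then slice out one timestep per iteration of the loop below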
for step in range(total):
prog.show_progress(step + 1)
if step % batch_size == 0:
# pylint: disable=no-member
batch_data = table.read_sorted('timestep', start=read_start,
stop=read_stop)
read_start = read_stop
read_stop += nids * batch_size
read_stop = min(read_stop, nrows)
start = 0
stop = start + nids
id_data = batch_data[start:stop]
start = stop
stop += nids
try:
assert len(set(id_data['timestep'])) == 1
except AssertionError:
print(set(id_data['timestep']))
print(id_data)
values = id_data[['catchment', 'concentration', 'discharge', 'local_discharge', 'load']]
group = out_file.create_group('/', 'step_{}'.format(step))
out_file.create_table(group, 'values', values,
filters=filters)
prog.show_progress(step + 1, force=True)
in_file.close()
out_file.close()
def run_convertion(config_file, batch_size):
"""Convert the output to one table per time step.
"""
print()
print('converting output')
config = read_config(config_file)
in_file_name = config['catchment_model']['output_catchments_path']
out_file_name = config['catchment_model']['output_steps_path']
convert(in_file_name, out_file_name, batch_size=batch_size)
| gpl-3.0 | -2,922,754,829,609,514,500 | 32.905882 | 96 | 0.584092 | false |
haandol/review_crawler | crawler.py | 1 | 1108 | # coding: utf-8
import time
import json
import urllib
import urllib2
import logging
from bs4 import BeautifulSoup as Soup
logging.basicConfig(level=logging.DEBUG)
url = 'https://play.google.com/store/getreviews?authuser=0'
headers = {
'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
'user-agent': 'Mozilla/5.0'
}
payload = {
'id': 'com.google.android.apps.maps',
'reviewType': 0,
'pageNum': 0,
'reviewSortOrder': 4,
'xhr': 1,
'hl': 'ko'
}
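# POST the form-encoded payload to the Play Store reviews endpoint; the first
# five bytes of the response (the anti-JSON-hijacking prefix) are skipped
# before parsing, and one-star reviews are logged as warnings.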
def parse():
values = urllib.urlencode(payload)
req = urllib2.Request(url, values, headers)
response = urllib2.urlopen(req)
data = json.loads(response.read()[5:])
soup = Soup(data[0][2])
for review in soup.select('.single-review'):
body = review.select('.review-body')[0].text
rating = int(review.select('.current-rating')[0]['style'].split(':')[1].strip()[:-2])/20
if 1 == rating:
logging.warning(body)
while True:
logging.info('start parsing')
parse()
logging.info('parsing ends')
logging.info('sleep in 60s')
time.sleep(60)
| mit | 6,187,855,021,797,479,000 | 21.612245 | 96 | 0.633574 | false |
tforrest/soda-automation | app/runserver.py | 1 | 1857 | from flask_script import Manager
from flask_restful import Api
from models.user import User
from redis_ops.init_redis import RedisPopulater
from config import app
from config import db
from api import api
import logging
import os
import sys
manager = Manager(app)
def setup_api(app):
"""
	Register the API resources with the Flask app and return the app.
"""
service = Api(app)
service.add_resource(api.MailChimpListCheck,'/api/lists/',endpoint='check_mailchimp')
service.add_resource(api.MailChimpList,'/api/lists/<list_id>/<asu_id>',endpoint='member_list')
service.add_resource(api.GenerateAuthToken,'/api/gen_token/',endpoint='token')
return app
serviced_app = setup_api(app)
def setup_redis():
try:
RedisPopulater().init_redis_dbs()
except Exception as e:
logging.fatal(e)
logging.fatal("Failure to init redis")
sys.exit(1)
# Deploy for development
def setup_dev():
# setup database for admin
db.create_all()
try:
admin_user_name = os.environ['DEV_ADMIN_USER_NAME']
admin_password = os.environ['DEV_ADMIN_PASSWORD']
except KeyError as e:
logging.warning(e)
logging.fatal("Error cannot setup dev environment")
sys.exit(2)
admin = User(admin_user_name,admin_password)
try:
db.session.add(admin)
db.session.commit()
except Exception as e:
logging.fatal(e)
logging.fatal("Error cannot setup dev environment")
sys.exit(2)
# init redis and populate with mailchimp
setup_redis()
@manager.command
def run_dev():
setup_dev()
serviced_app.run(debug=True)
# Deploy for integration tests
@manager.command
def run_test():
# To-Do
pass
# Deploy for production
@manager.command
def run_production():
# TO-DO
pass
if __name__ == '__main__':
manager.run()
| mit | -2,609,925,388,766,893,600 | 20.604651 | 98 | 0.658051 | false |
thomas-schmid-ubnt/avocado | selftests/functional/test_output.py | 1 | 20899 | import json
import tempfile
import os
import re
import shutil
import unittest
from xml.dom import minidom
import pkg_resources
from avocado.core import exit_codes
from avocado.core.output import TermSupport
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
PERL_TAP_PARSER_SNIPPET = """#!/bin/env perl
use TAP::Parser;
my $parser = TAP::Parser->new( { exec => ['%s', 'run', 'passtest.py', 'errortest.py', 'warntest.py', '--tap', '-', '--sysinfo', 'off', '--job-results-dir', '%%s'] } );
while ( my $result = $parser->next ) {
$result->is_unknown && die "Unknown line \\"" . $result->as_string . "\\" in the TAP output!\n";
}
$parser->parse_errors == 0 || die "Parser errors!\n";
$parser->is_good_plan || die "Plan is not a good plan!\n";
$parser->plan eq '1..3' || die "Plan does not match what was expected!\n";
""" % AVOCADO
def image_output_uncapable():
try:
import PIL
return False
except ImportError:
return True
def html_uncapable():
try:
pkg_resources.require('avocado_result_html')
return False
except pkg_resources.DistributionNotFound:
return True
def perl_tap_parser_uncapable():
return os.system("perl -e 'use TAP::Parser;'") != 0
def missing_binary(binary):
try:
utils_path.find_command(binary)
return False
except utils_path.CmdNotFoundError:
return True
class OutputTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
@unittest.skipIf(missing_binary('cc'),
"C compiler is required by the underlying doublefree.py test")
def test_output_doublefree(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'doublefree.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
output = result.stdout + result.stderr
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
bad_string = 'double free or corruption'
self.assertNotIn(bad_string, output,
"Libc double free can be seen in avocado "
"doublefree output:\n%s" % output)
def tearDown(self):
shutil.rmtree(self.tmpdir)
class OutputPluginTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
def check_output_files(self, debug_log):
base_dir = os.path.dirname(debug_log)
json_output = os.path.join(base_dir, 'results.json')
self.assertTrue(os.path.isfile(json_output))
with open(json_output, 'r') as fp:
json.load(fp)
xunit_output = os.path.join(base_dir, 'results.xml')
self.assertTrue(os.path.isfile(json_output))
try:
minidom.parse(xunit_output)
except Exception as details:
raise AssertionError("Unable to parse xunit output: %s\n\n%s"
% (details, open(xunit_output).read()))
tap_output = os.path.join(base_dir, "results.tap")
self.assertTrue(os.path.isfile(tap_output))
tap = open(tap_output).read()
self.assertIn("..", tap)
self.assertIn("\n# debug.log of ", tap)
def test_output_incompatible_setup(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_regex = re.compile(r'avocado run: error: argument ((--json)|'
'(--xunit)): Options ((--xunit --json)|'
'(--json --xunit)) are trying to use stdout '
'simultaneously\n')
self.assertIsNotNone(error_regex.match(result.stderr),
"Missing error message from output:\n%s" %
result.stderr)
@unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin")
def test_output_incompatible_setup_2(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--html - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
output = result.stdout + result.stderr
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_excerpt = "HTML to stdout not supported"
self.assertIn(error_excerpt, output,
"Missing excerpt error message from output:\n%s" % output)
def test_output_compatible_setup(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--journal --xunit %s --json - passtest.py' %
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
# Check if we are producing valid outputs
json.loads(output)
minidom.parse(tmpfile)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
def test_output_compatible_setup_2(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json %s passtest.py' %
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
# Check if we are producing valid outputs
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
self.check_output_files(debug_log)
minidom.parseString(output)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
@unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin")
def test_output_compatible_setup_3(self):
tmpfile = tempfile.mktemp(prefix='avocado_' + __name__)
tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
tmpfile3 = tempfile.mktemp(dir=tmpdir)
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit %s --json %s --html %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
tmpdir_contents = os.listdir(tmpdir)
self.assertEqual(len(tmpdir_contents), 4,
'Not all resources dir were created: %s' % tmpdir_contents)
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotEqual(output, "", "Output is empty")
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
self.check_output_files(debug_log)
minidom.parse(tmpfile)
finally:
try:
os.remove(tmpfile)
os.remove(tmpfile2)
shutil.rmtree(tmpdir)
except OSError:
pass
def test_output_compatible_setup_nooutput(self):
tmpfile = tempfile.mktemp()
tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
# Verify --silent can be supplied as app argument
cmd_line = ('%s --silent run --job-results-dir %s '
'--sysinfo=off --xunit %s --json %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "", "Output is not empty:\n%s" % output)
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
self.check_output_files(debug_log)
minidom.parse(tmpfile)
finally:
try:
os.remove(tmpfile)
os.remove(tmpfile2)
except OSError:
pass
def test_nonprintable_chars(self):
cmd_line = ("%s run --external-runner /bin/ls "
"'NON_EXISTING_FILE_WITH_NONPRINTABLE_CHARS_IN_HERE\x1b' "
"--job-results-dir %s --sysinfo=off"
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
debug_log = None
for line in output.splitlines():
if "JOB LOG" in line:
debug_log = line.split(':', 1)[-1].strip()
break
self.assertTrue(debug_log, "Unable to get JOB LOG from output:\n%s"
% output)
self.check_output_files(debug_log)
def test_show_job_log(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py --show-job-log' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
job_id_list = re.findall('Job ID: (.*)', result.stdout,
re.MULTILINE)
self.assertTrue(job_id_list, 'No Job ID in stdout:\n%s' %
result.stdout)
job_id = job_id_list[0]
self.assertEqual(len(job_id), 40)
def test_silent_trumps_show_job_log(self):
os.chdir(basedir)
# Also verify --silent can be supplied as run option
cmd_line = ('%s run --silent --job-results-dir %s '
'--sysinfo=off passtest.py --show-job-log'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "")
def test_default_enabled_plugins(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
output_lines = output.splitlines()
# The current human output produces 6 lines when running a single test,
# with an optional 7th line when the HTML report generation is enabled
self.assertGreaterEqual(len(output_lines), 6,
('Basic human interface did not produce the '
'expect output. Output produced: "%s"' % output))
second_line = output_lines[1]
debug_log = second_line.split()[-1]
self.check_output_files(debug_log)
def test_verify_whiteboard_save(self):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
config = os.path.join(self.tmpdir, "conf.ini")
content = ("[datadir.paths]\nlogs_dir = %s"
% os.path.relpath(self.tmpdir, "."))
script.Script(config, content).save()
cmd_line = ('%s --config %s --show all run '
'--sysinfo=off whiteboard.py --json %s'
% (AVOCADO, config, tmpfile))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
logfile = json_results['tests'][0]['logfile']
debug_dir = os.path.dirname(logfile)
whiteboard_path = os.path.join(debug_dir, 'whiteboard')
self.assertTrue(os.path.exists(whiteboard_path),
'Missing whiteboard file %s' % whiteboard_path)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
@unittest.skipIf(image_output_uncapable(),
"Uncapable of generating images with PIL library")
def test_gendata(self):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ("%s run --job-results-dir %s "
"--sysinfo=off gendata.py --json %s" %
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
bsod_dir = None
json_dir = None
for test in json_results['tests']:
if "test_bsod" in test['url']:
bsod_dir = test['logfile']
elif "test_json" in test['url']:
json_dir = test['logfile']
self.assertTrue(bsod_dir, "Failed to get test_bsod output "
"directory")
self.assertTrue(json_dir, "Failed to get test_json output "
"directory")
bsod_dir = os.path.join(os.path.dirname(bsod_dir), "data",
"bsod.png")
json_dir = os.path.join(os.path.dirname(json_dir), "data",
"test.json")
self.assertTrue(os.path.exists(bsod_dir), "File %s produced by"
"test does not exist" % bsod_dir)
self.assertTrue(os.path.exists(json_dir), "File %s produced by"
"test does not exist" % json_dir)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
def test_redirect_output(self):
redirected_output_path = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s '
'--sysinfo=off passtest.py > %s'
% (AVOCADO, self.tmpdir, redirected_output_path))
result = process.run(cmd_line, ignore_status=True, shell=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, '',
'After redirecting to file, output is not empty: %s' % output)
with open(redirected_output_path, 'r') as redirected_output_file_obj:
redirected_output = redirected_output_file_obj.read()
for code in TermSupport.ESCAPE_CODES:
self.assertNotIn(code, redirected_output,
'Found terminal support code %s in redirected output\n%s' %
(code, redirected_output))
finally:
try:
os.remove(redirected_output_path)
except OSError:
pass
@unittest.skipIf(perl_tap_parser_uncapable(),
"Uncapable of using Perl TAP::Parser library")
def test_tap_parser(self):
perl_script = script.TemporaryScript("tap_parser.pl",
PERL_TAP_PARSER_SNIPPET
% self.tmpdir)
perl_script.save()
os.chdir(basedir)
process.run("perl %s" % perl_script)
def test_tap_totaltests(self):
os.chdir(basedir)
cmd_line = ("%s run passtest.py "
"-m examples/tests/sleeptest.py.data/sleeptest.yaml "
"--job-results-dir %s "
"--tap -" % (AVOCADO, self.tmpdir))
result = process.run(cmd_line)
expr = '1..4'
self.assertIn(expr, result.stdout, "'%s' not found in:\n%s"
% (expr, result.stdout))
def test_broken_pipe(self):
os.chdir(basedir)
cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO
result = process.run(cmd_line, shell=True, ignore_status=True,
env={"LC_ALL": "C"})
expected_rc = 127
self.assertEqual(result.exit_status, expected_rc,
("avocado run to broken pipe did not return "
"rc %d:\n%s" % (expected_rc, result)))
self.assertEqual(len(result.stderr.splitlines()), 1)
self.assertIn("whacky-unknown-command", result.stderr)
self.assertIn("not found", result.stderr)
self.assertNotIn("Avocado crashed", result.stderr)
def test_results_plugins_no_tests(self):
os.chdir(basedir)
cmd_line = ("%s run UNEXISTING --job-results-dir %s"
% (AVOCADO, self.tmpdir))
exit_code = process.system(cmd_line, ignore_status=True)
self.assertEqual(exit_code, exit_codes.AVOCADO_JOB_FAIL)
xunit_results = os.path.join(self.tmpdir, 'latest', 'results.xml')
self.assertFalse(os.path.exists(xunit_results))
json_results = os.path.join(self.tmpdir, 'latest', 'results.json')
self.assertFalse(os.path.exists(json_results))
tap_results = os.path.join(self.tmpdir, 'latest', 'results.tap')
self.assertFalse(os.path.exists(tap_results))
def tearDown(self):
shutil.rmtree(self.tmpdir)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 296,487,700,817,237,500 | 42.269151 | 167 | 0.537442 | false |
ge0rgi/cinder | cinder/volume/utils.py | 1 | 31057 | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import ast
import functools
import math
import operator
import re
import time
import uuid
from Crypto.Random import random
import eventlet
from eventlet import tpool
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
from cinder.volume import throttling
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
return str(s) if s else ''
def _usage_from_volume(context, volume_ref, **kw):
now = timeutils.utcnow()
launched_at = volume_ref['launched_at'] or now
created_at = volume_ref['created_at'] or now
volume_status = volume_ref['status']
if volume_status == 'error_managing_deleting':
volume_status = 'deleting'
usage_info = dict(
tenant_id=volume_ref['project_id'],
host=volume_ref['host'],
user_id=volume_ref['user_id'],
availability_zone=volume_ref['availability_zone'],
volume_id=volume_ref['id'],
volume_type=volume_ref['volume_type_id'],
display_name=volume_ref['display_name'],
launched_at=launched_at.isoformat(),
created_at=created_at.isoformat(),
status=volume_status,
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'],
replication_status=volume_ref['replication_status'],
replication_extended_status=volume_ref['replication_extended_status'],
replication_driver_data=volume_ref['replication_driver_data'],
metadata=volume_ref.get('volume_metadata'),)
usage_info.update(kw)
try:
attachments = db.volume_attachment_get_all_by_volume_id(
context, volume_ref['id'])
usage_info['volume_attachment'] = attachments
glance_meta = db.volume_glance_metadata_get(context, volume_ref['id'])
if glance_meta:
usage_info['glance_metadata'] = glance_meta
except exception.GlanceMetadataNotFound:
pass
except exception.VolumeNotFound:
LOG.debug("Can not find volume %s at notify usage", volume_ref['id'])
return usage_info
def _usage_from_backup(backup, **kw):
num_dependent_backups = backup.num_dependent_backups
usage_info = dict(tenant_id=backup.project_id,
user_id=backup.user_id,
availability_zone=backup.availability_zone,
backup_id=backup.id,
host=backup.host,
display_name=backup.display_name,
created_at=str(backup.created_at),
status=backup.status,
volume_id=backup.volume_id,
size=backup.size,
service_metadata=backup.service_metadata,
service=backup.service,
fail_reason=backup.fail_reason,
parent_id=backup.parent_id,
num_dependent_backups=num_dependent_backups,
snapshot_id=backup.snapshot_id,
)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume, **extra_usage_info)
rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_backup_usage(context, backup, event_suffix,
extra_usage_info=None,
host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_backup(backup, **extra_usage_info)
rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix,
usage_info)
def _usage_from_snapshot(snapshot, context, **extra_usage_info):
    # (niedbalski) a snapshot might be related to a deleted
    # volume; if that's the case, the volume information is still
    # required to fill the usage_info, so we force reading
    # the volume data even if the volume has been deleted.
context.read_deleted = "yes"
volume = db.volume_get(context, snapshot.volume_id)
usage_info = {
'tenant_id': snapshot.project_id,
'user_id': snapshot.user_id,
'availability_zone': volume['availability_zone'],
'volume_id': snapshot.volume_id,
'volume_size': snapshot.volume_size,
'snapshot_id': snapshot.id,
'display_name': snapshot.display_name,
'created_at': str(snapshot.created_at),
'status': snapshot.status,
'deleted': null_safe_str(snapshot.deleted),
'metadata': null_safe_str(snapshot.metadata),
}
usage_info.update(extra_usage_info)
return usage_info
@utils.if_notifications_enabled
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info)
rpc.get_notifier('snapshot', host).info(context,
'snapshot.%s' % event_suffix,
usage_info)
def _usage_from_capacity(capacity, **extra_usage_info):
capacity_info = {
'name_to_id': capacity['name_to_id'],
'total': capacity['total'],
'free': capacity['free'],
'allocated': capacity['allocated'],
'provisioned': capacity['provisioned'],
'virtual_free': capacity['virtual_free'],
'reported_at': capacity['reported_at']
}
capacity_info.update(extra_usage_info)
return capacity_info
@utils.if_notifications_enabled
def notify_about_capacity_usage(context, capacity, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_capacity(capacity, **extra_usage_info)
rpc.get_notifier('capacity', host).info(context,
'capacity.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_usage(context, volume, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_usage_info)
rpc.get_notifier('replication', host).info(context,
'replication.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_error(context, volume, suffix,
extra_error_info=None, host=None):
if not host:
host = CONF.host
if not extra_error_info:
extra_error_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_error_info)
rpc.get_notifier('replication', host).error(context,
'replication.%s' % suffix,
usage_info)
def _usage_from_consistencygroup(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
consistencygroup_id=group_ref.id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_consistencygroup_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_consistencygroup(group,
**extra_usage_info)
rpc.get_notifier("consistencygroup", host).info(
context,
'consistencygroup.%s' % event_suffix,
usage_info)
def _usage_from_group(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
group_id=group_ref.id,
group_type=group_ref.group_type_id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_group_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group(group,
**extra_usage_info)
rpc.get_notifier("group", host).info(
context,
'group.%s' % event_suffix,
usage_info)
def _usage_from_cgsnapshot(cgsnapshot, **kw):
usage_info = dict(
tenant_id=cgsnapshot.project_id,
user_id=cgsnapshot.user_id,
cgsnapshot_id=cgsnapshot.id,
name=cgsnapshot.name,
consistencygroup_id=cgsnapshot.consistencygroup_id,
created_at=cgsnapshot.created_at.isoformat(),
status=cgsnapshot.status)
usage_info.update(kw)
return usage_info
def _usage_from_group_snapshot(group_snapshot, **kw):
usage_info = dict(
tenant_id=group_snapshot.project_id,
user_id=group_snapshot.user_id,
group_snapshot_id=group_snapshot.id,
name=group_snapshot.name,
group_id=group_snapshot.group_id,
group_type=group_snapshot.group_type_id,
created_at=group_snapshot.created_at.isoformat(),
status=group_snapshot.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_cgsnapshot(cgsnapshot,
**extra_usage_info)
rpc.get_notifier("cgsnapshot", host).info(
context,
'cgsnapshot.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group_snapshot(group_snapshot,
**extra_usage_info)
rpc.get_notifier("group_snapshot", host).info(
context,
'group_snapshot.%s' % event_suffix,
usage_info)
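# Note: each notify_about_* helper above follows the same pattern: build a
# payload with the matching _usage_from_* helper, then emit
# '<resource>.<suffix>' on the notifier for the given host (defaulting to
# CONF.host). For example, notify_about_group_usage(ctxt, group, 'create.end')
# emits a 'group.create.end' notification whose payload comes from
# _usage_from_group().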
def _check_blocksize(blocksize):
# Check if volume_dd_blocksize is valid
try:
# Rule out zero-sized/negative/float dd blocksize which
# cannot be caught by strutils
if blocksize.startswith(('-', '0')) or '.' in blocksize:
raise ValueError
strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
"it may indicate that \'volume_dd_blocksize\' "
"was configured incorrectly. Fall back to default."),
{'blocksize': blocksize})
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
return blocksize
def check_for_odirect_support(src, dest, flag='oflag=direct'):
# Check whether O_DIRECT is supported
try:
# iflag=direct and if=/dev/zero combination does not work
# error: dd: failed to open '/dev/zero': Invalid argument
if (src == '/dev/zero' and flag == 'iflag=direct'):
return False
else:
utils.execute('dd', 'count=0', 'if=%s' % src,
'of=%s' % dest,
flag, run_as_root=True)
return True
except processutils.ProcessExecutionError:
return False
def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize,
sync=False, execute=utils.execute, ionice=None,
sparse=False):
cmd = prefix[:]
if ionice:
cmd.extend(('ionice', ionice))
blocksize = _check_blocksize(blocksize)
size_in_bytes = size_in_m * units.Mi
cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % size_in_bytes, 'bs=%s' % blocksize))
# Use O_DIRECT to avoid thrashing the system buffer cache
odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct')
cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes')
if check_for_odirect_support(srcstr, deststr, 'oflag=direct'):
cmd.append('oflag=direct')
odirect = True
    # If the volume is being unprovisioned, request that the data be
    # persisted before returning, so that it is not discarded from the
    # cache.
conv = []
if sync and not odirect:
conv.append('fdatasync')
if sparse:
conv.append('sparse')
if conv:
conv_options = 'conv=' + ",".join(conv)
cmd.append(conv_options)
# Perform the copy
start_time = timeutils.utcnow()
execute(*cmd, run_as_root=True)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
mbps = (size_in_m / duration)
LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
"size %(sz).2f MB, duration %(duration).2f sec",
{"src": srcstr,
"dest": deststr,
"sz": size_in_m,
"duration": duration})
LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"),
{'size_in_m': size_in_m, 'mbps': mbps})
def _open_volume_with_path(path, mode):
try:
with utils.temporary_chown(path):
handle = open(path, mode)
return handle
except Exception:
LOG.error(_LE("Failed to open volume from %(path)s."), {'path': path})
def _transfer_data(src, dest, length, chunk_size):
"""Transfer data between files (Python IO objects)."""
chunks = int(math.ceil(length / chunk_size))
remaining_length = length
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
{'chunks': chunks, 'bytes': chunk_size})
for chunk in range(0, chunks):
before = time.time()
data = tpool.execute(src.read, min(chunk_size, remaining_length))
# If we have reached end of source, discard any extraneous bytes from
# destination volume if trim is enabled and stop writing.
if data == b'':
break
tpool.execute(dest.write, data)
remaining_length -= len(data)
delta = (time.time() - before)
rate = (chunk_size / delta) / units.Ki
LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
{'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})
# yield to any other pending operations
eventlet.sleep(0)
tpool.execute(dest.flush)
def _copy_volume_with_file(src, dest, size_in_m):
src_handle = src
if isinstance(src, six.string_types):
src_handle = _open_volume_with_path(src, 'rb')
dest_handle = dest
if isinstance(dest, six.string_types):
dest_handle = _open_volume_with_path(dest, 'wb')
if not src_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, source device unavailable."))
if not dest_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, destination device unavailable."))
start_time = timeutils.utcnow()
_transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4)
duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow()))
if isinstance(src, six.string_types):
src_handle.close()
if isinstance(dest, six.string_types):
dest_handle.close()
mbps = (size_in_m / duration)
LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at "
"%(mbps).2f MB/s)."),
{'size_in_m': size_in_m, 'mbps': mbps})
def copy_volume(src, dest, size_in_m, blocksize, sync=False,
execute=utils.execute, ionice=None, throttle=None,
sparse=False):
"""Copy data from the source volume to the destination volume.
The parameters 'src' and 'dest' are both typically of type str, which
represents the path to each volume on the filesystem. Connectors can
optionally return a volume handle of type RawIOBase for volumes that are
not available on the local filesystem for open/close operations.
If either 'src' or 'dest' are not of type str, then they are assumed to be
of type RawIOBase or any derivative that supports file operations such as
read and write. In this case, the handles are treated as file handles
    instead of file paths and, at present, throttling is unavailable.
"""
if (isinstance(src, six.string_types) and
isinstance(dest, six.string_types)):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(src, dest) as throttle_cmd:
_copy_volume_with_path(throttle_cmd['prefix'], src, dest,
size_in_m, blocksize, sync=sync,
execute=execute, ionice=ionice,
sparse=sparse)
else:
_copy_volume_with_file(src, dest, size_in_m)
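# Illustrative sketch (not part of the original module and never called by
# Cinder; the device paths are hypothetical). copy_volume() accepts either
# filesystem paths (throttled dd copy) or file-like handles (chunked Python
# copy, no throttling), as described in the docstring above.
def _example_copy_volume_usage():
    src, dest = '/dev/mapper/src-vol', '/dev/mapper/dest-vol'  # hypothetical
    # Path mode: 1024 MiB copy with the configured dd blocksize, synced so
    # the data is persisted before returning.
    copy_volume(src, dest, 1024, CONF.volume_dd_blocksize, sync=True)
    # File-handle mode: falls back to the chunked Python copy path.
    with open(src, 'rb') as s, open(dest, 'wb') as d:
        copy_volume(s, d, 1024, CONF.volume_dd_blocksize)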
def clear_volume(volume_size, volume_path, volume_clear=None,
volume_clear_size=None, volume_clear_ionice=None,
throttle=None):
"""Unprovision old volumes to prevent data leaking between users."""
if volume_clear is None:
volume_clear = CONF.volume_clear
if volume_clear_size is None:
volume_clear_size = CONF.volume_clear_size
if volume_clear_size == 0:
volume_clear_size = volume_size
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_LI("Performing secure delete on volume: %s"), volume_path)
# We pass sparse=False explicitly here so that zero blocks are not
# skipped in order to clear the volume.
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
CONF.volume_dd_blocksize,
sync=True, execute=utils.execute,
ionice=volume_clear_ionice,
throttle=throttle, sparse=False)
else:
raise exception.InvalidConfigurationValue(
option='volume_clear',
value=volume_clear)
def supports_thin_provisioning():
return brick_lvm.LVM.supports_thin_provisioning(
utils.get_root_helper())
def get_all_physical_volumes(vg_name=None):
return brick_lvm.LVM.get_all_physical_volumes(
utils.get_root_helper(),
vg_name)
def get_all_volume_groups(vg_name=None):
return brick_lvm.LVM.get_all_volume_groups(
utils.get_root_helper(),
vg_name)
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [random.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
random.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([random.choice(symbols) for _i in range(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
random.shuffle(password)
return ''.join(password)
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
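# Illustrative sketch (never called; for explanation only): when length is at
# least the number of symbol groups, the generated secret contains at least
# one character from each group above.
def _example_generate_password():
    pw = generate_password(length=16)
    assert len(pw) == 16
    assert any(c.isdigit() for c in pw)
    assert any(c.isupper() for c in pw) and any(c.islower() for c in pw)
    return pw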
DEFAULT_POOL_NAME = '_pool0'
def extract_host(host, level='backend', default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend' or 'pool',
default value is 'backend'
:param default_pool_name: this flag specify what to do if level == 'pool'
and there is no 'pool' info encoded in host
string. default_pool_name=True will return
DEFAULT_POOL_NAME, otherwise we return None.
Default value of this parameter is False.
:return: expected information, string or None
:raises: exception.InvalidVolume
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if host is None:
msg = _("volume is not assigned to a host")
raise exception.InvalidVolume(reason=msg)
if level == 'host':
# make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
def append_host(host, pool):
"""Encode pool into host info."""
if not host or not pool:
return host
new_host = "#".join([host, pool])
return new_host
def matching_backend_name(src_volume_type, volume_type):
if src_volume_type.get('volume_backend_name') and \
volume_type.get('volume_backend_name'):
return src_volume_type.get('volume_backend_name') == \
volume_type.get('volume_backend_name')
else:
return False
def hosts_are_equivalent(host_1, host_2):
# In case host_1 or host_2 are None
if not (host_1 and host_2):
return host_1 == host_2
return extract_host(host_1) == extract_host(host_2)
def read_proc_mounts():
"""Read the /proc/mounts file.
    It's a trivial wrapper, but it eases the writing of unit tests, as
    mocking the built-in open() for a specific file only is not trivial.
"""
with open('/proc/mounts') as mounts:
return mounts.readlines()
def extract_id_from_volume_name(vol_name):
regex = re.compile(
CONF.volume_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(vol_name)
return match.group('uuid') if match else None
def check_already_managed_volume(vol_id):
"""Check cinder db for already managed volume.
:param vol_id: volume id parameter
:returns: bool -- return True, if db entry with specified
volume id exists, otherwise return False
"""
try:
return (vol_id and isinstance(vol_id, six.string_types) and
uuid.UUID(vol_id, version=4) and
objects.Volume.exists(context.get_admin_context(), vol_id))
except ValueError:
return False
def extract_id_from_snapshot_name(snap_name):
"""Return a snapshot's ID from its name on the backend."""
regex = re.compile(
CONF.snapshot_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(snap_name)
return match.group('uuid') if match else None
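# Illustrative sketch (never called; assumes the default name templates
# 'volume-%s' and 'snapshot-%s'; the UUID is hypothetical): the two helpers
# above strip the template prefix and return the embedded UUID.
def _example_extract_ids():
    uuid_str = '3f3f3f3f-0000-4000-8000-000000000000'
    vol_id = extract_id_from_volume_name('volume-' + uuid_str)
    snap_id = extract_id_from_snapshot_name('snapshot-' + uuid_str)
    return vol_id, snap_id  # both equal uuid_str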
def paginate_entries_list(entries, marker, limit, offset, sort_keys,
sort_dirs):
"""Paginate a list of entries.
    :param entries: list of dictionaries
    :param marker: The last element previously returned
    :param limit: The maximum number of items to return
    :param offset: The number of items to skip from the marker or from the
                   first element.
    :param sort_keys: A list of keys in the dictionaries to sort by
    :param sort_dirs: A list of sort directions, where each is either 'asc'
                      or 'desc'
"""
comparers = [(operator.itemgetter(key.strip()), multiplier)
for (key, multiplier) in zip(sort_keys, sort_dirs)]
def comparer(left, right):
for fn, d in comparers:
left_val = fn(left)
right_val = fn(right)
if isinstance(left_val, dict):
left_val = sorted(left_val.values())[0]
if isinstance(right_val, dict):
right_val = sorted(right_val.values())[0]
if left_val == right_val:
continue
if d == 'asc':
return -1 if left_val < right_val else 1
else:
return -1 if left_val > right_val else 1
else:
return 0
sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer))
start_index = 0
if offset is None:
offset = 0
if marker:
start_index = -1
for i, entry in enumerate(sorted_entries):
if entry['reference'] == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker not found: %s') % marker
raise exception.InvalidInput(reason=msg)
range_end = start_index + limit
return sorted_entries[start_index + offset:range_end + offset]
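# Illustrative sketch (never called; the 'reference' values are hypothetical
# backend references): driving the pagination helper above.
def _example_paginate_entries_list():
    entries = [{'reference': {'name': 'vol-%d' % i}, 'size': i}
               for i in range(5)]
    # First page: two items, largest first.
    page1 = paginate_entries_list(entries, None, 2, 0, ['size'], ['desc'])
    # Second page: resume after the last reference returned on page one.
    page2 = paginate_entries_list(entries, page1[-1]['reference'], 2, 0,
                                  ['size'], ['desc'])
    return page1, page2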
def convert_config_string_to_dict(config_string):
"""Convert config file replication string to a dict.
The only supported form is as follows:
"{'key-1'='val-1' 'key-2'='val-2'...}"
:param config_string: Properly formatted string to convert to dict.
    :returns: dict of string values
"""
resultant_dict = {}
try:
st = config_string.replace("=", ":")
st = st.replace(" ", ", ")
resultant_dict = ast.literal_eval(st)
except Exception:
LOG.warning(_LW("Error encountered translating config_string: "
"%(config_string)s to dict"),
{'config_string': config_string})
return resultant_dict
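# Illustrative sketch (never called; the key names are hypothetical):
# round-trip for the documented "{'key-1'='val-1' 'key-2'='val-2'}" form.
def _example_convert_config_string():
    sample = "{'backend_id'='site-b' 'san_ip'='10.0.0.2'}"
    return convert_config_string_to_dict(sample)
    # -> {'backend_id': 'site-b', 'san_ip': '10.0.0.2'}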
def create_encryption_key(context, key_manager, volume_type_id):
encryption_key_id = None
if volume_types.is_encrypted(context, volume_type_id):
volume_type_encryption = (
volume_types.get_volume_type_encryption(context,
volume_type_id))
cipher = volume_type_encryption.cipher
length = volume_type_encryption.key_size
algorithm = cipher.split('-')[0] if cipher else None
encryption_key_id = key_manager.create_key(
context,
algorithm=algorithm,
length=length)
return encryption_key_id
def is_replicated_str(str):
spec = (str or '').split()
return (len(spec) == 2 and
spec[0] == '<is>' and strutils.bool_from_string(spec[1]))
def is_replicated_spec(extra_specs):
return (extra_specs and
is_replicated_str(extra_specs.get('replication_enabled')))
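# Illustrative sketch (never called): the '<is> True' extra-spec convention
# consumed by the two helpers above.
def _example_replication_extra_specs():
    assert is_replicated_str('<is> True')
    assert not is_replicated_str('<is> False')
    assert not is_replicated_str(None)
    assert is_replicated_spec({'replication_enabled': '<is> True'})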
def group_get_by_id(group_id):
ctxt = context.get_admin_context()
group = db.group_get(ctxt, group_id)
return group
def is_group_a_cg_snapshot_type(group_or_snap):
LOG.debug("Checking if %s is a consistent snapshot group",
group_or_snap)
if group_or_snap["group_type_id"] is not None:
spec = group_types.get_group_type_specs(
group_or_snap["group_type_id"],
key="consistent_group_snapshot_enabled"
)
return spec == "<is> True"
return False
| apache-2.0 | 8,789,788,171,055,562,000 | 33.016429 | 79 | 0.601314 | false |
Astyan-42/skepticalscience | skepticalsciencewebsite/publications/forms.py | 1 | 6022 | from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django_select2.forms import Select2MultipleWidget
from django.db.models import Q
from django.contrib.auth import get_user_model
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit
from crispy_forms.bootstrap import Field
from skepticalsciencewebsite.utils import NoLinkClearableFileInput
from publications.models import Publication, Comment, EstimatedImpactFactor, CommentReview
from publications.constants import BOOLEAN_CHOICES, ABORTED, CORRECTION
from sciences.forms import ScienceModelForm
User = get_user_model()
class UserModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.get_full_name()
class UserModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return obj.get_full_name()
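# Note: both fields above render each user option with get_full_name()
# (e.g. "Jane Doe") instead of the default str(user), which is typically the
# username; one for single selection, one for multiple selection.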
class PublicationCreateForm(ScienceModelForm):
"""
create an publication form with restricted field
TO ADD AUTHORS AND LAST AUTHOR
"""
first_author = UserModelChoiceField(queryset=User.objects.filter(~Q(first_name="") & ~Q(last_name="")))
last_author = UserModelChoiceField(queryset=User.objects.filter(~Q(first_name="") & ~Q(last_name="")),
required=False)
authors = UserModelMultipleChoiceField(queryset=User.objects.filter(~Q(first_name="") & ~Q(last_name="")),
required=False, widget=Select2MultipleWidget)
def __init__(self, *args, **kwargs):
super(PublicationCreateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-publicationcreateForm'
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Publication
fields = ["title", "resume", "pdf_creation", "source_creation", "first_author", "authors", "last_author",
"sciences", "licence"]
widgets = {'sciences': Select2MultipleWidget,
'resume': forms.Textarea(),
'pdf_creation': NoLinkClearableFileInput,
'source_creation': NoLinkClearableFileInput,}
class PublicationCorrectForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(PublicationCorrectForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_publicationcorrectionupdateForm'
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Publication
fields = ["resume", "pdf_final", "source_final"]
widgets = {'resume': forms.Textarea(),
'pdf_final': NoLinkClearableFileInput,
'source_final': NoLinkClearableFileInput,}
class PublicationAbortForm(forms.ModelForm):
abort = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(PublicationAbortForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_publicationabortupdateForm'
self.helper.add_input(Submit('submit', _('Abort publication')))
def save(self, commit=True):
data = super(PublicationAbortForm, self).save(commit=False)
if self.cleaned_data["abort"] and data.status == CORRECTION:
data.status = ABORTED
data.update_status_date = timezone.now()
if commit:
data.save()
return data
class Meta:
model = Publication
fields = ["abort"]
class CommentForm(forms.ModelForm):
prefix = 'comment'
def __init__(self, *args, **kwargs):
super(CommentForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-commentForm'
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Comment
fields = ["author_fake_pseudo", "comment_type", "title", "content"]
widgets = {'content': forms.Textarea()}
class EstimatedImpactFactorForm(forms.ModelForm):
prefix = 'impact_factor'
def __init__(self, *args, **kwargs):
super(EstimatedImpactFactorForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_class = 'form-inline'
self.helper.field_template = 'bootstrap3/layout/inline_field.html'
self.helper.form_id = 'id-estimatedimpactfactorForm'
self.helper.layout = Layout(Field("estimated_impact_factor", min=0, max=1000, value="",
template=self.helper.field_template))
self.helper.add_input(Submit('submit', _('Evaluate')))
class Meta:
model = EstimatedImpactFactor
fields = ["estimated_impact_factor"]
class CommentReviewValidationForm(forms.ModelForm):
valid = forms.ChoiceField(choices=BOOLEAN_CHOICES, widget=forms.Select())
def __init__(self, *args, **kwargs):
super(CommentReviewValidationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-commentreviewvalidationForm'
self.helper.add_input(Submit('submit', _('Validate')))
class Meta:
model = CommentReview
fields = ["valid", "seriousness", "reason_validation"]
widgets = {'reason_validation': forms.Textarea()}
class CommentReviewCorrectionForm(forms.ModelForm):
corrected = forms.ChoiceField(choices=BOOLEAN_CHOICES, widget=forms.Select())
def __init__(self, *args, **kwargs):
super(CommentReviewCorrectionForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-commentreviewvcorrectionForm'
self.helper.add_input(Submit('submit', _('Corrected')))
class Meta:
model = CommentReview
fields = ["corrected", "reason_correction"]
widgets = {'reason_correction': forms.Textarea()} | agpl-3.0 | 1,018,924,324,186,331,900 | 37.608974 | 113 | 0.65377 | false |
anomaly/vishnu | tests/backend/config/redis/test_redis_py.py | 1 | 1586 | import pytest
def test_default():
from vishnu.backend import Redis
from vishnu.backend.config.redis import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_DB
config = Redis()
assert config.host == DEFAULT_HOST
assert config.port == DEFAULT_PORT
assert config.db == DEFAULT_DB
def test_custom_host():
from vishnu.backend import Redis
from vishnu.backend.config.redis import DEFAULT_PORT, DEFAULT_DB
custom_host = "memcache.cloud"
config = Redis(host=custom_host)
assert config.host == custom_host
assert config.port == DEFAULT_PORT
assert config.db == DEFAULT_DB
def test_invalid_host():
from vishnu.backend import Redis
with pytest.raises(TypeError) as exp:
Redis(host=23)
def test_custom_port():
from vishnu.backend import Redis
from vishnu.backend.config.redis import DEFAULT_HOST, DEFAULT_DB
custom_port = 6380
config = Redis(port=custom_port)
assert config.host == DEFAULT_HOST
assert config.port == custom_port
assert config.db == DEFAULT_DB
def test_invalid_port():
from vishnu.backend import Redis
with pytest.raises(TypeError) as exp:
Redis(port="string")
with pytest.raises(TypeError) as exp:
Redis(port=-100)
def test_invalid_db():
from vishnu.backend import Redis
with pytest.raises(TypeError) as exp:
Redis(db=-1)
with pytest.raises(TypeError) as exp:
Redis(db="db")
def test_custom_db():
from vishnu.backend import Redis
custom_db = 1
config = Redis(db=custom_db)
assert config.db == custom_db
| apache-2.0 | -2,275,082,972,257,148,200 | 22.323529 | 82 | 0.680958 | false |
encukou/freeipa | ipalib/x509.py | 1 | 28653 | # Authors:
# Rob Crittenden <[email protected]>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Certificates should be stored internally DER-encoded. We can be passed
# a certificate several ways: read it from LDAP, read it from a 3rd party
# app (dogtag, candlepin, etc) or as user input.
# Conventions
#
# Where possible the following naming conventions are used:
#
# cert: the certificate is a PEM-encoded certificate
# dercert: the certificate is DER-encoded
# rawcert: the cert is in an unknown format
from __future__ import print_function
import os
import binascii
import datetime
import enum
import ipaddress
import ssl
import base64
import re
from cryptography import x509 as crypto_x509
from cryptography import utils as crypto_utils
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import (
Encoding, PublicFormat, PrivateFormat, load_pem_private_key
)
import pyasn1
import pyasn1.error
from pyasn1.type import univ, char, namedtype, tag
from pyasn1.codec.der import decoder, encoder
from pyasn1_modules import rfc2315, rfc2459
import six
from ipalib import errors
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
PEM = 0
DER = 1
# The first group is the whole PEM datum and the second group is
# the base64 content (with newlines). For findall() the result is
# a list of 2-tuples of the PEM and base64 data.
PEM_CERT_REGEX = re.compile(
b'(-----BEGIN CERTIFICATE-----(.*?)-----END CERTIFICATE-----)',
re.DOTALL)
PEM_PRIV_REGEX = re.compile(
b'-----BEGIN(?: ENCRYPTED)?(?: (?:RSA|DSA|DH|EC))? PRIVATE KEY-----.*?'
b'-----END(?: ENCRYPTED)?(?: (?:RSA|DSA|DH|EC))? PRIVATE KEY-----',
re.DOTALL)
EKU_SERVER_AUTH = '1.3.6.1.5.5.7.3.1'
EKU_CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'
EKU_CODE_SIGNING = '1.3.6.1.5.5.7.3.3'
EKU_EMAIL_PROTECTION = '1.3.6.1.5.5.7.3.4'
EKU_PKINIT_CLIENT_AUTH = '1.3.6.1.5.2.3.4'
EKU_PKINIT_KDC = '1.3.6.1.5.2.3.5'
EKU_ANY = '2.5.29.37.0'
EKU_PLACEHOLDER = '1.3.6.1.4.1.3319.6.10.16'
SAN_UPN = '1.3.6.1.4.1.311.20.2.3'
SAN_KRB5PRINCIPALNAME = '1.3.6.1.5.2.2'
@crypto_utils.register_interface(crypto_x509.Certificate)
class IPACertificate:
"""
A proxy class wrapping a python-cryptography certificate representation for
FreeIPA purposes
"""
def __init__(self, cert, backend=None):
"""
:param cert: A python-cryptography Certificate object
:param backend: A python-cryptography Backend object
"""
self._cert = cert
self.backend = default_backend() if backend is None else backend()
# initialize the certificate fields
# we have to do it this way so that some systems don't explode since
# some field types encode-decoding is not strongly defined
self._subject = self.__get_der_field('subject')
self._issuer = self.__get_der_field('issuer')
self._serial_number = self.__get_der_field('serialNumber')
def __getstate__(self):
state = {
'_cert': self.public_bytes(Encoding.DER),
'_subject': self.subject_bytes,
'_issuer': self.issuer_bytes,
'_serial_number': self._serial_number,
}
return state
def __setstate__(self, state):
self._subject = state['_subject']
self._issuer = state['_issuer']
        self._serial_number = state['_serial_number']
self._cert = crypto_x509.load_der_x509_certificate(
state['_cert'], backend=default_backend())
def __eq__(self, other):
"""
Checks equality.
:param other: either cryptography.Certificate or IPACertificate or
bytes representing a DER-formatted certificate
"""
if (isinstance(other, (crypto_x509.Certificate, IPACertificate))):
return (self.public_bytes(Encoding.DER) ==
other.public_bytes(Encoding.DER))
elif isinstance(other, bytes):
return self.public_bytes(Encoding.DER) == other
else:
return False
def __ne__(self, other):
"""
Checks not equal.
"""
return not self.__eq__(other)
def __hash__(self):
"""
Computes a hash of the wrapped cryptography.Certificate.
"""
return hash(self._cert)
def __encode_extension(self, oid, critical, value):
# TODO: have another proxy for crypto_x509.Extension which would
# provide public_bytes on the top of what python-cryptography has
ext = rfc2459.Extension()
# TODO: this does not have to be so weird, pyasn1 now has codecs
# which are capable of providing python-native types
ext['extnID'] = univ.ObjectIdentifier(oid)
ext['critical'] = univ.Boolean(critical)
if pyasn1.__version__.startswith('0.3'):
# pyasn1 <= 0.3.7 needs explicit encoding
# see https://pagure.io/freeipa/issue/7685
value = encoder.encode(univ.OctetString(value))
ext['extnValue'] = univ.Any(value)
ext = encoder.encode(ext)
return ext
def __get_pyasn1_field(self, field):
"""
:returns: a field of the certificate in pyasn1 representation
"""
cert_bytes = self.tbs_certificate_bytes
cert = decoder.decode(cert_bytes, rfc2459.TBSCertificate())[0]
field = cert[field]
return field
def __get_der_field(self, field):
"""
:field: the name of the field of the certificate
:returns: bytes representing the value of a certificate field
"""
return encoder.encode(self.__get_pyasn1_field(field))
def public_bytes(self, encoding):
"""
Serializes the certificate to PEM or DER format.
"""
return self._cert.public_bytes(encoding)
def is_self_signed(self):
"""
:returns: True if this certificate is self-signed, False otherwise
"""
return self._cert.issuer == self._cert.subject
def fingerprint(self, algorithm):
"""
        Computes the fingerprint of the wrapped cryptography.Certificate.
"""
return self._cert.fingerprint(algorithm)
@property
def serial_number(self):
return self._cert.serial_number
@property
def serial_number_bytes(self):
return self._serial_number
@property
def version(self):
return self._cert.version
@property
def subject(self):
return self._cert.subject
@property
def subject_bytes(self):
return self._subject
@property
def signature_hash_algorithm(self):
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
return self._cert.signature_hash_algorithm
@property
def signature_algorithm_oid(self):
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
return self._cert.signature_algorithm_oid
@property
def signature(self):
"""
Returns the signature bytes.
"""
return self._cert.signature
@property
def issuer(self):
return self._cert.issuer
@property
def issuer_bytes(self):
return self._issuer
@property
def not_valid_before(self):
return self._cert.not_valid_before
@property
def not_valid_after(self):
return self._cert.not_valid_after
@property
def tbs_certificate_bytes(self):
return self._cert.tbs_certificate_bytes
@property
def extensions(self):
# TODO: own Extension and Extensions classes proxying
# python-cryptography
return self._cert.extensions
def public_key(self):
return self._cert.public_key()
@property
def public_key_info_bytes(self):
return self._cert.public_key().public_bytes(
encoding=Encoding.DER, format=PublicFormat.SubjectPublicKeyInfo)
@property
def extended_key_usage(self):
try:
ext_key_usage = self._cert.extensions.get_extension_for_oid(
crypto_x509.oid.ExtensionOID.EXTENDED_KEY_USAGE).value
except crypto_x509.ExtensionNotFound:
return None
return set(oid.dotted_string for oid in ext_key_usage)
@property
def extended_key_usage_bytes(self):
eku = self.extended_key_usage
if eku is None:
return None
ekurfc = rfc2459.ExtKeyUsageSyntax()
for i, oid in enumerate(sorted(eku)):
ekurfc[i] = univ.ObjectIdentifier(oid)
ekurfc = encoder.encode(ekurfc)
return self.__encode_extension('2.5.29.37', EKU_ANY not in eku, ekurfc)
@property
def san_general_names(self):
"""
Return SAN general names from a python-cryptography
certificate object. If the SAN extension is not present,
return an empty sequence.
Because python-cryptography does not yet provide a way to
handle unrecognised critical extensions (which may occur),
we must parse the certificate and extract the General Names.
For uniformity with other code, we manually construct values
        of python-cryptography GeneralName subtypes.
python-cryptography does not yet provide types for
ediPartyName or x400Address, so we drop these name types.
otherNames are NOT instantiated to more specific types where
the type is known. Use ``process_othernames`` to do that.
When python-cryptography can handle certs with unrecognised
critical extensions and implements ediPartyName and
x400Address, this function (and helpers) will be redundant
and should go away.
"""
gns = self.__pyasn1_get_san_general_names()
GENERAL_NAME_CONSTRUCTORS = {
'rfc822Name': lambda x: crypto_x509.RFC822Name(unicode(x)),
'dNSName': lambda x: crypto_x509.DNSName(unicode(x)),
'directoryName': _pyasn1_to_cryptography_directoryname,
'registeredID': _pyasn1_to_cryptography_registeredid,
'iPAddress': _pyasn1_to_cryptography_ipaddress,
'uniformResourceIdentifier':
lambda x: crypto_x509.UniformResourceIdentifier(unicode(x)),
'otherName': _pyasn1_to_cryptography_othername,
}
result = []
for gn in gns:
gn_type = gn.getName()
if gn_type in GENERAL_NAME_CONSTRUCTORS:
result.append(
GENERAL_NAME_CONSTRUCTORS[gn_type](gn.getComponent()))
return result
def __pyasn1_get_san_general_names(self):
# pyasn1 returns None when the key is not present in the certificate
# but we need an iterable
extensions = self.__get_pyasn1_field('extensions') or []
OID_SAN = univ.ObjectIdentifier('2.5.29.17')
gns = []
for ext in extensions:
if ext['extnID'] == OID_SAN:
der = ext['extnValue']
if pyasn1.__version__.startswith('0.3'):
# pyasn1 <= 0.3.7 needs explicit unwrap of ANY container
# see https://pagure.io/freeipa/issue/7685
der = decoder.decode(der, asn1Spec=univ.OctetString())[0]
gns = decoder.decode(der, asn1Spec=rfc2459.SubjectAltName())[0]
break
return gns
@property
def san_a_label_dns_names(self):
gns = self.__pyasn1_get_san_general_names()
result = []
for gn in gns:
if gn.getName() == 'dNSName':
result.append(unicode(gn.getComponent()))
return result
def match_hostname(self, hostname):
match_cert = {}
match_cert['subject'] = match_subject = []
for rdn in self._cert.subject.rdns:
match_rdn = []
for ava in rdn:
if ava.oid == crypto_x509.oid.NameOID.COMMON_NAME:
match_rdn.append(('commonName', ava.value))
match_subject.append(match_rdn)
values = self.san_a_label_dns_names
if values:
match_cert['subjectAltName'] = match_san = []
for value in values:
match_san.append(('DNS', value))
ssl.match_hostname(match_cert, DNSName(hostname).ToASCII())
def load_pem_x509_certificate(data):
"""
Load an X.509 certificate in PEM format.
    :returns: an ``IPACertificate`` object.
:raises: ``ValueError`` if unable to load the certificate.
"""
return IPACertificate(
crypto_x509.load_pem_x509_certificate(data, backend=default_backend())
)
def load_der_x509_certificate(data):
"""
Load an X.509 certificate in DER format.
    :returns: an ``IPACertificate`` object.
:raises: ``ValueError`` if unable to load the certificate.
"""
return IPACertificate(
crypto_x509.load_der_x509_certificate(data, backend=default_backend())
)
def load_unknown_x509_certificate(data):
"""
Only use this function when you can't be sure what kind of format does
your certificate have, e.g. input certificate files in installers
    :returns: an ``IPACertificate`` object.
:raises: ``ValueError`` if unable to load the certificate.
"""
try:
return load_pem_x509_certificate(data)
except ValueError:
return load_der_x509_certificate(data)
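# Illustrative usage sketch (never called; the path below is hypothetical):
def _example_load_any_cert(path='/etc/ipa/ca.crt'):
    with open(path, 'rb') as f:
        return load_unknown_x509_certificate(f.read())  # accepts PEM or DER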
def load_certificate_from_file(filename):
"""
Load a certificate from a PEM file.
    Returns an ``IPACertificate`` object.
"""
with open(filename, mode='rb') as f:
return load_pem_x509_certificate(f.read())
def load_certificate_list(data):
"""
Load a certificate list from a sequence of concatenated PEMs.
    Return a list of ``IPACertificate`` objects.
"""
certs = PEM_CERT_REGEX.findall(data)
return [load_pem_x509_certificate(cert[0]) for cert in certs]
def load_certificate_list_from_file(filename):
"""
Load a certificate list from a PEM file.
    Return a list of ``IPACertificate`` objects.
"""
with open(filename, 'rb') as f:
return load_certificate_list(f.read())
def load_private_key_list(data, password=None):
"""
Load a private key list from a sequence of concatenated PEMs.
:param data: bytes containing the private keys
:param password: bytes, the password to encrypted keys in the bundle
:returns: List of python-cryptography ``PrivateKey`` objects
"""
crypto_backend = default_backend()
priv_keys = []
for match in re.finditer(PEM_PRIV_REGEX, data):
if re.search(b"ENCRYPTED", match.group()) is not None:
if password is None:
raise RuntimeError("Password is required for the encrypted "
"keys in the bundle.")
# Load private key as encrypted
priv_keys.append(
load_pem_private_key(match.group(), password,
backend=crypto_backend))
else:
priv_keys.append(
load_pem_private_key(match.group(), None,
backend=crypto_backend))
return priv_keys
def pkcs7_to_certs(data, datatype=PEM):
"""
Extract certificates from a PKCS #7 object.
:returns: a ``list`` of ``IPACertificate`` objects.
"""
if datatype == PEM:
match = re.match(
br'-----BEGIN PKCS7-----(.*?)-----END PKCS7-----',
data,
re.DOTALL)
if not match:
raise ValueError("not a valid PKCS#7 PEM")
data = base64.b64decode(match.group(1))
content_info, tail = decoder.decode(data, rfc2315.ContentInfo())
if tail:
raise ValueError("not a valid PKCS#7 message")
if content_info['contentType'] != rfc2315.signedData:
raise ValueError("not a PKCS#7 signed data message")
signed_data, tail = decoder.decode(bytes(content_info['content']),
rfc2315.SignedData())
if tail:
raise ValueError("not a valid PKCS#7 signed data message")
result = []
for certificate in signed_data['certificates']:
certificate = encoder.encode(certificate)
certificate = load_der_x509_certificate(certificate)
result.append(certificate)
return result
def validate_pem_x509_certificate(cert):
"""
Perform cert validation by trying to load it via python-cryptography.
"""
try:
load_pem_x509_certificate(cert)
except ValueError as e:
raise errors.CertificateFormatError(error=str(e))
def validate_der_x509_certificate(cert):
"""
Perform cert validation by trying to load it via python-cryptography.
"""
try:
load_der_x509_certificate(cert)
except ValueError as e:
raise errors.CertificateFormatError(error=str(e))
def write_certificate(cert, filename):
"""
Write the certificate to a file in PEM format.
:param cert: cryptograpy ``Certificate`` object
"""
try:
with open(filename, 'wb') as fp:
fp.write(cert.public_bytes(Encoding.PEM))
except (IOError, OSError) as e:
raise errors.FileError(reason=str(e))
def write_certificate_list(certs, filename, mode=None):
"""
Write a list of certificates to a file in PEM format.
:param certs: a list of IPACertificate objects to be written to a file
:param filename: a path to the file the certificates should be written into
"""
try:
with open(filename, 'wb') as f:
if mode is not None:
os.fchmod(f.fileno(), mode)
for cert in certs:
f.write(cert.public_bytes(Encoding.PEM))
except (IOError, OSError) as e:
raise errors.FileError(reason=str(e))
def write_pem_private_key(priv_key, filename, passwd=None):
"""
    Write a private key to a file in PEM format. Will force 0600 permissions
on file.
:param priv_key: cryptography ``PrivateKey`` object
:param passwd: ``bytes`` representing the password to store the
private key with
"""
if passwd is not None:
enc_alg = serialization.BestAvailableEncryption(passwd)
else:
enc_alg = serialization.NoEncryption()
try:
with open(filename, 'wb') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(priv_key.private_bytes(
Encoding.PEM,
PrivateFormat.PKCS8,
encryption_algorithm=enc_alg))
except (IOError, OSError) as e:
raise errors.FileError(reason=str(e))
class _PrincipalName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name-type', univ.Integer().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('name-string', univ.SequenceOf(char.GeneralString()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
)
class _KRB5PrincipalName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('realm', char.GeneralString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('principalName', _PrincipalName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
)
def _decode_krb5principalname(data):
principal = decoder.decode(data, asn1Spec=_KRB5PrincipalName())[0]
realm = (unicode(principal['realm']).replace('\\', '\\\\')
.replace('@', '\\@'))
name = principal['principalName']['name-string']
name = u'/'.join(unicode(n).replace('\\', '\\\\')
.replace('/', '\\/')
.replace('@', '\\@') for n in name)
name = u'%s@%s' % (name, realm)
return name
class KRB5PrincipalName(crypto_x509.general_name.OtherName):
def __init__(self, type_id, value):
super(KRB5PrincipalName, self).__init__(type_id, value)
self.name = _decode_krb5principalname(value)
class UPN(crypto_x509.general_name.OtherName):
def __init__(self, type_id, value):
super(UPN, self).__init__(type_id, value)
self.name = unicode(
decoder.decode(value, asn1Spec=char.UTF8String())[0])
OTHERNAME_CLASS_MAP = {
SAN_KRB5PRINCIPALNAME: KRB5PrincipalName,
SAN_UPN: UPN,
}
def process_othernames(gns):
"""
Process python-cryptography GeneralName values, yielding
OtherName values of more specific type if type is known.
"""
for gn in gns:
if isinstance(gn, crypto_x509.general_name.OtherName):
cls = OTHERNAME_CLASS_MAP.get(
gn.type_id.dotted_string,
crypto_x509.general_name.OtherName)
yield cls(gn.type_id, gn.value)
else:
yield gn
def _pyasn1_to_cryptography_directoryname(dn):
attrs = []
# Name is CHOICE { RDNSequence } (only one possibility)
for rdn in dn.getComponent():
for ava in rdn:
attr = crypto_x509.NameAttribute(
_pyasn1_to_cryptography_oid(ava['type']),
unicode(decoder.decode(ava['value'])[0])
)
attrs.append(attr)
return crypto_x509.DirectoryName(crypto_x509.Name(attrs))
def _pyasn1_to_cryptography_registeredid(oid):
return crypto_x509.RegisteredID(_pyasn1_to_cryptography_oid(oid))
def _pyasn1_to_cryptography_ipaddress(octet_string):
return crypto_x509.IPAddress(
ipaddress.ip_address(bytes(octet_string)))
def _pyasn1_to_cryptography_othername(on):
return crypto_x509.OtherName(
_pyasn1_to_cryptography_oid(on['type-id']),
bytes(on['value'])
)
def _pyasn1_to_cryptography_oid(oid):
return crypto_x509.ObjectIdentifier(str(oid))
def chunk(size, s):
"""Yield chunks of the specified size from the given string.
The input must be a multiple of the chunk size (otherwise
trailing characters are dropped).
Works on character strings only.
"""
return (u''.join(span) for span in six.moves.zip(*[iter(s)] * size))
def add_colons(s):
"""Add colons between each nibble pair in a hex string."""
return u':'.join(chunk(2, s))
def to_hex_with_colons(bs):
"""Convert bytes to a hex string with colons."""
return add_colons(binascii.hexlify(bs).decode('utf-8'))
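# Example: to_hex_with_colons(b'\x0a\x1b\x2c') -> '0a:1b:2c'. Note that
# chunk() silently drops a trailing odd character, so callers are expected to
# pass even-length hex strings.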
class UTC(datetime.tzinfo):
ZERO = datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
def format_datetime(t):
if t.tzinfo is None:
t = t.replace(tzinfo=UTC())
return unicode(t.strftime("%a %b %d %H:%M:%S %Y %Z"))
class ExternalCAType(enum.Enum):
GENERIC = 'generic'
MS_CS = 'ms-cs'
class ExternalCAProfile:
"""
An external CA profile configuration. Currently the only
subclasses are for Microsoft CAs, for providing data in the
"Certificate Template" extension.
Constructing this class will actually return an instance of a
subclass.
Subclasses MUST set ``valid_for``.
"""
def __init__(self, s=None):
self.unparsed_input = s
# Which external CA types is the data valid for?
# A set of VALUES of the ExternalCAType enum.
valid_for = set()
def __new__(cls, s=None):
"""Construct the ExternalCAProfile value.
Return an instance of a subclass determined by
the format of the argument.
"""
# we are directly constructing a subclass; instantiate
# it and be done
if cls is not ExternalCAProfile:
return super(ExternalCAProfile, cls).__new__(cls)
# construction via the base class; therefore the string
# argument is required, and is used to determine which
# subclass to construct
if s is None:
raise ValueError('string argument is required')
parts = s.split(':')
try:
# Is the first part on OID?
_oid = univ.ObjectIdentifier(parts[0])
# It is; construct a V2 template
# pylint: disable=too-many-function-args
return MSCSTemplateV2.__new__(MSCSTemplateV2, s)
except pyasn1.error.PyAsn1Error:
# It is not an OID; treat as a template name
# pylint: disable=too-many-function-args
return MSCSTemplateV1.__new__(MSCSTemplateV1, s)
def __getstate__(self):
return self.unparsed_input
def __setstate__(self, state):
# explicitly call __init__ method to initialise object
self.__init__(state)
class MSCSTemplate(ExternalCAProfile):
"""
An Microsoft AD-CS Template specifier.
Subclasses MUST set ext_oid.
Subclass constructors MUST set asn1obj.
"""
valid_for = set([ExternalCAType.MS_CS.value])
ext_oid = None # extension OID, as a Python str
asn1obj = None # unencoded extension data
def get_ext_data(self):
"""Return DER-encoded extension data."""
return encoder.encode(self.asn1obj)
class MSCSTemplateV1(MSCSTemplate):
"""
A v1 template specifier, per
https://msdn.microsoft.com/en-us/library/cc250011.aspx.
::
CertificateTemplateName ::= SEQUENCE {
Name UTF8String
}
But note that a bare BMPString is used in practice.
"""
ext_oid = "1.3.6.1.4.1.311.20.2"
def __init__(self, s):
super(MSCSTemplateV1, self).__init__(s)
parts = s.split(':')
if len(parts) > 1:
raise ValueError(
"Cannot specify certificate template version when using name.")
self.asn1obj = char.BMPString(str(parts[0]))
class MSCSTemplateV2(MSCSTemplate):
"""
A v2 template specifier, per
https://msdn.microsoft.com/en-us/library/windows/desktop/aa378274(v=vs.85).aspx
::
CertificateTemplate ::= SEQUENCE {
templateID EncodedObjectID,
templateMajorVersion TemplateVersion,
templateMinorVersion TemplateVersion OPTIONAL
}
TemplateVersion ::= INTEGER (0..4294967295)
"""
ext_oid = "1.3.6.1.4.1.311.21.7"
@staticmethod
def check_version_in_range(desc, n):
if n < 0 or n >= 2**32:
raise ValueError(
"Template {} version must be in range 0..4294967295"
.format(desc))
def __init__(self, s):
super(MSCSTemplateV2, self).__init__(s)
parts = s.split(':')
obj = CertificateTemplateV2()
if len(parts) < 2 or len(parts) > 3:
raise ValueError(
"Incorrect template specification; required format is: "
"<oid>:<majorVersion>[:<minorVersion>]")
try:
obj['templateID'] = univ.ObjectIdentifier(parts[0])
major = int(parts[1])
self.check_version_in_range("major", major)
obj['templateMajorVersion'] = major
if len(parts) > 2:
minor = int(parts[2])
self.check_version_in_range("minor", minor)
obj['templateMinorVersion'] = int(parts[2])
except pyasn1.error.PyAsn1Error:
raise ValueError("Could not parse certificate template specifier.")
self.asn1obj = obj
class CertificateTemplateV2(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('templateID', univ.ObjectIdentifier()),
namedtype.NamedType('templateMajorVersion', univ.Integer()),
namedtype.OptionalNamedType('templateMinorVersion', univ.Integer())
)
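# Illustrative sketch (never called; the specifiers are hypothetical): how the
# ExternalCAProfile dispatch above behaves. A bare name selects the v1
# "template name" form, while an OID-prefixed specifier selects the v2 form.
def _example_external_ca_profiles():
    v1 = ExternalCAProfile('SubCA')
    v2 = ExternalCAProfile('1.3.6.1.4.1.311.21.8.1234:100:2')
    return isinstance(v1, MSCSTemplateV1), isinstance(v2, MSCSTemplateV2)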
| gpl-3.0 | 5,514,516,305,728,191,000 | 30.077007 | 89 | 0.621191 | false |
beiko-lab/gengis | bin/Lib/site-packages/numpy/core/tests/test_errstate.py | 1 | 1930 | # The following exec statement (or something like it) is needed to
# prevent SyntaxError on Python < 2.5. Even though this is a test,
# SyntaxErrors are not acceptable; on Debian systems, they block
# byte-compilation during install and thus cause the package to fail
# to install.
import sys
if sys.version_info[:2] >= (2, 5):
exec """
from __future__ import with_statement
import platform
from numpy.core import *
from numpy.random import rand, randint
from numpy.testing import *
class TestErrstate(TestCase):
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_invalid(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(invalid='ignore'):
sqrt(a)
# While this should fail!
try:
sqrt(a)
except FloatingPointError:
pass
else:
self.fail("Did not raise an invalid error")
def test_divide(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(divide='ignore'):
a // 0
# While this should fail!
try:
a // 0
except FloatingPointError:
pass
else:
self.fail("Did not raise divide by zero error")
def test_errcall(self):
def foo(*args):
print(args)
olderrcall = geterrcall()
with errstate(call=foo):
assert_(geterrcall() is foo, 'call is not foo')
with errstate(call=None):
assert_(geterrcall() is None, 'call is not None')
assert_(geterrcall() is olderrcall, 'call is not olderrcall')
"""
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | 8,773,401,320,805,839,000 | 29.639344 | 69 | 0.542487 | false |
BuzzFeedNews/nics-firearm-background-checks | scripts/chart-total-checks-36-months.py | 1 | 1215 | #!/usr/bin/env python
import sys, os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import MonthLocator
import seaborn as sb
sb.set()
checks = (
pd.read_csv(sys.stdin)
.assign(
month_dt = lambda df: pd.to_datetime(df["month"], format = "%Y-%m")
)
)
checks["year_int"] = checks["month"].apply(lambda x: int(x.split("-")[0]))
checks["month_int"] = checks["month"].apply(lambda x: int(x.split("-")[1]))
latest_month_count = (
checks
.iloc[0]
.pipe(lambda x: x["month_int"] + (x["year_int"] * 12))
)
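# month_int + 12 * year_int gives a running month count, so the filter below
# keeps only the latest 36 months of data.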
totals = (
checks
.loc[lambda df: (df["month_int"] + (df["year_int"] * 12))
> (latest_month_count - 12*3)]
.groupby("month_dt")
["totals"]
.sum()
)
ax = totals.plot(kind="area", figsize=(12, 8), color="#000000", alpha=0.5)
ax.figure.set_facecolor("#FFFFFF")
ax.set_title(
"NICS Background Check Totals — Past 36 Months",
fontsize=24
)
plt.setp(ax.get_yticklabels(), fontsize=12)
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
ax.xaxis.set_minor_locator(MonthLocator(range(1, 13)))
ax.set_xlabel("")
plt.savefig(sys.stdout.buffer)
| mit | -1,528,021,792,744,747,500 | 23.734694 | 75 | 0.644389 | false |
nkmk/python-snippets | notebook/numpy_select_assign.py | 1 | 1394 | import numpy as np
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[0, 0] = 100
print(a_2d)
# [[100 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[0] = 100
print(a_2d)
# [[100 100 100 100]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_2d[np.ix_([False, True, True], [1, 3])] = 200
print(a_2d)
# [[100 100 100 100]
# [ 4 200 6 200]
# [ 8 200 10 200]]
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[::2, :3])
# [[ 0 1 2]
# [ 8 9 10]]
print(np.arange(6).reshape(2, 3) * 100)
# [[ 0 100 200]
# [300 400 500]]
a_2d[::2, :3] = np.arange(6).reshape(2, 3) * 100
print(a_2d)
# [[ 0 100 200 3]
# [ 4 5 6 7]
# [300 400 500 11]]
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[::2, :3])
# [[ 0 1 2]
# [ 8 9 10]]
print(np.arange(3) * 100)
# [ 0 100 200]
a_2d[::2, :3] = np.arange(3) * 100
print(a_2d)
# [[ 0 100 200 3]
# [ 4 5 6 7]
# [ 0 100 200 11]]
a_2d = np.arange(12).reshape(3, 4)
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
print(a_2d[::2, :3])
# [[ 0 1 2]
# [ 8 9 10]]
print(np.arange(2) * 100)
# [ 0 100]
# a_2d[::2, :3] = np.arange(2) * 100
# ValueError: could not broadcast input array from shape (2) into shape (2,3)
| mit | 7,102,588,987,576,330,000 | 16.425 | 77 | 0.457676 | false |
Universal-Model-Converter/UMC3.0a | dev tests and files/data backups/GUI_update.py | 1 | 17618 | #1
#v 0.001
#I don't have a TOC here yet as everything constantly changes
import COMMON #file vars and functions for import/export processing
import VIEWER #mainly for the toggles
from VIEWER import __GL,__GLU #GL functions
from VIEWER import __pyg
'''
from COMMON import Scripts
#Shapes (private)
#Widgets (private)
def Button(Text,X,Y,W,H,): pass
def Browser():
import os
Dir='C:/'; done=0
clicked = 0
while not done:
items = os.listdir(Dir)
cancel = Button('Cancel')
if not cancel:
if Button('..'):
Dir
else: #need a better RT method >_>
#TODO: parse the list and collect info first
for item in items:
if Button(item): #draw Clicked button
clicked=1
else: #draw unclicked button
if clicked: #action
clicked=0
if os.path.isdir(Dir+item):
Dir+=(item+'/')
else:
done=1
return Dir+item
else:
pass
else:
done=1
return None
'''
#the GL selection/feedback buffers are a bit complicated for me,
#so I've defined my own method derived from GL. (should be slightly faster than re-defining everything)
#this method compaires the hitdefs with the current selection and changes the state of a valid hit
W_States = {} #this stores the mouse state for the current widget
#further state processing can be done by the widget itself.
# { name: [L,M,R,O] } #O - mouseOver
W_Info = {} #this stores the state info of each widget
#this determines weather a toggle is active, or a selection has yet to be made
__UpdateHits=True #allow for hit updates
W_HitDefs = {} #this stores the hit-area for each widget
#this is constantly cleared and updated during state changes
# { name: [X1,Y1,X2,Y2] }
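#e.g. for a toggle named 'EnLight': W_HitDefs['EnLight'] holds its screen rect
#[X1,Y1,X2,Y2], W_States['EnLight'] holds the per-frame mouse state
#[L,M,R,Over], and W_Info['EnLight'] holds the persistent toggle value that
#the widget reads back each frame.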
pw,ph = 1.0/800,1.0/600
#-----------------------------------
#I/O process functions
def __ImportModel():
pass
def __ExportModel():
pass
def __ImportAnim():
pass
def __ExportAnim():
pass
def __Browser(Scripts): #overlays GUI when activated (Clears hit-defs to avoid improper activation)
#return file_path, Module
pass
#-----------------------------------
#widget resources
FontSize,Font=0,None
def __font(x,y,size,text,color=(0,0,0,255)):
	global pw,ph,FontSize,Font
#__GL.glEnable(__GL.GL_TEXTURE_2D)
#Create Font
#to increase performance, only create a new font when changing the size
	if size != FontSize or Font is None: FontSize=size; Font=__pyg.font.Font('fonts/tahoma.ttf',size) #don't use .fon files (cache the font object)
	w,h=Font.size(text)
#_w,_h=1,1 #GL-modified width/height (binary multiple)
#while _w<w: _w<<=1
#while _h<h: _h<<=1
#fsurf=__pyg.Surface((w,h),__pyg.SRCALPHA)
#fsurf.blit(__pyg.transform.flip(F.render(text,True,color), False, True),(0,0)) #Create GL-Font Image
#w,h=fsurf.get_size()
image=__pyg.transform.flip(F.render(text,True,color), False, True).get_buffer().raw #get raw pixel data
# Create Texture __GL.glGenTextures(1)
'''__GL.glBindTexture(__GL.GL_TEXTURE_2D, 0) # 2d texture (x and y size)
__GL.glPixelStorei(__GL.GL_UNPACK_ALIGNMENT,1)
__GL.glTexImage2D(__GL.GL_TEXTURE_2D, 0, 3, _w, _h, 0, __GL.GL_BGRA, __GL.GL_UNSIGNED_BYTE, image)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_S, __GL.GL_CLAMP)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_T, __GL.GL_CLAMP)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_S, __GL.GL_REPEAT)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_T, __GL.GL_REPEAT)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_MAG_FILTER, __GL.GL_NEAREST)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_MIN_FILTER, __GL.GL_NEAREST)
__GL.glTexEnvf(__GL.GL_TEXTURE_ENV, __GL.GL_TEXTURE_ENV_MODE, __GL.GL_DECAL)
w*=pw; h*=ph
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(0.0,0.0,0.0,color[3]*(1.0/255))
__GL.glVertex2f(x,y); __GL.glTexCoord2f(0.0,0.0)
__GL.glVertex2f(x+w,y); __GL.glTexCoord2f(1.0,0.0)
__GL.glVertex2f(x+w,y+h); __GL.glTexCoord2f(1.0,1.0)
__GL.glVertex2f(x,y+h); __GL.glTexCoord2f(0.0,1.0)
__GL.glEnd()'''
__GL.glRasterPos2f(float(x)*pw if type(x)==int else x ,
float(y+h)*ph if type(y)==int else y+(h*ph) )
__GL.glDrawPixels(w,h,__GL.GL_BGRA,__GL.GL_UNSIGNED_BYTE,image)
del(image) #remove the old buffer
#__GL.glDisable(__GL.GL_TEXTURE_2D)
#-----------------------------------
#internal widgets (bound to change)
def __DropBox(X,Y,W,Na,Items,Def=0,Text=''):
global W_States,W_Info,W_HitDefs,__UpdateHits
global pw,ph
X2,Y2 = X+(pw*(W*10)),Y+(ph*20)
#Widget init info
try: W_States[Na]
except KeyError:
W_States.update({Na:[0,0,0,False]})
W_Info.update({Na:[Def,False]})
if __UpdateHits: W_HitDefs.update({Na:[X,Y,X2+(pw*15),Y2]})
#Widget logic
L,M,R,O = W_States[Na]
if L==2:
W_Info[Na][1]=True
W_States[Na][0]=0
State = W_Info[Na]
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(1.0,1.0,1.0,0.25)
__GL.glVertex2f(X,Y)
__GL.glVertex2f(X2,Y)
__GL.glVertex2f(X2,Y2)
__GL.glVertex2f(X,Y2)
__GL.glColor4f(0.0,0.0,0.0,0.1)
__GL.glVertex2f(X2,Y)
__GL.glVertex2f(X2+(pw*15),Y)
__GL.glVertex2f(X2+(pw*15),Y2)
__GL.glVertex2f(X2,Y2)
__GL.glEnd()
__font(X+(5*pw),Y+(2*ph),12,Na,(0,0,0,100))
if State[1]:
W_HitDefs={}
__UpdateHits=False #prevent hit updates from other widgets
#once we've made our selection, we can then allow hit updates
remove=False
for i,v in enumerate(Items):
#we have to create custom widget defs for each entry here
N = '%s_%s_Sel%i'%(Na,v,i) #Na+v+'_Sel'+str(i)
x1,y1,x2,y2=X,Y+((Y2-Y)*(i+1)),X2,Y2+((Y2-Y)*(i+1))
try: W_States[N]
except KeyError: W_States.update({N:[0,0,0,False]}) #mouse updates
W_HitDefs.update({N:[x1,y1,x2,y2]})
#these should be the only hits avaliable
l,m,r,o = W_States[N]
#all we need to worry about here, is the state, and the hit-def
if o: __GL.glColor4f(0.375,0.375,0.375,0.75)
else: __GL.glColor4f(0.0,0.0,0.0,0.5)
__GL.glBegin(__GL.GL_QUADS)
__GL.glVertex2f(x1,y1)
__GL.glVertex2f(x2,y1)
__GL.glVertex2f(x2,y2)
__GL.glVertex2f(x1,y2)
__GL.glEnd()
__font(x1+(5*pw),y1+(2*ph),12,v,(200,200,200,100))
if l==2:
W_Info[Na]=[i,False] #State should not be an index
remove=True
if remove:
for i,v in enumerate(Items): #clear the buffers of these widgets
n = '%s_%s_Sel%i'%(Na,v,i)
W_States.pop(n)
W_HitDefs.pop(n)
__UpdateHits=True
return State[0]
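#Widget-state protocol shared by the widgets below (inferred from the code):
#  W_States[name] = [left, middle, right, over]; click states are 0 = idle,
#                   1 = pressed, 2 = released (consumed/reset by the widget)
#  W_Info[name]   = the widget's persistent value (selected index, toggle, ...)
#  W_HitDefs[name]= [x1, y1, x2, y2] hit rectangle, rebuilt every frame
#Typical call (see __DisplayPanel below):
#  mode = __DropBox(pw*10, ph*40, 10, 'Display', ['Grid','Floor','Off'], 0)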
def __TButton(X,Y,Na,St=False,Text=''):
global W_States,W_Info,W_HitDefs,__UpdateHits
global pw,ph
#Widget init info
try: W_States[Na]
except KeyError:
W_States.update({Na:[0,0,0,False]})
W_Info.update({Na:St})
if __UpdateHits: W_HitDefs.update({Na:[X,Y,X+(pw*20),Y+(ph*20)]})
#Widget logic
L,M,R,O = W_States[Na]
if L==2:
W_Info[Na]=(False if W_Info[Na] else True)
W_States[Na][0]=0
State = W_Info[Na]
if State: __GL.glColor4f(0.0,0.0,0.0,0.25)
else: __GL.glColor4f(0.0,0.0,0.0,0.1)
__GL.glBegin(__GL.GL_QUADS)
__GL.glVertex2f(X,Y)
__GL.glVertex2f(X+(pw*20),Y)
__GL.glVertex2f(X+(pw*20),Y+(ph*20))
__GL.glVertex2f(X,Y+(ph*20))
__GL.glEnd()
__font(X+(25*pw),Y+(2*ph),12,Text,(0,0,0,100))
return State
def __Button(X1,Y1,X2,Y2,Na,Text=''):
global pw,ph
def __BrowseBar(X1,Y1,W):
global pw,ph
#-----------------------------------
#panel drawing functions
def __ModelPanel():
global pw,ph
__BrowseBar(pw*10,ph*40,180)
def __AnimPanel():
global pw,ph
pass
def __DisplayPanel(X1,X2):
global pw,ph
VIEWER.TOGGLE_LIGHTING = __TButton(pw*(X1+11),ph*31,'EnLight',True,'Lighting')
VIEWER.TOGGLE_WIREFRAME = __TButton(pw*(X1+11),ph*56,'EnWire',False,'Wireframe')
VIEWER.TOGGLE_BONES = __DropBox(pw*(X1+11),ph*81,10,'Draw Bones',['None','Standard','Overlay (X-Ray)'],0)
#reversed drawing order here so fonts overlay properly
if VIEWER.TOGGLE_3D==2: VIEWER.TOGGLE_3D_MODE[1] = [1./60,1./120][__DropBox(pw*(X1+251),ph*81,5,'Freq (WIP)',['60hz','120hz'],0)]
if VIEWER.TOGGLE_3D==1: VIEWER.TOGGLE_3D_MODE[0] = __DropBox(pw*(X1+251),ph*81,5,'Colors',['R|GB','G|RB','B|RG'],0)
    VIEWER.TOGGLE_3D = __DropBox(pw*(X1+131),ph*81,10,'3D Drawing',['Off','Anaglyph','Shutter'],0)
VIEWER.TOGGLE_ORTHO = __DropBox(pw*(X1+131),ph*56,10,'Projection',['Perspective','Orthographic'],1)
VIEWER.TOGGLE_GRID = [2 if VIEWER.TOGGLE_GRID>2 else VIEWER.TOGGLE_GRID,3,4][
__DropBox(pw*(X1+131),ph*31,10,'Display',['Grid','Floor','Off'],0)]
#'''
def __ControlPanel(X1,X2):
global pw,ph
pass
#-----------------------------------
def __ExPanel(X1,Y1,X2,Y2,EB,Na,MX=0,MY=0,St=True): #returns current state for other panels
global W_States,W_Info,W_HitDefs,__UpdateHits
global pw,ph
#Widget init info
try: W_States[Na]
except KeyError:
W_States.update({Na:[0,0,0,False]})
W_Info.update({Na:St})
#Widget logic
L,M,R,O = W_States[Na]
if L==2:
W_Info[Na]=(False if W_Info[Na] else True)
W_States[Na][0]=0
State = W_Info[Na]
if State:
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(0.5,0.5,0.5,0.8) #model (left) panel
__GL.glVertex2f(X1,Y1)
__GL.glVertex2f(X1,Y2)
__GL.glVertex2f(X2,Y2)
__GL.glVertex2f(X2,Y1)
__GL.glEnd()
#60x15px rectangle
if EB==0: #top
EBX1,EBY1,EBX2,EBY2=(X1+((X2-X1)/2)-(pw*30)),Y1,(X1+((X2-X1)/2)+(pw*30)),Y1+(ph*15)
TPX1,TPY1 = EBX1+(pw*25),EBY1+(ph*5)
TPX2,TPY2 = EBX1+(pw*30),EBY1+(ph*10)
TPX3,TPY3 = EBX1+(pw*35),EBY1+(ph*5)
elif EB==1: #right
EBX1,EBY1,EBX2,EBY2=X2-(pw*15),((Y2-Y1)/2)-(ph*30),X2,((Y2-Y1)/2)+(ph*30)
TPX1,TPY1 = EBX1+(pw*10),EBY1+(ph*25)
TPX2,TPY2 = EBX1+(pw*5),EBY1+(ph*30)
TPX3,TPY3 = EBX1+(pw*10),EBY1+(ph*35)
elif EB==2: #bottom
EBX1,EBY1,EBX2,EBY2=(X1+((X2-X1)/2)-(pw*30)),Y2-(ph*15),(X1+((X2-X1)/2)+(pw*30)),Y2
TPX1,TPY1 = EBX1+(pw*25),EBY1+(ph*10)
TPX2,TPY2 = EBX1+(pw*30),EBY1+(ph*5)
TPX3,TPY3 = EBX1+(pw*35),EBY1+(ph*10)
elif EB==3: #left
EBX1,EBY1,EBX2,EBY2=X1,((Y2-Y1)/2)-(ph*30),X1+(pw*15),((Y2-Y1)/2)+(ph*30)
TPX1,TPY1 = EBX1+(pw*5),EBY1+(ph*25)
TPX2,TPY2 = EBX1+(pw*10),EBY1+(ph*30)
TPX3,TPY3 = EBX1+(pw*5),EBY1+(ph*35)
#is the panel expanded?
if not State:
if EB==0: #top
Eq=((Y2-Y1)-(ph*15))
EBY1,EBY2=EBY1+Eq,EBY2+Eq
TPY1,TPY2,TPY3=TPY1+(Eq+(ph*5)),TPY2+(Eq-(ph*5)),TPY3+(Eq+(ph*5))
elif EB==1: #right
Eq=((X2-X1)-(pw*15))
EBX1,EBX2=EBX1-Eq,EBX2-Eq
TPX1,TPX2,TPX3=TPX1-(Eq+(pw*5)),TPX2-(Eq-(pw*5)),TPX3-(Eq+(pw*5))
elif EB==2: #bottom
Eq=((Y2-Y1)-(ph*15))
EBY1,EBY2=EBY1-Eq,EBY2-Eq
TPY1,TPY2,TPY3=TPY1-(Eq+(ph*5)),TPY2-(Eq-(ph*5)),TPY3-(Eq+(ph*5))
elif EB==3: #left
Eq=((X2-X1)-(pw*15))
EBX1,EBX2=EBX1+Eq,EBX2+Eq
TPX1,TPX2,TPX3=TPX1+(Eq+(pw*5)),TPX2+(Eq-(pw*5)),TPX3+(Eq+(pw*5))
__GL.glColor4f(0.5,0.5,0.5,0.8)
__GL.glBegin(__GL.GL_QUADS) #(just the BG color behind the toggle button)
__GL.glVertex2f(EBX1+MX,EBY1+MY)
__GL.glVertex2f(EBX1+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY1+MY)
__GL.glEnd()
if __UpdateHits: W_HitDefs.update({Na:[EBX1+MX,EBY1+MY,EBX2+MX,EBY2+MY]})
__GL.glColor4f(0.0,0.0,0.0,0.2)
__GL.glBegin(__GL.GL_QUADS)
__GL.glVertex2f(EBX1+MX,EBY1+MY)
__GL.glVertex2f(EBX1+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY1+MY)
__GL.glEnd()
__GL.glBegin(__GL.GL_TRIANGLES)
__GL.glVertex2f(TPX1+MX,TPY1+MY)
__GL.glVertex2f(TPX2+MX,TPY2+MY)
__GL.glVertex2f(TPX3+MX,TPY3+MY)
__GL.glEnd()
return State
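#__ExPanel draws a collapsible side panel: EB picks the edge that carries the
#expand/collapse button (0=top, 1=right, 2=bottom, 3=left), MX/MY nudge that
#button, and the returned State (True when expanded) gates the panel contents
#in __DrawGUI below.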
def __DrawGUI(w,h,RotMatrix): #called directly by the display function after drawing the scene
global pw,ph
#the GUI is drawn over the scene by clearing the depth buffer
pw,ph=1./w,1./h
global W_HitDefs
W_HitDefs = {} #clear the hitdefs to avoid improper activation
__GL.glMatrixMode(__GL.GL_PROJECTION)
__GL.glLoadIdentity()
#glOrtho(-2*P, 2*P, -2, 2, -100, 100)
__GLU.gluOrtho2D(0.0, 1.0, 1.0, 0.0) #TODO update the viewport with the pixel range instead of 1.0 (less GUI calculations will be needed)
__GL.glMatrixMode(__GL.GL_MODELVIEW)
__GL.glClear( __GL.GL_DEPTH_BUFFER_BIT )
__GL.glPolygonMode(__GL.GL_FRONT_AND_BACK,__GL.GL_FILL)
__GL.glLoadIdentity()
__GL.glEnable(__GL.GL_BLEND)
__GL.glDisable(__GL.GL_DEPTH_TEST)
__GL.glDisable(__GL.GL_TEXTURE_2D)
__GL.glDisable(__GL.GL_LIGHTING)
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(0.4,0.4,0.4,0.8) #options toggle
__GL.glVertex2f(pw*0,ph*0)
__GL.glVertex2f(pw*w,ph*0)
__GL.glVertex2f(pw*w,ph*20)
__GL.glVertex2f(pw*0,ph*20)
__GL.glEnd()
__GL.glColor4f(0.0,0.0,0.0,0.2)
__GL.glBegin(__GL.GL_TRIANGLES)
__GL.glVertex2f(pw*((w/2)-10),ph*6)
__GL.glVertex2f(pw*((w/2)+10),ph*6)
__GL.glVertex2f(pw*(w/2),ph*15)
__GL.glEnd()
M = __ExPanel(pw*0,ph*21,pw*210,ph*h,1,'MODEL')
if M: __ModelPanel()
A = __ExPanel(pw*(w-210),ph*21,pw*w,ph*h,3,'ANIM')
if A: __AnimPanel()
D = __ExPanel(pw*(211 if M else 1),ph*21,pw*(w-(211 if A else 1)),ph*150,2,'DSPL',(0 if M else pw*105)+(0 if A else pw*-105))
if D: __DisplayPanel(210 if M else 0,-210 if A else 0)
C = __ExPanel(pw*(211 if M else 1),ph*(h-150),pw*(w-(211 if A else 1)),ph*h,0,'CTRL',(0 if M else pw*105)+(0 if A else pw*-105))
if C: __ControlPanel(210 if M else 0,-210 if A else 0)
#__font(40,40,14,"testing",(128,0,0,100))
__GL.glDisable(__GL.GL_BLEND)
__GL.glEnable(__GL.GL_DEPTH_TEST)
#axis
__GL.glLineWidth(1.0)
__GL.glPushMatrix()
__GL.glTranslatef(pw*(228 if M else 17),ph*(h-(167 if C else 17)),0)
__GL.glScalef(pw*600,ph*600,1)
__GL.glMultMatrixf(RotMatrix)
__GL.glColor3f(1.0,0.0,0.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.02,0.0,0.0); __GL.glEnd() #X
__GL.glTranslatef(0.0145,0.0,0.0); __GL.glRotatef(90, 0.0, 1.0, 0.0)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glRotatef(-90, 0.0, 1.0, 0.0); __GL.glTranslatef(-0.0145,0.0,0.0)
__GL.glColor3f(0.0,1.0,0.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.0,-0.02,0.0); __GL.glEnd() #Y
__GL.glTranslatef(0.0,-0.0145,0.0); __GL.glRotatef(90, 1.0, 0.0, 0.0)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glRotatef(-90, 1.0, 0.0, 0.0); __GL.glTranslatef(0.0,0.0145,0.0)
__GL.glColor3f(0.0,0.0,1.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.0,0.0,0.02); __GL.glEnd() #Z
__GL.glTranslatef(0.0,0.0,0.0145)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glTranslatef(0.0,0.0,-0.0145)
__GL.glColor3f(0.5,0.5,0.5) ; #__GLUT.glutSolidSphere(0.003, 8, 4)
__GL.glPopMatrix()
lastHit = [0,False] #last hit record to be compared with the current hit record [ button, state ]
def __CheckHit(b,x,y,s): #checks if the hit (click) executes a command
L,M,R,U,D=range(1,6)
    for name in W_HitDefs: #we currently only care about whether we have a hit (o is not handled here)
X1,Y1,X2,Y2 = W_HitDefs[name] #Hit Area
l,m,r,o = W_States[name] #we only want the release states to last 1 frame
if X1<x<X2 and Y1<y<Y2: #are we in the hit area of this widget?
            #if we have a hit, update the stored state for this widget name
if b==L:
if s: W_States[name][0]=1 #we have clicked
else: W_States[name][0]=2 #we have released
if b==M:
if s: W_States[name][1]=1 #we have clicked
else: W_States[name][1]=2 #we have released
if b==R:
if s: W_States[name][2]=1 #we have clicked
else: W_States[name][2]=2 #we have released
else: #do we have any states to clean up?
            #this would happen if we click a widget, then move out of its area
if l==1: W_States[name][0]=0
if m==1: W_States[name][1]=0
if r==1: W_States[name][2]=0
#release states are to be taken care of by the widget.
def __CheckPos(x,y): #checks the new mouse position when moved
    for name in W_HitDefs: #we only care about whether the cursor is over a hit area
X1,Y1,X2,Y2 = W_HitDefs[name] #Hit Area
#are we in the hit area of this widget?
if X1<x<X2 and Y1<y<Y2: W_States[name][3]=True
else: W_States[name][3]=False
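#Input flow (summary): the host window is expected to forward mouse clicks to
#__CheckHit(button, x, y, state) and motion to __CheckPos(x, y); button codes
#1..5 map to left/middle/right and, presumably, wheel up/down. __DrawGUI()
#clears and rebuilds W_HitDefs every frame, so hit areas track what is drawn.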
def __initGUI():
__pyg.font.init()
| mit | 511,102,529,398,882,100 | 32.686424 | 142 | 0.564082 | false |
thp44/delphin_6_automation | pytest/test_inputs.py | 1 | 4179 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
import os
import shutil
# RiBuild Modules
from delphin_6_automation.sampling import inputs
from delphin_6_automation.delphin_setup import delphin_permutations
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def test_wall_core_materials(input_sets):
materials = inputs.wall_core_materials(input_sets)
assert materials
assert isinstance(materials, list)
assert all(isinstance(material_id, int)
for material_id in materials)
def test_plaster_materials(input_sets):
materials = inputs.plaster_materials(input_sets)
assert materials
assert isinstance(materials, list)
assert all(isinstance(material_id, int)
for material_id in materials)
def test_insulation(input_sets):
materials = inputs.insulation_type(input_sets)
assert materials
assert isinstance(materials, list)
assert all(isinstance(material_id, int)
for sublist in materials
for material_id in sublist)
def test_construction_types():
constructions = inputs.construction_types()
assert constructions
assert isinstance(constructions, list)
assert all([file.endswith('.d6p')
for file in constructions])
def test_insulation_systems(input_sets):
systems = inputs.insulation_systems(input_sets, rows_to_read=2)
assert isinstance(systems, pd.DataFrame)
assert (11, 2) == systems.shape
def test_delphin_templates(test_folder):
to_copy = inputs.delphin_templates(test_folder)
assert isinstance(to_copy, dict)
def test_construct_delphin_reference(delphin_reference_folder):
files = inputs.construct_delphin_reference(delphin_reference_folder)
for file in files:
assert os.path.exists(os.path.join(delphin_reference_folder, 'design', file))
def test_implement_system_materials(delphin_with_insulation, add_insulation_materials, dummy_systems, request):
if '3' in request.node.name:
index = 0
else:
index = 1
delphin = inputs.implement_system_materials(delphin_with_insulation, dummy_systems.loc[index])
layers = delphin_permutations.get_layers(delphin)
for material_id in dummy_systems.loc[index, 'ID'].values:
assert delphin_permutations.identify_layer(layers, str(material_id))
def test_implement_insulation_widths(delphin_with_insulation, add_insulation_materials, dummy_systems, request):
if '3' in request.node.name:
index = 0
insulation_id = 39
else:
index = 1
insulation_id = 187
delphin_with_system_materials = inputs.implement_system_materials(delphin_with_insulation, dummy_systems.loc[index])
permutated_dicts = inputs.implement_insulation_widths(delphin_with_system_materials, dummy_systems.loc[index])
assert isinstance(permutated_dicts, list)
insulation_widths = dummy_systems.loc[index, 'Dimension'].values.tolist()
for i, delphin in enumerate(permutated_dicts):
assert isinstance(delphin, dict)
layers = delphin_permutations.get_layers(delphin)
insulation_layer = delphin_permutations.identify_layer(layers, str(insulation_id))
assert insulation_widths[i] * 10e-04 == insulation_layer['x_width']
def test_construct_design_files(mock_insulation_systems,
add_insulation_materials, delphin_reference_folder):
file_names = inputs.construct_design_files(delphin_reference_folder)
for file in file_names:
assert os.path.exists(os.path.join(delphin_reference_folder, 'design', file))
def test_design_options(mock_insulation_systems,
add_insulation_materials, delphin_reference_folder):
designs = inputs.design_options(delphin_reference_folder)
assert isinstance(designs, list)
for design in designs:
assert isinstance(design, str)
assert not design.endswith('.d6p')
| mit | -6,308,856,295,466,594,000 | 29.955556 | 120 | 0.66547 | false |
CloudBrewery/duplicity-swiftkeys | duplicity/backends/swiftbackend.py | 1 | 4654 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2013 Matthieu Huin <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import duplicity.backend
from duplicity import log
from duplicity import util
from duplicity.errors import BackendException
class SwiftBackend(duplicity.backend.Backend):
"""
Backend for Swift
"""
def __init__(self, parsed_url):
try:
from swiftclient import Connection
from swiftclient import ClientException
except ImportError:
raise BackendException("This backend requires "
"the python-swiftclient library.")
self.resp_exc = ClientException
conn_kwargs = {}
# if the user has already authenticated
if 'SWIFT_PREAUTHURL' in os.environ and 'SWIFT_PREAUTHTOKEN' in os.environ:
conn_kwargs['preauthurl'] = os.environ['SWIFT_PREAUTHURL']
conn_kwargs['preauthtoken'] = os.environ['SWIFT_PREAUTHTOKEN']
else:
if 'SWIFT_USERNAME' not in os.environ:
raise BackendException('SWIFT_USERNAME environment variable '
'not set.')
if 'SWIFT_PASSWORD' not in os.environ:
raise BackendException('SWIFT_PASSWORD environment variable '
'not set.')
if 'SWIFT_AUTHURL' not in os.environ:
raise BackendException('SWIFT_AUTHURL environment variable '
'not set.')
conn_kwargs['user'] = os.environ['SWIFT_USERNAME']
conn_kwargs['key'] = os.environ['SWIFT_PASSWORD']
conn_kwargs['authurl'] = os.environ['SWIFT_AUTHURL']
if 'SWIFT_AUTHVERSION' in os.environ:
conn_kwargs['auth_version'] = os.environ['SWIFT_AUTHVERSION']
else:
conn_kwargs['auth_version'] = '1'
if 'SWIFT_TENANTNAME' in os.environ:
conn_kwargs['tenant_name'] = os.environ['SWIFT_TENANTNAME']
self.container = parsed_url.path.lstrip('/')
container_metadata = None
try:
self.conn = Connection(**conn_kwargs)
container_metadata = self.conn.head_container(self.container)
except ClientException:
pass
except Exception as e:
log.FatalError("Connection failed: %s %s"
% (e.__class__.__name__, str(e)),
log.ErrorCode.connection_failed)
if container_metadata is None:
log.Info("Creating container %s" % self.container)
try:
self.conn.put_container(self.container)
except Exception as e:
log.FatalError("Container creation failed: %s %s"
% (e.__class__.__name__, str(e)),
log.ErrorCode.connection_failed)
def _error_code(self, operation, e):
if isinstance(e, self.resp_exc):
if e.http_status == 404:
return log.ErrorCode.backend_not_found
def _put(self, source_path, remote_filename):
self.conn.put_object(self.container, remote_filename,
file(source_path.name))
def _get(self, remote_filename, local_path):
headers, body = self.conn.get_object(self.container, remote_filename)
with open(local_path.name, 'wb') as f:
for chunk in body:
f.write(chunk)
def _list(self):
headers, objs = self.conn.get_container(self.container)
return [ o['name'] for o in objs ]
def _delete(self, filename):
self.conn.delete_object(self.container, filename)
def _query(self, filename):
sobject = self.conn.head_object(self.container, filename)
return {'size': int(sobject['content-length'])}
duplicity.backend.register_backend("swift", SwiftBackend)
| gpl-2.0 | -4,472,042,862,085,545,000 | 37.46281 | 83 | 0.603997 | false |
google/ci_edit | tools/checkSpelling.py | 1 | 3269 | #!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import io
import os
import pprint
import re
import sys
from fnmatch import fnmatch
ciEditDir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ciEditDir)
import app.regex
import app.spelling
print("checking spelling")
doValues = False
root = (len(sys.argv) > 1 and sys.argv[1]) or "."
filePattern = (len(sys.argv) > 2 and sys.argv[2]) or "*.*"
kReWords = re.compile(r"""(\w+)""")
# The first group is a hack to allow upper case pluralized, e.g. URLs.
kReSubwords = re.compile(
r"((?:[A-Z]{2,}s\b)|(?:[A-Z][a-z]+)|(?:[A-Z]+(?![a-z]))|(?:[a-z]+))"
)
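# e.g. kReSubwords splits "HTMLParser" into ["HTML", "Parser"] and keeps the
# pluralised acronym in "parseURLs" together as ["parse", "URLs"].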
kReIgnoreDirs = re.compile(r"""/\.git/""")
kReIgnoreFiles = re.compile(
r"""\.(pyc|pyo|png|a|jpg|tif|mp3|mp4|cpuperf|dylib|avi|so|plist|raw|webm)$"""
)
kReIncludeFiles = re.compile(r"""\.(cc)$""")
assert kReIgnoreDirs.search("/apple/.git/orange")
assert kReIgnoreFiles.search("/apple.pyc")
dictionaryList = glob.glob(os.path.join(ciEditDir, "app/dictionary.*.words"))
dictionaryList = [os.path.basename(i)[11:-6] for i in dictionaryList]
print(pprint.pprint(dictionaryList))
pathPrefs = []
dictionary = app.spelling.Dictionary(dictionaryList, pathPrefs)
assert dictionary.is_correct(u"has", "cpp")
def handle_file(fileName, unrecognizedWords):
# print(fileName, end="")
try:
with io.open(fileName, "r") as f:
data = f.read()
if not data:
return
for sre in kReSubwords.finditer(data):
# print(repr(sre.groups()))
word = sre.groups()[0].lower()
if not dictionary.is_correct(word, "cpp"):
if word not in unrecognizedWords:
print(word, end=",")
unrecognizedWords.add(word)
except UnicodeDecodeError:
print("Error decoding:", fileName)
def walk_tree(root):
unrecognizedWords = set()
for (dirPath, dirNames, fileNames) in os.walk(root):
if kReIgnoreDirs.search(dirPath):
continue
for fileName in filter(lambda x: fnmatch(x, filePattern), fileNames):
if kReIgnoreFiles.search(fileName):
continue
if kReIncludeFiles.search(fileName):
handle_file(os.path.join(dirPath, fileName), unrecognizedWords)
if unrecognizedWords:
print("found", fileName)
print(unrecognizedWords)
print()
return unrecognizedWords
if os.path.isfile(root):
    unrecognizedWords = set()
    handle_file(root, unrecognizedWords)
    print(sorted(unrecognizedWords))
elif os.path.isdir(root):
words = sorted(walk_tree(root))
for i in words:
print(i)
else:
print("root is not a file or directory")
print("---- end ----")
| apache-2.0 | 4,742,552,453,476,556,000 | 30.432692 | 81 | 0.649128 | false |
nathanielksmith/prosaicweb | prosaicweb/__init__.py | 1 | 2338 | # prosaicweb
# Copyright (C) 2016 nathaniel smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from . import views
from .models import Base, engine
from .views.auth import login, logout, register
from .app import app, bcrypt
routes = [
# TODO
# because html is dumb and forms can only use post/get, that's all we take
# here. However, within each view function, we check for a _method on a
# POST and treat that as the method. This should really be handled by a
# middleware.
('/', 'index', views.index, {}),
('/generate', 'generate', views.generate, {'methods': ['GET', 'POST']}),
('/corpora', 'corpora', views.corpora, {'methods': ['GET', 'POST',]}),
('/sources', 'sources', views.sources, {'methods': ['GET', 'POST',]}),
('/sources/<source_id>', 'source', views.source,
{'methods': ['GET', 'POST']}),
('/corpora/<corpus_id>', 'corpus', views.corpus,
{'methods': ['GET', 'POST']}),
('/phrases', 'phrases', views.phrases, {'methods': ['POST']}),
('/templates', 'templates', views.templates, {'methods': ['GET', 'POST']}),
('/templates/<template_id>', 'template', views.template,
{'methods': ['GET', 'POST']}),
('/auth/login', 'login', login, {'methods': ['POST']}),
('/auth/register', 'register', register, {'methods':['GET', 'POST']}),
('/auth/logout', 'logout', logout, {}),
]
for [route, name, fn, opts] in routes:
app.add_url_rule(route, name, fn, **opts)
def main() -> None:
if len(sys.argv) > 1 and sys.argv[1] == 'dbinit':
print('initializing prosaic and prosaicweb database state...')
Base.metadata.create_all(bind=engine)
exit(0)
app.run()
if __name__ == '__main__':
main()
| agpl-3.0 | 1,900,292,323,592,605,700 | 39.310345 | 79 | 0.640719 | false |
liqin75/vse-vpnaas-plugin | quantum/plugins/vmware/vshield/plugin.py | 1 | 14836 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.db import api as qdbapi
from quantum.db import model_base
from quantum.db.loadbalancer import loadbalancer_db
from quantum.db import firewall_db as fw_db
from quantum.extensions import loadbalancer
from quantum.openstack.common import log as logging
from quantum.plugins.common import constants
from vseapi import VseAPI
from lbapi import LoadBalancerAPI
from fwapi import FirewallAPI
LOG = logging.getLogger(__name__)
edgeUri = 'https://fank-dev2.eng.vmware.com'
edgeId = 'edge-27'
edgeUser = 'admin'
edgePasswd = 'default'
class VShieldEdgeLBPlugin(loadbalancer_db.LoadBalancerPluginDb):
"""
Implementation of the Quantum Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas"]
def __init__(self):
"""
Do the initialization for the loadbalancer service plugin here.
"""
# Hard coded for now
vseapi = VseAPI(edgeUri, edgeUser, edgePasswd, edgeId)
self.vselb = LoadBalancerAPI(vseapi)
qdbapi.register_models(base=model_base.BASEV2)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Quantum LoadBalancer Service Plugin"
def create_vip(self, context, vip):
with context.session.begin(subtransactions=True):
v = super(VShieldEdgeLBPlugin, self).create_vip(context, vip)
self.update_status(context, loadbalancer_db.Vip, v['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create vip: %s") % v['id'])
self.vselb.create_vip(context, v)
self.update_status(context, loadbalancer_db.Vip, v['id'],
constants.ACTIVE)
# If we adopt asynchronous mode, this method should return immediately
        # and let the client query the object status. The plugin will listen on
        # events from the device and update the object status by calling
# self.update_state(context, Vip, id, ACTIVE/ERROR)
#
# In synchronous mode, send the request to device here and wait for
# response. Eventually update the object status prior to the return.
v_query = self.get_vip(context, v['id'])
return v_query
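    # The same pattern repeats for the resources below: write the DB row, mark
    # it PENDING_*, push the change to the vShield Edge through the API wrapper
    # and only then flip the status to ACTIVE (synchronous mode, as noted above).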
def update_vip(self, context, id, vip):
with context.session.begin(subtransactions=True):
v_query = self.get_vip(
context, id, fields=["status"])
if v_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=v_query['status'])
v = super(VShieldEdgeLBPlugin, self).update_vip(context, id, vip)
self.update_status(context, loadbalancer_db.Vip, id,
constants.PENDING_UPDATE)
LOG.debug(_("Update vip: %s"), id)
self.vselb.update_vip(context, v)
self.update_status(context, loadbalancer_db.Vip, id,
constants.ACTIVE)
v_rt = self.get_vip(context, id)
return v_rt
def delete_vip(self, context, id):
with context.session.begin(subtransactions=True):
vip = self.get_vip(context, id)
uuid2vseid = self.vselb.get_vip_vseid(context, vip['id'])
self.update_status(context, loadbalancer_db.Vip, id,
constants.PENDING_DELETE)
LOG.debug(_("Delete vip: %s"), id)
super(VShieldEdgeLBPlugin, self).delete_vip(context, id)
vip['vseid'] = uuid2vseid
self.vselb.delete_vip(context, vip)
def get_vip(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_vip(context, id, fields)
LOG.debug(_("Get vip: %s"), id)
return res
def get_vips(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_vips(
context, filters, fields)
LOG.debug(_("Get vips"))
return res
def create_pool(self, context, pool):
with context.session.begin(subtransactions=True):
p = super(VShieldEdgeLBPlugin, self).create_pool(context, pool)
self.update_status(context, loadbalancer_db.Pool, p['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create pool: %s"), p['id'])
self.vselb.create_pool(context, p)
# pool may not be created if no member is specified, however we
# still update the status to ACTIVE in case the client is waiting
# for the pool to be created before pusing create member request
self.update_status(context, loadbalancer_db.Pool, p['id'],
constants.ACTIVE)
p_rt = self.get_pool(context, p['id'])
return p_rt
def update_pool(self, context, id, pool):
with context.session.begin(subtransactions=True):
p_query = self.get_pool(context, id, fields=["status"])
if p_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=p_query['status'])
p = super(VShieldEdgeLBPlugin, self).update_pool(context, id, pool)
LOG.debug(_("Update pool: %s"), p['id'])
self.vselb.update_pool(context, pool)
p_rt = self.get_pool(context, id)
return p_rt
def delete_pool(self, context, id):
with context.session.begin(subtransactions=True):
pool = self.get_pool(context, id)
self.update_status(context, loadbalancer_db.Pool, id,
constants.PENDING_DELETE)
self.vselb.delete_pool(context, pool)
super(VShieldEdgeLBPlugin, self).delete_pool(context, id)
LOG.debug(_("Delete pool: %s"), id)
def get_pool(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_pool(context, id, fields)
LOG.debug(_("Get pool: %s"), id)
return res
def get_pools(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_pools(
context, filters, fields)
LOG.debug(_("Get Pools"))
return res
def stats(self, context, pool_id):
res = super(VShieldEdgeLBPlugin, self).get_stats(context, pool_id)
LOG.debug(_("Get stats of Pool: %s"), pool_id)
return res
def create_pool_health_monitor(self, context, health_monitor, pool_id):
m = super(VShieldEdgeLBPlugin, self).create_pool_health_monitor(
context, health_monitor, pool_id)
LOG.debug(_("Create health_monitor of pool: %s"), pool_id)
return m
def get_pool_health_monitor(self, context, id, pool_id, fields=None):
m = super(VShieldEdgeLBPlugin, self).get_pool_health_monitor(
context, id, pool_id, fields)
LOG.debug(_("Get health_monitor of pool: %s"), pool_id)
return m
def delete_pool_health_monitor(self, context, id, pool_id):
super(VShieldEdgeLBPlugin, self).delete_pool_health_monitor(
context, id, pool_id)
LOG.debug(_("Delete health_monitor %(id)s of pool: %(pool_id)s"),
{"id": id, "pool_id": pool_id})
def get_member(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_member(
context, id, fields)
LOG.debug(_("Get member: %s"), id)
return res
def get_members(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_members(
context, filters, fields)
LOG.debug(_("Get members"))
return res
def create_member(self, context, member):
with context.session.begin(subtransactions=True):
m = super(VShieldEdgeLBPlugin, self).create_member(context, member)
self.update_status(context, loadbalancer_db.Member, m['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create member: %s"), m['id'])
self.vselb.create_member(context, m)
self.update_status(context, loadbalancer_db.Member, m['id'],
constants.ACTIVE)
m_rt = self.get_member(context, m['id'])
return m_rt
def update_member(self, context, id, member):
with context.session.begin(subtransactions=True):
m_query = self.get_member(context, id, fields=["status"])
if m_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=m_query['status'])
m = super(VShieldEdgeLBPlugin, self).update_member(
context, id, member)
self.update_status(context, loadbalancer_db.Member, id,
constants.PENDING_UPDATE)
LOG.debug(_("Update member: %s"), m['id'])
self.vselb.update_member(context, m)
self.update_status(context, loadbalancer_db.Member, id,
constants.ACTIVE)
m_rt = self.get_member(context, id)
return m_rt
def delete_member(self, context, id):
with context.session.begin(subtransactions=True):
m = self.get_member(context, id)
self.update_status(context, loadbalancer_db.Member, id,
constants.PENDING_DELETE)
LOG.debug(_("Delete member: %s"), id)
super(VShieldEdgeLBPlugin, self).delete_member(context, id)
self.vselb.delete_member(context, m)
def get_health_monitor(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_health_monitor(
context, id, fields)
LOG.debug(_("Get health_monitor: %s"), id)
return res
def get_health_monitors(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_health_monitors(
context, filters, fields)
LOG.debug(_("Get health_monitors"))
return res
def create_health_monitor(self, context, health_monitor):
h = super(VShieldEdgeLBPlugin, self).create_health_monitor(
context, health_monitor)
self.update_status(context, loadbalancer_db.HealthMonitor, h['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create health_monitor: %s"), h['id'])
# TODO: notify lbagent
h_rt = self.get_health_monitor(context, h['id'])
return h_rt
def update_health_monitor(self, context, id, health_monitor):
h_query = self.get_health_monitor(context, id, fields=["status"])
if h_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=h_query['status'])
h = super(VShieldEdgeLBPlugin, self).update_health_monitor(
context, id, health_monitor)
self.update_status(context, loadbalancer_db.HealthMonitor, id,
constants.PENDING_UPDATE)
LOG.debug(_("Update health_monitor: %s"), h['id'])
# TODO notify lbagent
h_rt = self.get_health_monitor(context, id)
return h_rt
def delete_health_monitor(self, context, id):
self.update_status(context, loadbalancer_db.HealthMonitor, id,
constants.PENDING_DELETE)
LOG.debug(_("Delete health_monitor: %s"), id)
super(VShieldEdgeLBPlugin, self).delete_health_monitor(context, id)
class VShieldEdgeFWPlugin(fw_db.FirewallPluginDb):
supported_extension_aliases = ["fwaas"]
def __init__(self):
"""
Do the initialization for the firewall service plugin here.
"""
# Hard coded for now
vseapi = VseAPI(edgeUri, edgeUser, edgePasswd, edgeId)
self.vsefw = FirewallAPI(vseapi)
qdbapi.register_models(base=model_base.BASEV2)
def get_plugin_type(self):
return constants.FIREWALL
def get_plugin_description(self):
return "Quantum Firewall Service Plugin"
def create_rule(self, context, rule):
with context.session.begin(subtransactions=True):
            rule = super(VShieldEdgeFWPlugin, self).create_rule(context, rule)
self.vsefw.create_rule(context, rule)
return rule
def delete_rule(self, context, id):
with context.session.begin(subtransactions=True):
rule = self.get_rule(context, id)
self.vsefw.delete_rule(context, rule)
super(VShieldEdgeFWPlugin, self).delete_rule(context, id)
def create_ipobj(self, context, ipobj):
with context.session.begin(subtransactions=True):
ipobj = super(VShieldEdgeFWPlugin, self).create_ipobj(context, ipobj)
self.vsefw.create_ipset(context, ipobj)
return ipobj
def delete_ipobj(self, context, id):
with context.session.begin(subtransactions=True):
ipobj = self.get_ipobj(context, id)
self.vsefw.delete_ipset(context, ipobj)
super(VShieldEdgeFWPlugin, self).delete_ipobj(context, id)
def create_serviceobj(self, context, serviceobj):
with context.session.begin(subtransactions=True):
svcobj = super(VShieldEdgeFWPlugin, self).create_serviceobj(context, serviceobj)
self.vsefw.create_application(context, svcobj)
return svcobj
def delete_serviceobj(self, context, id):
with context.session.begin(subtransactions=True):
svcobj = self.get_serviceobj(context, id)
self.vsefw.delete_application(context, svcobj)
super(VShieldEdgeFWPlugin, self).delete_serviceobj(context, id)
| apache-2.0 | -8,121,185,046,052,741,000 | 41.267806 | 92 | 0.611957 | false |
pohzhiee/ghetto_omr | edcamcam_testfile_imgproc7.py | 1 | 5239 | import numpy as np
import cv2
from edcamcam_testfile_shape import shapedetector
import matplotlib.pyplot as plt
#Part 1: Image Loading
#-------------------------------------------------------------------
#load image
img = cv2.imread('img_data/omstest2.jpg',cv2.IMREAD_GRAYSCALE)
img2= cv2.imread('img_data/omstest2.jpg')
#bilateral filter, sharpen, thresh
biblur=cv2.bilateralFilter(img,20,175,175)
sharp=cv2.addWeighted(img,1.55,biblur,-0.5,0)
ret1,thresh1 = cv2.threshold(sharp,127,255,cv2.THRESH_OTSU)
#negative image
inv=cv2.bitwise_not(thresh1)
#closed image
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
#Part 2: Finding Valid Contours
#-------------------------------------------------------------------
#find countours
im2, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
#prepare empty canvas
height, width = img.shape[:2]
emptycanvas=np.zeros((height,width),dtype=np.uint8)
#truncate contours with A<100
kcounter = 0
for c in contours:
A = cv2.contourArea(c)
if A<100:
contours=np.delete(contours,kcounter,0)
kcounter=kcounter-1
kcounter=kcounter+1
#find length of contour array
clen=len(contours)
#create match_array [dimension = len x len] with 0s
match_array=np.zeros((clen,clen),np.uint8)
#loop over the contours and compare two by two
icounter = 0
for i in contours:
jcounter = 0
for j in contours:
#If difference has index <0.01 then regard as TRUE
ret=cv2.matchShapes(i,j,1,0.0)
if ret<0.01:
match_array[icounter,jcounter]=1
else:
match_array[icounter,jcounter]=0
jcounter=jcounter+1
icounter=icounter+1
#sum each row of the array (for TRUEs and FALSEs)
sum_array=np.sum(match_array,axis=1,dtype=np.int32)
#finding mean of the comparison value
sum_array2=np.sum(sum_array,axis=0,dtype=np.int32)
sum_array_len=len(sum_array)
ave_sim_val=sum_array2/sum_array_len
#Assumption: there is a lot of 1s
#counters
#creation of new array to store centre point
#variables
counter_a=0
counter_s=0
counter_m=0
valid_counter =0
centpt_array = np.array([[0,0,0]])
hor_dist_acc=0
ver_dist_acc=0
#Area array
area_arr=np.array([])
#find valid mean area and SD
for k in sum_array:
if k>ave_sim_val:
A = cv2.contourArea(contours[counter_s])
area_arr=np.append(area_arr,[A],0)
counter_a=counter_a+1
counter_s=counter_s +1
sum_area_array=np.array([])
sum_area_array=np.sum(area_arr,axis=0,dtype=np.uint32)
mean_valid_A=sum_area_array/counter_a
sum_dif=0
for a in area_arr:
dif = (mean_valid_A - a)**2
sum_dif=sum_dif+dif
SD_valid=(sum_dif/counter_a)**0.5
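#mean_valid_A and SD_valid are the mean and population standard deviation of
#the areas of the frequently-matched contours; the +/-2*SD window below drops
#area outliers before centre points are computed.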
print area_arr
#find midpoints of contours that fulfil: 1) high similarity 2) occurrence greater than average 3) least deviation from the valid mean area
for i in sum_array:
if i>ave_sim_val:
cv2.drawContours(img2, contours, counter_m, (0, 255, 0), 2)
        #keep only contours whose area lies within 2 SD of the valid mean area
condition = cv2.contourArea(contours[counter_m])>mean_valid_A-2*SD_valid and cv2.contourArea(contours[counter_m])<mean_valid_A+2*SD_valid
if condition:
# obtain centre point of each contour
M = cv2.moments(contours[counter_m])
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
# store in it arrays
new_centpt_array=np.array([[cX,cY,counter_m]])
centpt_array=np.concatenate((centpt_array,new_centpt_array),axis=0)
#determine horizontal point and vertical point
c=contours[counter_m]
Xt_right=np.asarray(tuple(c[c[:,:,0].argmax()][0]))
Xt_bot=np.asarray(tuple(c[c[:,:,1].argmax()][0]))
hor_dist=Xt_right[0]-cX
ver_dist=Xt_bot[1]-cY
hor_dist_acc=hor_dist_acc+hor_dist
ver_dist_acc=ver_dist_acc+ver_dist
valid_counter = valid_counter +1
counter_m = counter_m+1
mean_hor_dist=hor_dist_acc/valid_counter
mean_ver_dist=ver_dist_acc/valid_counter
#delete 1st row
centpt_array=np.delete(centpt_array,0,0)
#checkpoint for adding array
centpt_array=np.append(centpt_array,[[48,185,1000]],0)
centpt_array=np.append(centpt_array,[[40,290,1001]],0)
centpt_array=np.append(centpt_array,[[500,500,1002]],0)
centpt_array=np.append(centpt_array,[[300,300,1003]],0)
centpt_array=np.append(centpt_array,[[0,0,1004]],0)
#Removing Duplicates
g=0
arr_len=len(centpt_array)
while arr_len>g:
target_arr1 = centpt_array[g]
h=1+g
while arr_len>h and h>g:
target_arr2 = centpt_array[h]
if abs(target_arr1[0]-target_arr2[0])<mean_hor_dist:
if abs(target_arr1[1]-target_arr2[1])<mean_ver_dist:
centpt_array=np.delete(centpt_array,h,0)
h=h-1
arr_len=arr_len-1
h = h + 1
g=g+1
#checkpoint
#print centpt_array
print '-----------'
#print valid_counter
#print len(centpt_array)
#print mean_hor_dist
#print mean_ver_dist
#initialise plot
plt.subplot(111),plt.imshow(img2)
plt.title('dilate1 Image'), plt.xticks([]), plt.yticks([])
for k in centpt_array:
plt.plot(k[0],k[1],'ro')
plt.show()
cv2.waitKey(0)
| gpl-3.0 | 6,262,305,167,322,656,000 | 24.807882 | 145 | 0.649361 | false |
zorna/zorna | zorna/calendars/forms.py | 1 | 2212 | from django import forms
from django.utils.translation import ugettext_lazy as _
from schedule.models import Event, Occurrence
import datetime
import time
from zorna.calendars.models import ZornaCalendar, EventDetails, ZornaResourceCalendar
FREQUENCIES_CHOICES = (
("", _("-----")),
("WEEKLY", _("Weekly")),
("DAILY", _("Daily")),
("MONTHLY", _("Monthly")),
("YEARLY", _("Yearly")))
DAYS_CHOICES = (
(6, _("Sun")),
(0, _("Mon")),
(1, _("Tue")),
(2, _("Wed")),
(3, _("Thu")),
(4, _("Fri")),
(5, _("Sat")),
)
class EditEventForm(forms.ModelForm):
interval_choices = [(i, i) for i in range(1, 31)]
title = forms.CharField()
description = forms.Textarea()
start = forms.DateTimeField(label=_(
"Start date"), widget=forms.SplitDateTimeWidget)
end = forms.DateTimeField(label=_("End date"), widget=forms.SplitDateTimeWidget, help_text=_(
"The end time must be later than start time."))
end_recurring_period = forms.DateField(label=_("Until date"), help_text=_(
"This date is ignored for one time only events."), required=False)
rule = forms.ChoiceField(label=_("Rule"), choices=FREQUENCIES_CHOICES, help_text=_(
"Select '----' for a one time only event."), required=False)
weekdays = forms.MultipleChoiceField(label=_(
"Repeat on"), choices=DAYS_CHOICES, widget=forms.CheckboxSelectMultiple, required=False)
interval = forms.ChoiceField(label=_(
"Repeat every"), choices=interval_choices, required=False)
class Meta:
model = Event
exclude = ('creator', 'created_on', 'calendar', 'rule')
def clean(self):
start = self.cleaned_data.get("start")
end = self.cleaned_data.get("end")
if start >= end:
raise forms.ValidationError(_(
u'The end time must be later than start time.'))
return self.cleaned_data
class EditEventDetailsForm(forms.ModelForm):
class Meta:
model = EventDetails
class ResourceCalendarForm(forms.ModelForm):
class Meta:
model = ZornaResourceCalendar
class ZornaCalendarSettingsForm(forms.ModelForm):
class Meta:
model = ZornaCalendar
| bsd-3-clause | 1,895,415,933,786,963,200 | 29.30137 | 97 | 0.631555 | false |
gregcorbett/SciBot | src/Board.py | 1 | 4163 | """This file defines the Board class."""
import pygame
from src.Component import Component
from src.Point import Point
class Board(Component):
"""This class defines the board (a.k.a. map)."""
def __init__(self, scenario):
"""Create the board."""
self.step = scenario.get_board_step()
# Call superclass constructor
super().__init__(scenario.get_background(),
Point(0, 0), # This is the window topleft corner
self.step)
# Work out (and check) screen size, also store for
        # checking the BeeBot has not fallen off the edge
self.logical_board_height = scenario.get_logical_height()
self.logical_board_width = scenario.get_logical_width()
# Board dimensions in terms of pixels
self.board_height = self.logical_board_height * self.step
self.board_width = self.logical_board_width * self.step
self.border_colour = scenario.get_border_colour()
self.obstacle_group = scenario.get_obstacle_group()
self.goal_group = scenario.get_goal_group()
# Need to check the Board pixel height matches the image pixel height
if self.board_height != self.sprite.get_height():
raise ValueError(("Error 1: board height does "
"not match image height.\n"
"Board Height = %s\n"
"Image Height = %s"
% (self.board_height,
self.sprite.get_height())))
# Need to check the Board pixel width matches the image pixel width
if self.board_width != self.sprite.get_width():
raise ValueError(("Error 2: board width does "
"not match image width.\n"
"Board Width = %s\n"
"Image Width = %s"
% (self.board_width,
self.sprite.get_width())))
# Need to check the pixel height is a multiple of step
if self.board_height % self.step != 0:
raise ValueError(("Error 3: height mod step != 0.\n"
"Height = %s\n"
"Step = %s" % (self.board_height,
self.step)))
        # Need to check the pixel width is a multiple of step
if self.board_width % self.step != 0:
raise ValueError(("Error 4: width mod step != 0.\n"
"Width = %s\n"
"Step = %s" % (self.board_width,
self.step)))
def display(self, screen):
"""Display the board on screen."""
# Call the superclass display method
super().display(screen)
self.obstacle_group.display(screen)
self.goal_group.display(screen)
# Draw lines over Board background image
if self.border_colour is not None:
for iter_width in range(0, self.board_width + 1, self.step):
line_start = Point(iter_width, 0)
line_end = Point(iter_width, self.board_height)
# Draw a line from line_start to line_end.
pygame.draw.line(screen, self.border_colour,
line_start, line_end, 5)
for iter_height in range(0, self.board_height + 1, self.step):
line_start = Point(0, iter_height)
line_end = Point(self.board_width, iter_height)
# Draw a line from line_start to line_end.
pygame.draw.line(screen, self.border_colour,
line_start, line_end, 5)
def is_equal_to(self, other_component):
"""Compare this Board for equality with other_component."""
if not isinstance(other_component, Board):
# An Board can obviously never be equal to a non Board
return False
# Comparing a Board to another Board has not yet been implemented
raise NotImplementedError()
| gpl-2.0 | 5,462,970,932,799,122,000 | 41.050505 | 77 | 0.528705 | false |
fretsonfire/fof-python | src/Credits.py | 1 | 9490 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
import math
from View import Layer
from Input import KeyListener
from Language import _
import MainMenu
import Song
import Version
import Player
class Element:
"""A basic element in the credits scroller."""
def getHeight(self):
"""@return: The height of this element in fractions of the screen height"""
return 0
def render(self, offset):
"""
Render this element.
@param offset: Offset in the Y direction in fractions of the screen height
"""
pass
class Text(Element):
def __init__(self, font, scale, color, alignment, text):
self.text = text
self.font = font
self.color = color
self.alignment = alignment
self.scale = scale
self.size = self.font.getStringSize(self.text, scale = scale)
def getHeight(self):
return self.size[1]
def render(self, offset):
if self.alignment == "left":
x = .1
elif self.alignment == "right":
x = .9 - self.size[0]
elif self.alignment == "center":
x = .5 - self.size[0] / 2
glColor4f(*self.color)
self.font.render(self.text, (x, offset), scale = self.scale)
class Picture(Element):
def __init__(self, engine, fileName, height):
self.height = height
self.engine = engine
engine.loadSvgDrawing(self, "drawing", fileName)
def getHeight(self):
return self.height
def render(self, offset):
self.drawing.transform.reset()
w, h = self.engine.view.geometry[2:4]
self.drawing.transform.translate(.5 * w, h - (.5 * self.height + offset) * h * float(w) / float(h))
self.drawing.transform.scale(1, -1)
self.drawing.draw()
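# The scroller is just a list of Element objects: each one reports its height
# as a fraction of the screen height, and Credits.render() below walks the list
# drawing only the elements whose accumulated offset is currently on screen.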
class Credits(Layer, KeyListener):
"""Credits scroller."""
def __init__(self, engine, songName = None):
self.engine = engine
self.time = 0.0
self.offset = 1.0
self.songLoader = self.engine.resource.load(self, "song", lambda: Song.loadSong(self.engine, "defy", playbackOnly = True),
onLoad = self.songLoaded)
self.engine.loadSvgDrawing(self, "background1", "editor.svg")
self.engine.loadSvgDrawing(self, "background2", "keyboard.svg")
self.engine.loadSvgDrawing(self, "background3", "cassette.svg")
self.engine.boostBackgroundThreads(True)
nf = self.engine.data.font
bf = self.engine.data.bigFont
ns = 0.002
bs = 0.001
hs = 0.003
c1 = (1, 1, .5, 1)
c2 = (1, .75, 0, 1)
space = Text(nf, hs, c1, "center", " ")
self.credits = [
Text(nf, ns, c2, "center", _("Unreal Voodoo")),
Text(nf, ns, c1, "center", _("presents")),
Text(nf, bs, c2, "center", " "),
Picture(self.engine, "logo.svg", .25),
Text(nf, bs, c2, "center", " "),
Text(nf, bs, c2, "center", _("Version %s") % Version.version()),
space,
Text(nf, ns, c1, "left", _("Game Design,")),
Text(nf, ns, c1, "left", _("Programming:")),
Text(nf, ns, c2, "right", "Sami Kyostila"),
space,
Text(nf, ns, c1, "left", _("Music,")),
Text(nf, ns, c1, "left", _("Sound Effects:")),
Text(nf, ns, c2, "right", "Tommi Inkila"),
space,
Text(nf, ns, c1, "left", _("Graphics:")),
Text(nf, ns, c2, "right", "Joonas Kerttula"),
space,
Text(nf, ns, c1, "left", _("Introducing:")),
Text(nf, ns, c2, "right", "Mikko Korkiakoski"),
Text(nf, ns, c2, "right", _("as Jurgen, Your New God")),
space,
Text(nf, ns, c2, "right", "Marjo Hakkinen"),
Text(nf, ns, c2, "right", _("as Groupie")),
space,
Text(nf, ns, c1, "left", _("Song Credits:")),
Text(nf, ns, c2, "right", _("Bang Bang, Mystery Man")),
Text(nf, bs, c2, "right", _("music by Mary Jo and Tommi Inkila")),
Text(nf, bs, c2, "right", _("lyrics by Mary Jo")),
space,
Text(nf, ns, c2, "right", _("Defy The Machine")),
Text(nf, bs, c2, "right", _("music by Tommi Inkila")),
space,
Text(nf, ns, c2, "right", _("This Week I've Been")),
Text(nf, ns, c2, "right", _("Mostly Playing Guitar")),
Text(nf, bs, c2, "right", _("composed and performed by Tommi Inkila")),
space,
Text(nf, ns, c1, "left", _("Testing:")),
Text(nf, ns, c2, "right", "Mikko Korkiakoski"),
Text(nf, ns, c2, "right", "Tomi Kyostila"),
Text(nf, ns, c2, "right", "Jani Vaarala"),
Text(nf, ns, c2, "right", "Juho Jamsa"),
Text(nf, ns, c2, "right", "Olli Jakola"),
space,
Text(nf, ns, c1, "left", _("Mac OS X port:")),
Text(nf, ns, c2, "right", "Tero Pihlajakoski"),
space,
Text(nf, ns, c1, "left", _("Special thanks to:")),
Text(nf, ns, c2, "right", "Tutorial inspired by adam02"),
space,
Text(nf, ns, c1, "left", _("Made with:")),
Text(nf, ns, c2, "right", "Python"),
Text(nf, bs, c2, "right", "http://www.python.org"),
space,
Text(nf, ns, c2, "right", "PyGame"),
Text(nf, bs, c2, "right", "http://www.pygame.org"),
space,
Text(nf, ns, c2, "right", "PyOpenGL"),
Text(nf, bs, c2, "right", "http://pyopengl.sourceforge.net"),
space,
Text(nf, ns, c2, "right", "Amanith Framework"),
Text(nf, bs, c2, "right", "http://www.amanith.org"),
space,
Text(nf, ns, c2, "right", "Illusoft Collada module 1.4"),
Text(nf, bs, c2, "right", "http://colladablender.illusoft.com"),
space,
Text(nf, ns, c2, "right", "Psyco specializing compiler"),
Text(nf, bs, c2, "right", "http://psyco.sourceforge.net"),
space,
Text(nf, ns, c2, "right", "MXM Python Midi Package 0.1.4"),
Text(nf, bs, c2, "right", "http://www.mxm.dk/products/public/pythonmidi"),
space,
space,
Text(nf, bs, c1, "center", _("Source Code available under the GNU General Public License")),
Text(nf, bs, c2, "center", "http://www.unrealvoodoo.org"),
space,
space,
space,
space,
Text(nf, bs, c1, "center", _("Copyright 2006-2008 by Unreal Voodoo")),
]
def songLoaded(self, song):
self.engine.boostBackgroundThreads(False)
song.play()
def shown(self):
self.engine.input.addKeyListener(self)
def hidden(self):
if self.song:
self.song.fadeout(1000)
self.engine.input.removeKeyListener(self)
self.engine.view.pushLayer(MainMenu.MainMenu(self.engine))
def quit(self):
self.engine.view.popLayer(self)
def keyPressed(self, key, unicode):
if self.engine.input.controls.getMapping(key) in [Player.CANCEL, Player.KEY1, Player.KEY2] or key == pygame.K_RETURN:
self.songLoader.cancel()
self.quit()
return True
def run(self, ticks):
self.time += ticks / 50.0
if self.song:
self.offset -= ticks / 5000.0
if self.offset < -6.1:
self.quit()
def render(self, visibility, topMost):
v = 1.0 - ((1 - visibility) ** 2)
# render the background
t = self.time / 100 + 34
w, h, = self.engine.view.geometry[2:4]
r = .5
for i, background in [(0, self.background1), (1, self.background2), (2, self.background3)]:
background.transform.reset()
background.transform.translate((1 - v) * 2 * w + w / 2 + math.cos(t / 2) * w / 2 * r, h / 2 + math.sin(t) * h / 2 * r)
background.transform.translate(0, -h * (((self.offset + i * 2) % 6.0) - 3.0))
background.transform.rotate(math.sin(t * 4 + i) / 2)
background.transform.scale(math.sin(t / 8) + 3, math.sin(t / 8) + 3)
background.draw()
self.engine.view.setOrthogonalProjection(normalize = True)
font = self.engine.data.font
# render the scroller elements
y = self.offset
glTranslatef(-(1 - v), 0, 0)
try:
for element in self.credits:
h = element.getHeight()
if y + h > 0.0 and y < 1.0:
element.render(y)
y += h
if y > 1.0:
break
finally:
self.engine.view.resetProjection()
| mit | -1,735,713,498,330,326,300 | 36.509881 | 127 | 0.546891 | false |
cggh/DQXServer | _CreateFilterBankData.py | 1 | 1099 | # This file is part of DQXServer - (C) Copyright 2014, Paul Vauterin, Ben Jeffery, Alistair Miles <[email protected]>
# This program is free software licensed under the GNU Affero General Public License.
# You can find a copy of this license in LICENSE in the top directory of the source code or at <http://opensource.org/licenses/AGPL-3.0>
import math
import random
import SummCreate
import sys
basedir='.'
#============= FAKE STUFF FOR DEBUGGING; REMOVE FOR PRODUCTION ==============
#basedir='C:\Data\Test\Genome'
#sys.argv=['','/home/pvaut/Documents/Genome/Tracks-PfPopGen3.1/Coverage2','Summ01']
#============= END OF FAKE STUFF ============================================
if len(sys.argv)<3:
print('Usage: COMMAND DataFolder ConfigFilename')
print(' DataFolder= folder containing the source data, relative to the current path')
print(' ConfigFilename= name of the source configuration file (do not provide the extension ".cnf").')
sys.exit()
dataFolder=sys.argv[1]
summaryFile=sys.argv[2]
creat=SummCreate.Creator(basedir,dataFolder,summaryFile)
creat.Summarise()
| agpl-3.0 | -1,543,107,310,729,181,400 | 32.30303 | 136 | 0.690628 | false |
GammaC0de/pyload | src/pyload/plugins/downloaders/ShareonlineBiz.py | 1 | 5833 | # -*- coding: utf-8 -*-
import base64
import re
import time
from datetime import timedelta
from pyload.core.network.request_factory import get_url
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class ShareonlineBiz(SimpleDownloader):
__name__ = "ShareonlineBiz"
__type__ = "downloader"
__version__ = "0.67"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?:www\.)?(share-online\.biz|egoshare\.com)/(download\.php\?id=|dl/)(?P<ID>\w+)"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Shareonline.biz downloader plugin"""
__license__ = "GPLv3"
__authors__ = [
("spoob", "[email protected]"),
("mkaay", "[email protected]"),
("zoidberg", "[email protected]"),
("Walter Purcaro", "[email protected]"),
]
URL_REPLACEMENTS = [(__pattern__ + ".*", r"http://www.share-online.biz/dl/\g<ID>")]
CHECK_TRAFFIC = True
ERROR_PATTERN = r'<p class="b">Information:</p>\s*<div>\s*<strong>(.*?)</strong>'
@classmethod
def api_info(cls, url):
info = {}
field = get_url(
"http://api.share-online.biz/linkcheck.php",
get={"md5": "1", "links": re.match(cls.__pattern__, url).group("ID")},
).split(";")
try:
if field[1] == "OK":
info["fileid"] = field[0]
info["status"] = 2
info["name"] = field[2]
info["size"] = field[3] #: In bytes
info["md5"] = field[4].strip().lower().replace("\n\n", "") #: md5
elif field[1] in ("DELETED", "NOTFOUND"):
info["status"] = 1
except IndexError:
pass
return info
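    # linkcheck.php answers with one semicolon-separated record per link:
    # "<id>;<status>;<name>;<size>;<md5>", status being OK / DELETED / NOTFOUND;
    # that is the field list api_info() unpacks above.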
def setup(self):
self.resume_download = self.premium
self.multi_dl = False
def handle_captcha(self):
self.captcha = ReCaptcha(self.pyfile)
response, challenge = self.captcha.challenge()
m = re.search(r"var wait=(\d+);", self.data)
self.set_wait(int(m.group(1)) if m else 30)
res = self.load(
"{}/free/captcha/{}".format(self.pyfile.url, int(time.time() * 1000)),
post={
"dl_free": "1",
"recaptcha_challenge_field": challenge,
"recaptcha_response_field": response,
},
)
if res != "0":
self.captcha.correct()
return res
else:
self.retry_captcha()
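    # handle_captcha() returns the raw server response; anything other than "0"
    # is the base64-encoded download URL consumed by handle_free() below.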
def handle_free(self, pyfile):
self.wait(3)
self.data = self.load(
"{}/free/".format(pyfile.url), post={"dl_free": "1", "choice": "free"}
)
self.check_errors()
res = self.handle_captcha()
        self.link = base64.b64decode(res).decode("utf-8")  #: b64decode returns bytes; the link must be a str
if not self.link.startswith("http://"):
self.error(self._("Invalid url"))
self.wait()
def check_download(self):
check = self.scan_download(
{
"cookie": re.compile(r'<div id="dl_failure"'),
"fail": re.compile(r"<title>Share-Online"),
}
)
if check == "cookie":
self.retry_captcha(5, 60, self._("Cookie failure"))
elif check == "fail":
self.retry_captcha(
5, timedelta(minutes=5).seconds, self._("Download failed")
)
return SimpleDownloader.check_download(self)
#: Should be working better loading (account) api internally
def handle_premium(self, pyfile):
self.api_data = dlinfo = {}
html = self.load(
"https://api.share-online.biz/account.php",
get={
"username": self.account.user,
"password": self.account.get_login("password"),
"act": "download",
"lid": self.info["fileid"],
},
)
self.log_debug(html)
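        # The account API replies with plain "key: value" lines (e.g. status, name, size, url).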
for line in html.splitlines():
try:
key, value = line.split(": ")
dlinfo[key.lower()] = value
except ValueError:
pass
if dlinfo["status"] != "online":
self.offline()
else:
pyfile.name = dlinfo["name"]
pyfile.size = int(dlinfo["size"])
self.link = dlinfo["url"]
if self.link == "server_under_maintenance":
self.temp_offline()
else:
self.multi_dl = True
def check_errors(self):
m = re.search(r"/failure/(.*?)/", self.req.last_effective_url)
if m is None:
self.info.pop("error", None)
return
errmsg = m.group(1).lower()
try:
self.log_error(errmsg, re.search(self.ERROR_PATTERN, self.data).group(1))
except Exception:
self.log_error(self._("Unknown error occurred"), errmsg)
if errmsg == "invalid":
self.fail(self._("File not available"))
elif errmsg in ("freelimit", "size", "proxy"):
self.fail(self._("Premium account needed"))
elif errmsg in ("expired", "server"):
self.retry(wait=600, msg=errmsg)
elif errmsg == "full":
self.fail(self._("Server is full"))
elif "slot" in errmsg:
self.wait(3600, reconnect=True)
self.restart(errmsg)
else:
self.wait(60, reconnect=True)
self.restart(errmsg)
| agpl-3.0 | 814,808,045,400,288,100 | 28.459596 | 109 | 0.513286 | false |
MarsZone/DreamLand | muddery/statements/default_statement_func_set.py | 1 | 1790 | """
Default statement functions.
"""
from muddery.statements.statement_func_set import BaseStatementFuncSet
import muddery.statements.action as action
import muddery.statements.condition as condition
import muddery.statements.attribute as attribute
import muddery.statements.rand as rand
import muddery.statements.skill as skill
class ActionFuncSet(BaseStatementFuncSet):
"""
Statement functions used in actions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(attribute.FuncSetAttr)
self.add(attribute.FuncRemoveAttr)
self.add(action.FuncLearnSkill)
self.add(action.FuncGiveObject)
self.add(action.FuncRemoveObjects)
self.add(action.FuncTeleportTo)
self.add(action.FuncFightMob)
self.add(action.FuncFightTarget)
class ConditionFuncSet(BaseStatementFuncSet):
"""
Statement functions used in conditions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(condition.FuncIsQuestInProgress)
self.add(condition.FuncCanProvideQuest)
self.add(condition.FuncIsQuestCompleted)
self.add(condition.FuncHasObject)
self.add(attribute.FuncGetAttr)
self.add(attribute.FuncHasAttr)
self.add(attribute.FuncCheckAttr)
self.add(rand.FuncOdd)
self.add(rand.FuncRand)
self.add(rand.FuncRandInt)
class SkillFuncSet(BaseStatementFuncSet):
"""
    Statement functions used in skills.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(skill.FuncEscape)
self.add(skill.FuncHeal)
self.add(skill.FuncHit)
self.add(skill.FuncIncreaseMaxHP)
| bsd-3-clause | -489,832,213,529,719,900 | 26.538462 | 70 | 0.675978 | false |
yossan4343434/TK_15 | src/yamashita/preprocessing/capture_face.py | 1 | 1798 | # -*- coding: utf-8 -*-
import os
import shutil
import cv2
import glob
ROOT = os.path.abspath(os.path.dirname(__file__))
SRCDIR = ROOT.replace("src/yamashita/preprocessing", "data/rugby/goromaru/raw_goromaru/")
TARDIR = ROOT.replace("src/yamashita/preprocessing", "data/rugby/goromaru/pre_goromaru/")
NODIR = ROOT.replace("src/yamashita/preprocessing", "data/rugby/goromaru/no_goromaru/")
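# Haar cascade used for face detection (alternative cascade files are kept commented out below).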
cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_mcs_mouth.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_mcs_nose.xml")
def cap_face(paths):
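    # Detect faces in each source image: crop and resize every hit to 100x100 and
    # save it under TARDIR; images with no detected face are copied to NODIR instead.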
i = 0
for path in paths:
i += 1
img = cv2.imread(path)
face = cascade.detectMultiScale(img, 1.3, 3)
r_name = TARDIR + "goromaru_" + str(i)
if len(face) != 0:
j = 0
for (x, y, w, h) in face:
j += 1
name = r_name + "_" + str(j) + ".jpg"
tmp = img[y:y+h, x:x+w]
tmp = cv2.resize(tmp, (100, 100))
cv2.imwrite(name, tmp)
else:
nogoro = NODIR + path.split("/")[-1]
shutil.copy(path, nogoro)
def getlist():
filelist = glob.glob(SRCDIR+"*")
return filelist
if __name__ == '__main__':
imgpaths = getlist()
cap_face(imgpaths)
| mit | 5,616,299,162,888,946,000 | 27.539683 | 132 | 0.626808 | false |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py | 1 | 73915 | # Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2011 Daniel Bates ([email protected]). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import atexit
import base64
import codecs
import getpass
import os
import os.path
import re
import stat
import sys
import subprocess
import tempfile
import time
import webkitpy.thirdparty.unittest2 as unittest
import urllib
import shutil
from datetime import date
from webkitpy.common.checkout.checkout import Checkout
from webkitpy.common.config.committers import Committer
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive_mock import MockExecutive
from .git import Git, AmbiguousCommitError
from .detection import detect_scm_system
from .scm import SCM, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError
from .svn import SVN
# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitTest test_ method.
# We store it in a global variable so that we can delete this cached repo on exit(3).
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
cached_svn_repo_path = None
def remove_dir(path):
# Change directory to / to ensure that we aren't in the directory we want to delete.
os.chdir('/')
shutil.rmtree(path)
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
@atexit.register
def delete_cached_mock_repo_at_exit():
if cached_svn_repo_path:
remove_dir(cached_svn_repo_path)
# Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.)
# Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from.
def run_command(*args, **kwargs):
# FIXME: This should not be a global static.
# New code should use Executive.run_command directly instead
return Executive().run_command(*args, **kwargs)
# FIXME: This should be unified into one of the executive.py commands!
# Callers could use run_and_throw_if_fail(args, cwd=cwd, quiet=True)
def run_silent(args, cwd=None):
# Note: Not thread safe: http://bugs.python.org/issue2320
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process.communicate() # ignore output
exit_code = process.wait()
if exit_code:
raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
def write_into_file_at_path(file_path, contents, encoding="utf-8"):
if encoding:
with codecs.open(file_path, "w", encoding) as file:
file.write(contents)
else:
with open(file_path, "w") as file:
file.write(contents)
def read_from_path(file_path, encoding="utf-8"):
with codecs.open(file_path, "r", encoding) as file:
return file.read()
def _make_diff(command, *args):
# We use this wrapper to disable output decoding. diffs should be treated as
    # binary files since they may include text files of multiple different encodings.
# FIXME: This should use an Executive.
return run_command([command, "diff"] + list(args), decode_output=False)
def _svn_diff(*args):
return _make_diff("svn", *args)
def _git_diff(*args):
return _make_diff("git", *args)
# Exists to share svn repository creation code between the git and svn tests
class SVNTestRepository(object):
@classmethod
def _svn_add(cls, path):
run_command(["svn", "add", path])
@classmethod
def _svn_commit(cls, message):
run_command(["svn", "commit", "--quiet", "--message", message])
@classmethod
def _setup_test_commits(cls, svn_repo_url):
svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
# Add some test commits
os.chdir(svn_checkout_path)
write_into_file_at_path("test_file", "test1")
cls._svn_add("test_file")
cls._svn_commit("initial commit")
write_into_file_at_path("test_file", "test1test2")
# This used to be the last commit, but doing so broke
        # GitTest.test_apply_git_patch which uses the inverse diff of the last commit.
# svn-apply fails to remove directories in Git, see:
# https://bugs.webkit.org/show_bug.cgi?id=34871
os.mkdir("test_dir")
# Slash should always be the right path separator since we use cygwin on Windows.
test_file3_path = "test_dir/test_file3"
write_into_file_at_path(test_file3_path, "third file")
cls._svn_add("test_dir")
cls._svn_commit("second commit")
write_into_file_at_path("test_file", "test1test2test3\n")
write_into_file_at_path("test_file2", "second file")
cls._svn_add("test_file2")
cls._svn_commit("third commit")
# This 4th commit is used to make sure that our patch file handling
# code correctly treats patches as binary and does not attempt to
# decode them assuming they're utf-8.
write_into_file_at_path("test_file", u"latin1 test: \u00A0\n", "latin1")
write_into_file_at_path("test_file2", u"utf-8 test: \u00A0\n", "utf-8")
cls._svn_commit("fourth commit")
# svn does not seem to update after commit as I would expect.
run_command(['svn', 'update'])
remove_dir(svn_checkout_path)
# This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
# GitTest. We create a mock SVN repo once and then perform an SVN checkout from a filesystem copy of
# it since it's expensive to create the mock repo.
@classmethod
def setup(cls, test_object):
global cached_svn_repo_path
if not cached_svn_repo_path:
cached_svn_repo_path = cls._setup_mock_repo()
test_object.temp_directory = tempfile.mkdtemp(suffix="svn_test")
test_object.svn_repo_path = os.path.join(test_object.temp_directory, "repo")
test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path
test_object.svn_checkout_path = os.path.join(test_object.temp_directory, "checkout")
shutil.copytree(cached_svn_repo_path, test_object.svn_repo_path)
run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url + "/trunk", test_object.svn_checkout_path])
@classmethod
def _setup_mock_repo(cls):
        # Create a test SVN repository
svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
svn_repo_url = "file://%s" % svn_repo_path # Not sure this will work on windows
# git svn complains if we don't pass --pre-1.5-compatible, not sure why:
# Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
run_command(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])
# Create a test svn checkout
svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
        # Create and check out a trunk dir to match the standard svn configuration and git-svn's expectations
os.chdir(svn_checkout_path)
os.mkdir('trunk')
cls._svn_add('trunk')
# We can add tags and branches as well if we ever need to test those.
cls._svn_commit('add trunk')
# Change directory out of the svn checkout so we can delete the checkout directory.
remove_dir(svn_checkout_path)
cls._setup_test_commits(svn_repo_url + "/trunk")
return svn_repo_path
@classmethod
def tear_down(cls, test_object):
remove_dir(test_object.temp_directory)
        # Now that we've deleted the checkout paths, the cwd may be invalid
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
if os.path.isabs(__file__):
path = os.path.dirname(__file__)
else:
path = sys.path[0]
os.chdir(detect_scm_system(path).checkout_root)
# For testing the SCM baseclass directly.
class SCMClassTests(unittest.TestCase):
def setUp(self):
self.dev_null = open(os.devnull, "w") # Used to make our Popen calls quiet.
def tearDown(self):
self.dev_null.close()
def test_run_command_with_pipe(self):
input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")
# Test the non-pipe case too:
self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")
command_returns_non_zero = ['/bin/sh', '--invalid-option']
# Test when the input pipe process fails.
input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
self.assertNotEqual(input_process.poll(), 0)
self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
# Test when the run_command process fails.
input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) # grep shows usage and calls exit(2) when called w/o arguments.
self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)
def test_error_handlers(self):
git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
svn_failure_message="""svn: Commit failed (details follow):
svn: File or directory 'ChangeLog' is out of date; try updating
svn: resource out of date; try updating
"""
command_does_not_exist = ['does_not_exist', 'invalid_option']
self.assertRaises(OSError, run_command, command_does_not_exist)
self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)
command_returns_non_zero = ['/bin/sh', '--invalid-option']
self.assertRaises(ScriptError, run_command, command_returns_non_zero)
# Check if returns error text:
self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass.
class SCMTest(unittest.TestCase):
def _create_patch(self, patch_contents):
# FIXME: This code is brittle if the Attachment API changes.
attachment = Attachment({"bug_id": 12345}, None)
attachment.contents = lambda: patch_contents
joe_cool = Committer("Joe Cool", "[email protected]")
attachment.reviewer = lambda: joe_cool
return attachment
def _setup_webkittools_scripts_symlink(self, local_scm):
webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
webkit_scripts_directory = webkit_scm.scripts_directory()
local_scripts_directory = local_scm.scripts_directory()
os.mkdir(os.path.dirname(local_scripts_directory))
os.symlink(webkit_scripts_directory, local_scripts_directory)
# Tests which both GitTest and SVNTest should run.
# FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses
def _shared_test_changed_files(self):
write_into_file_at_path("test_file", "changed content")
self.assertItemsEqual(self.scm.changed_files(), ["test_file"])
write_into_file_at_path("test_dir/test_file3", "new stuff")
self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
old_cwd = os.getcwd()
os.chdir("test_dir")
# Validate that changed_files does not change with our cwd, see bug 37015.
self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
os.chdir(old_cwd)
def _shared_test_added_files(self):
write_into_file_at_path("test_file", "changed content")
self.assertItemsEqual(self.scm.added_files(), [])
write_into_file_at_path("added_file", "new stuff")
self.scm.add("added_file")
write_into_file_at_path("added_file3", "more new stuff")
write_into_file_at_path("added_file4", "more new stuff")
self.scm.add_list(["added_file3", "added_file4"])
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file2", "new stuff")
self.scm.add("added_dir")
# SVN reports directory changes, Git does not.
added_files = self.scm.added_files()
if "added_dir" in added_files:
added_files.remove("added_dir")
self.assertItemsEqual(added_files, ["added_dir/added_file2", "added_file", "added_file3", "added_file4"])
# Test also to make sure discard_working_directory_changes removes added files
self.scm.discard_working_directory_changes()
self.assertItemsEqual(self.scm.added_files(), [])
self.assertFalse(os.path.exists("added_file"))
self.assertFalse(os.path.exists("added_file3"))
self.assertFalse(os.path.exists("added_file4"))
self.assertFalse(os.path.exists("added_dir"))
def _shared_test_changed_files_for_revision(self):
# SVN reports directory changes, Git does not.
changed_files = self.scm.changed_files_for_revision(3)
if "test_dir" in changed_files:
changed_files.remove("test_dir")
self.assertItemsEqual(changed_files, ["test_dir/test_file3", "test_file"])
self.assertItemsEqual(self.scm.changed_files_for_revision(4), ["test_file", "test_file2"]) # Git and SVN return different orders.
self.assertItemsEqual(self.scm.changed_files_for_revision(2), ["test_file"])
def _shared_test_contents_at_revision(self):
self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2")
self.assertEqual(self.scm.contents_at_revision("test_file", 4), "test1test2test3\n")
# Verify that contents_at_revision returns a byte array, aka str():
self.assertEqual(self.scm.contents_at_revision("test_file", 5), u"latin1 test: \u00A0\n".encode("latin1"))
self.assertEqual(self.scm.contents_at_revision("test_file2", 5), u"utf-8 test: \u00A0\n".encode("utf-8"))
self.assertEqual(self.scm.contents_at_revision("test_file2", 4), "second file")
# Files which don't exist:
# Currently we raise instead of returning None because detecting the difference between
# "file not found" and any other error seems impossible with svn (git seems to expose such through the return code).
self.assertRaises(ScriptError, self.scm.contents_at_revision, "test_file2", 2)
self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2)
def _shared_test_revisions_changing_file(self):
self.assertItemsEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file")
def _shared_test_committer_email_for_revision(self):
self.assertEqual(self.scm.committer_email_for_revision(3), getpass.getuser()) # Committer "email" will be the current user
def _shared_test_reverse_diff(self):
self._setup_webkittools_scripts_symlink(self.scm) # Git's apply_reverse_diff uses resolve-ChangeLogs
# Only test the simple case, as any other will end up with conflict markers.
self.scm.apply_reverse_diff('5')
self.assertEqual(read_from_path('test_file'), "test1test2test3\n")
def _shared_test_diff_for_revision(self):
# Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
r3_patch = self.scm.diff_for_revision(4)
self.assertRegexpMatches(r3_patch, 'test3')
self.assertNotRegexpMatches(r3_patch, 'test4')
self.assertRegexpMatches(r3_patch, 'test2')
self.assertRegexpMatches(self.scm.diff_for_revision(3), 'test2')
def _shared_test_svn_apply_git_patch(self):
self._setup_webkittools_scripts_symlink(self.scm)
git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
new file mode 100644
index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d9060151690
GIT binary patch
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
literal 0
HcmV?d00001
"""
self.checkout.apply_patch(self._create_patch(git_binary_addition))
added = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual(512, len(added))
self.assertTrue(added.startswith('GIF89a'))
self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# The file already exists.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition))
git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
GIT binary patch
literal 7
OcmYex&reD$;sO8*F9L)B
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
"""
self.checkout.apply_patch(self._create_patch(git_binary_modification))
modified = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual('foobar\n', modified)
self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# Applying the same modification should fail.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification))
git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
deleted file mode 100644
index 323fae0..0000000
GIT binary patch
literal 0
HcmV?d00001
literal 7
OcmYex&reD$;sO8*F9L)B
"""
self.checkout.apply_patch(self._create_patch(git_binary_deletion))
self.assertFalse(os.path.exists('fizzbuzz7.gif'))
self.assertNotIn('fizzbuzz7.gif', self.scm.changed_files())
# Cannot delete again.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion))
def _shared_test_add_recursively(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
def _shared_test_delete_recursively(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
self.assertNotIn("added_dir", self.scm.added_files())
def _shared_test_delete_recursively_or_not(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
write_into_file_at_path("added_dir/another_added_file", "more new stuff")
self.scm.add("added_dir/added_file")
self.scm.add("added_dir/another_added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
self.assertIn("added_dir/another_added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
self.assertIn("added_dir/another_added_file", self.scm.added_files())
def _shared_test_exists(self, scm, commit_function):
os.chdir(scm.checkout_root)
self.assertFalse(scm.exists('foo.txt'))
write_into_file_at_path('foo.txt', 'some stuff')
self.assertFalse(scm.exists('foo.txt'))
scm.add('foo.txt')
commit_function('adding foo')
self.assertTrue(scm.exists('foo.txt'))
scm.delete('foo.txt')
commit_function('deleting foo')
self.assertFalse(scm.exists('foo.txt'))
def _shared_test_head_svn_revision(self):
self.assertEqual(self.scm.head_svn_revision(), '5')
def _shared_test_move(self):
write_into_file_at_path('added_file', 'new stuff')
self.scm.add('added_file')
self.scm.move('added_file', 'moved_file')
self.assertIn('moved_file', self.scm.added_files())
def _shared_test_move_recursive(self):
os.mkdir("added_dir")
write_into_file_at_path('added_dir/added_file', 'new stuff')
write_into_file_at_path('added_dir/another_added_file', 'more new stuff')
self.scm.add('added_dir')
self.scm.move('added_dir', 'moved_dir')
self.assertIn('moved_dir/added_file', self.scm.added_files())
self.assertIn('moved_dir/another_added_file', self.scm.added_files())
# Context manager that overrides the current timezone.
class TimezoneOverride(object):
def __init__(self, timezone_string):
self._timezone_string = timezone_string
def __enter__(self):
if hasattr(time, 'tzset'):
self._saved_timezone = os.environ.get('TZ', None)
os.environ['TZ'] = self._timezone_string
time.tzset()
def __exit__(self, type, value, traceback):
if hasattr(time, 'tzset'):
if self._saved_timezone:
os.environ['TZ'] = self._saved_timezone
else:
del os.environ['TZ']
time.tzset()
class SVNTest(SCMTest):
@staticmethod
def _set_date_and_reviewer(changelog_entry):
# Joe Cool matches the reviewer set in SCMTest._create_patch
changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
# svn-apply will update ChangeLog entries with today's date (as in Cupertino, CA, US)
with TimezoneOverride('PST8PDT'):
return changelog_entry.replace('DATE_HERE', date.today().isoformat())
def test_svn_apply(self):
first_entry = """2009-10-26 Eric Seidel <[email protected]>
Reviewed by Foo Bar.
Most awesome change ever.
* scm_unittest.py:
"""
intermediate_entry = """2009-10-27 Eric Seidel <[email protected]>
Reviewed by Baz Bar.
A more awesomer change yet!
* scm_unittest.py:
"""
one_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -1,5 +1,13 @@
2009-10-26 Eric Seidel <[email protected]>
%(whitespace)s
+ Reviewed by NOBODY (OOPS!).
+
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <[email protected]>
+
Reviewed by Foo Bar.
%(whitespace)s
Most awesome change ever.
""" % {'whitespace': ' '}
one_line_overlap_entry = """DATE_HERE Eric Seidel <[email protected]>
Reviewed by REVIEWER_HERE.
Second most awesome change ever.
* scm_unittest.py:
"""
two_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -2,6 +2,14 @@
%(whitespace)s
Reviewed by Foo Bar.
%(whitespace)s
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <[email protected]>
+
+ Reviewed by Foo Bar.
+
Most awesome change ever.
%(whitespace)s
* scm_unittest.py:
""" % {'whitespace': ' '}
two_line_overlap_entry = """DATE_HERE Eric Seidel <[email protected]>
Reviewed by Foo Bar.
Second most awesome change ever.
* scm_unittest.py:
"""
write_into_file_at_path('ChangeLog', first_entry)
run_command(['svn', 'add', 'ChangeLog'])
run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])
# Patch files were created against just 'first_entry'.
# Add a second commit to make svn-apply have to apply the patches with fuzz.
changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
write_into_file_at_path('ChangeLog', changelog_contents)
run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])
self._setup_webkittools_scripts_symlink(self.scm)
self.checkout.apply_patch(self._create_patch(one_line_overlap_patch))
expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
self.assertEqual(read_from_path('ChangeLog'), expected_changelog_contents)
self.scm.revert_files(['ChangeLog'])
self.checkout.apply_patch(self._create_patch(two_line_overlap_patch))
expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
self.assertEqual(read_from_path('ChangeLog'), expected_changelog_contents)
def setUp(self):
SVNTestRepository.setup(self)
os.chdir(self.svn_checkout_path)
self.scm = detect_scm_system(self.svn_checkout_path)
self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
def tearDown(self):
SVNTestRepository.tear_down(self)
def test_detect_scm_system_relative_url(self):
scm = detect_scm_system(".")
# I wanted to assert that we got the right path, but there was some
# crazy magic with temp folder names that I couldn't figure out.
self.assertTrue(scm.checkout_root)
def test_create_patch_is_full_patch(self):
test_dir_path = os.path.join(self.svn_checkout_path, "test_dir2")
os.mkdir(test_dir_path)
test_file_path = os.path.join(test_dir_path, 'test_file2')
write_into_file_at_path(test_file_path, 'test content')
run_command(['svn', 'add', 'test_dir2'])
# create_patch depends on 'svn-create-patch', so make a dummy version.
scripts_path = os.path.join(self.svn_checkout_path, 'Tools', 'Scripts')
os.makedirs(scripts_path)
create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD') # We could pass -n to prevent the \n, but not all echo accept -n.
os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
# Change into our test directory and run the create_patch command.
os.chdir(test_dir_path)
scm = detect_scm_system(test_dir_path)
self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right.
patch_contents = scm.create_patch()
# Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo.
self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n.
def test_detection(self):
self.assertEqual(self.scm.display_name(), "svn")
self.assertEqual(self.scm.supports_local_commits(), False)
def test_apply_small_binary_patch(self):
patch_contents = """Index: test_file.swf
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: test_file.swf
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
"""
expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
self._setup_webkittools_scripts_symlink(self.scm)
patch_file = self._create_patch(patch_contents)
self.checkout.apply_patch(patch_file)
actual_contents = read_from_path("test_file.swf", encoding=None)
self.assertEqual(actual_contents, expected_contents)
def test_apply_svn_patch(self):
patch = self._create_patch(_svn_diff("-r5:4"))
self._setup_webkittools_scripts_symlink(self.scm)
Checkout(self.scm).apply_patch(patch)
def test_commit_logs(self):
# Commits have dates and usernames in them, so we can't just direct compare.
self.assertRegexpMatches(self.scm.last_svn_commit_log(), 'fourth commit')
self.assertRegexpMatches(self.scm.svn_commit_log(3), 'second commit')
def _shared_test_commit_with_message(self, username=None):
write_into_file_at_path('test_file', 'more test content')
commit_text = self.scm.commit_with_message("another test commit", username)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_in_subdir(self, username=None):
write_into_file_at_path('test_dir/test_file3', 'more test content')
os.chdir("test_dir")
commit_text = self.scm.commit_with_message("another test commit", username)
os.chdir("..")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_text_parsing(self):
self._shared_test_commit_with_message()
def test_commit_with_username(self):
self._shared_test_commit_with_message("[email protected]")
def test_commit_without_authorization(self):
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=111669
        # This test ends up looking in the actual $HOME/.subversion for authorization,
# which makes it fragile. For now, set it to use a realm that won't be authorized,
# but we should really plumb through a fake_home_dir here like we do in
# test_has_authorization_for_realm.
self.scm.svn_server_realm = '<http://svn.example.com:80> Example'
self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)
def test_has_authorization_for_realm_using_credentials_with_passtype(self):
credentials = """
K 8
passtype
V 8
keychain
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
[email protected]
END
"""
self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def test_has_authorization_for_realm_using_credentials_with_password(self):
credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
[email protected]
K 8
password
V 4
blah
END
"""
self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def _test_has_authorization_for_realm_using_credentials(self, realm, credentials):
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")
write_into_file_at_path(fake_webkit_auth_file, credentials)
result = self.scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
os.remove(fake_webkit_auth_file)
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
return result
def test_not_have_authorization_for_realm_with_credentials_missing_password_and_passtype(self):
credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
[email protected]
END
"""
self.assertFalse(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def test_not_have_authorization_for_realm_when_missing_credentials_file(self):
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
self.assertFalse(self.scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
def test_reverse_diff(self):
self._shared_test_reverse_diff()
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
def test_svn_apply_git_patch(self):
self._shared_test_svn_apply_git_patch()
def test_changed_files(self):
self._shared_test_changed_files()
def test_changed_files_for_revision(self):
self._shared_test_changed_files_for_revision()
def test_added_files(self):
self._shared_test_added_files()
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
def test_revisions_changing_file(self):
self._shared_test_revisions_changing_file()
def test_committer_email_for_revision(self):
self._shared_test_committer_email_for_revision()
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
os.chdir(self.svn_checkout_path)
self.scm.delete("test_file")
self.assertIn("test_file", self.scm.deleted_files())
def test_delete_list(self):
os.chdir(self.svn_checkout_path)
self.scm.delete_list(["test_file", "test_file2"])
self.assertIn("test_file", self.scm.deleted_files())
self.assertIn("test_file2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_head_svn_revision(self):
self._shared_test_head_svn_revision()
def test_move(self):
self._shared_test_move()
def test_move_recursive(self):
self._shared_test_move_recursive()
def test_propset_propget(self):
filepath = os.path.join(self.svn_checkout_path, "test_file")
expected_mime_type = "x-application/foo-bar"
self.scm.propset("svn:mime-type", expected_mime_type, filepath)
self.assertEqual(expected_mime_type, self.scm.propget("svn:mime-type", filepath))
def test_show_head(self):
write_into_file_at_path("test_file", u"Hello!", "utf-8")
SVNTestRepository._svn_commit("fourth commit")
self.assertEqual("Hello!", self.scm.show_head('test_file'))
def test_show_head_binary(self):
data = "\244"
write_into_file_at_path("binary_file", data, encoding=None)
self.scm.add("binary_file")
self.scm.commit_with_message("a test commit")
self.assertEqual(data, self.scm.show_head('binary_file'))
def do_test_diff_for_file(self):
write_into_file_at_path('test_file', 'some content')
self.scm.commit_with_message("a test commit")
diff = self.scm.diff_for_file('test_file')
self.assertEqual(diff, "")
write_into_file_at_path("test_file", "changed content")
diff = self.scm.diff_for_file('test_file')
self.assertIn("-some content", diff)
self.assertIn("+changed content", diff)
def clean_bogus_dir(self):
self.bogus_dir = self.scm._bogus_dir_name()
if os.path.exists(self.bogus_dir):
shutil.rmtree(self.bogus_dir)
def test_diff_for_file_with_existing_bogus_dir(self):
self.clean_bogus_dir()
os.mkdir(self.bogus_dir)
self.do_test_diff_for_file()
self.assertTrue(os.path.exists(self.bogus_dir))
shutil.rmtree(self.bogus_dir)
def test_diff_for_file_with_missing_bogus_dir(self):
self.clean_bogus_dir()
self.do_test_diff_for_file()
self.assertFalse(os.path.exists(self.bogus_dir))
def test_svn_lock(self):
svn_root_lock_path = ".svn/lock"
write_into_file_at_path(svn_root_lock_path, "", "utf-8")
# webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
self.assertRaises(ScriptError, run_command, ['svn', 'update'])
self.scm.discard_working_directory_changes()
self.assertFalse(os.path.exists(svn_root_lock_path))
run_command(['svn', 'update']) # Should succeed and not raise.
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_with_message)
class GitTest(SCMTest):
def setUp(self):
"""Sets up fresh git repository with one commit. Then setups a second git
repo that tracks the first one."""
# FIXME: We should instead clone a git repo that is tracking an SVN repo.
# That better matches what we do with WebKit.
self.original_dir = os.getcwd()
self.untracking_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout2")
run_command(['git', 'init', self.untracking_checkout_path])
os.chdir(self.untracking_checkout_path)
write_into_file_at_path('foo_file', 'foo')
run_command(['git', 'add', 'foo_file'])
run_command(['git', 'commit', '-am', 'dummy commit'])
self.untracking_scm = detect_scm_system(self.untracking_checkout_path)
self.tracking_git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
run_command(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
os.chdir(self.tracking_git_checkout_path)
self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)
def tearDown(self):
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
os.chdir(self.original_dir)
run_command(['rm', '-rf', self.tracking_git_checkout_path])
run_command(['rm', '-rf', self.untracking_checkout_path])
def test_remote_branch_ref(self):
self.assertEqual(self.tracking_scm.remote_branch_ref(), 'refs/remotes/origin/master')
os.chdir(self.untracking_checkout_path)
self.assertRaises(ScriptError, self.untracking_scm.remote_branch_ref)
def test_multiple_remotes(self):
run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
self.assertEqual(self.tracking_scm.remote_branch_ref(), 'remote1')
def test_create_patch(self):
write_into_file_at_path('test_file_commit1', 'contents')
run_command(['git', 'add', 'test_file_commit1'])
scm = self.tracking_scm
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertNotRegexpMatches(patch, r'Subversion Revision:')
def test_orderfile(self):
os.mkdir("Tools")
os.mkdir("Source")
os.mkdir("LayoutTests")
os.mkdir("Websites")
# Slash should always be the right path separator since we use cygwin on Windows.
Tools_ChangeLog = "Tools/ChangeLog"
write_into_file_at_path(Tools_ChangeLog, "contents")
Source_ChangeLog = "Source/ChangeLog"
write_into_file_at_path(Source_ChangeLog, "contents")
LayoutTests_ChangeLog = "LayoutTests/ChangeLog"
write_into_file_at_path(LayoutTests_ChangeLog, "contents")
Websites_ChangeLog = "Websites/ChangeLog"
write_into_file_at_path(Websites_ChangeLog, "contents")
Tools_ChangeFile = "Tools/ChangeFile"
write_into_file_at_path(Tools_ChangeFile, "contents")
Source_ChangeFile = "Source/ChangeFile"
write_into_file_at_path(Source_ChangeFile, "contents")
LayoutTests_ChangeFile = "LayoutTests/ChangeFile"
write_into_file_at_path(LayoutTests_ChangeFile, "contents")
Websites_ChangeFile = "Websites/ChangeFile"
write_into_file_at_path(Websites_ChangeFile, "contents")
run_command(['git', 'add', 'Tools/ChangeLog'])
run_command(['git', 'add', 'LayoutTests/ChangeLog'])
run_command(['git', 'add', 'Source/ChangeLog'])
run_command(['git', 'add', 'Websites/ChangeLog'])
run_command(['git', 'add', 'Tools/ChangeFile'])
run_command(['git', 'add', 'LayoutTests/ChangeFile'])
run_command(['git', 'add', 'Source/ChangeFile'])
run_command(['git', 'add', 'Websites/ChangeFile'])
scm = self.tracking_scm
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'Tools/ChangeFile', patch).start())
self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'Websites/ChangeFile', patch).start())
self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'Source/ChangeFile', patch).start())
self.assertTrue(re.search(r'LayoutTests/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
def test_exists(self):
scm = self.untracking_scm
self._shared_test_exists(scm, scm.commit_locally_with_message)
def test_head_svn_revision(self):
scm = detect_scm_system(self.untracking_checkout_path)
# If we cloned a git repo tracking an SVN repo, this would give the same result as
# self._shared_test_head_svn_revision().
self.assertEqual(scm.head_svn_revision(), '')
def test_rename_files(self):
scm = self.tracking_scm
scm.move('foo_file', 'bar_file')
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertNotRegexpMatches(patch, r'rename from ')
self.assertNotRegexpMatches(patch, r'rename to ')
class GitSVNTest(SCMTest):
def _setup_git_checkout(self):
self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
# --quiet doesn't make git svn silent, so we use run_silent to redirect output
run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
os.chdir(self.git_checkout_path)
def _tear_down_git_checkout(self):
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
os.chdir(self.original_dir)
run_command(['rm', '-rf', self.git_checkout_path])
def setUp(self):
self.original_dir = os.getcwd()
SVNTestRepository.setup(self)
self._setup_git_checkout()
self.scm = detect_scm_system(self.git_checkout_path)
self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
def tearDown(self):
SVNTestRepository.tear_down(self)
self._tear_down_git_checkout()
def test_detection(self):
self.assertEqual(self.scm.display_name(), "git")
self.assertEqual(self.scm.supports_local_commits(), True)
def test_read_git_config(self):
key = 'test.git-config'
value = 'git-config value'
run_command(['git', 'config', key, value])
self.assertEqual(self.scm.read_git_config(key), value)
def test_local_commits(self):
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
run_command(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm.local_commits()), 1)
def test_discard_local_commits(self):
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
run_command(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm.local_commits()), 1)
self.scm.discard_local_commits()
self.assertEqual(len(self.scm.local_commits()), 0)
def test_delete_branch(self):
new_branch = 'foo'
run_command(['git', 'checkout', '-b', new_branch])
self.assertEqual(run_command(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
run_command(['git', 'checkout', '-b', 'bar'])
self.scm.delete_branch(new_branch)
self.assertNotRegexpMatches(run_command(['git', 'branch']), r'foo')
def test_remote_merge_base(self):
# Diff to merge-base should include working-copy changes,
# which the diff to svn_branch.. doesn't.
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..')
diff_to_merge_base = _git_diff(self.scm.remote_merge_base())
self.assertNotRegexpMatches(diff_to_common_base, r'foo')
self.assertRegexpMatches(diff_to_merge_base, r'foo')
def test_rebase_in_progress(self):
svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
write_into_file_at_path(svn_test_file, "svn_checkout")
run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
git_test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(git_test_file, "git_checkout")
run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
# --quiet doesn't make git svn silent, so use run_silent to redirect output
self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
self.assertTrue(self.scm.rebase_in_progress())
# Make sure our cleanup works.
self.scm.discard_working_directory_changes()
self.assertFalse(self.scm.rebase_in_progress())
# Make sure cleanup doesn't throw when no rebase is in progress.
self.scm.discard_working_directory_changes()
def test_commitish_parsing(self):
# Multiple revisions are cherry-picked.
self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
# ... is an invalid range specifier
self.assertRaises(ScriptError, self.scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
def test_commitish_order(self):
commit_range = 'HEAD~3..HEAD'
actual_commits = self.scm.commit_ids_from_commitish_arguments([commit_range])
expected_commits = []
expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
self.assertEqual(actual_commits, expected_commits)
def test_apply_git_patch(self):
        # We carefully pick a diff which does not have a directory addition
# as currently svn-apply will error out when trying to remove directories
# in Git: https://bugs.webkit.org/show_bug.cgi?id=34871
patch = self._create_patch(_git_diff('HEAD..HEAD^'))
self._setup_webkittools_scripts_symlink(self.scm)
Checkout(self.scm).apply_patch(patch)
def test_commit_text_parsing(self):
write_into_file_at_path('test_file', 'more test content')
commit_text = self.scm.commit_with_message("another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_with_message_working_copy_only(self):
write_into_file_at_path('test_file_commit1', 'more test content')
run_command(['git', 'add', 'test_file_commit1'])
commit_text = self.scm.commit_with_message("yet another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def _local_commit(self, filename, contents, message):
write_into_file_at_path(filename, contents)
run_command(['git', 'add', filename])
self.scm.commit_locally_with_message(message)
def _one_local_commit(self):
self._local_commit('test_file_commit1', 'more test content', 'another test commit')
def _one_local_commit_plus_working_copy_changes(self):
self._one_local_commit()
write_into_file_at_path('test_file_commit2', 'still more test content')
run_command(['git', 'add', 'test_file_commit2'])
def _second_local_commit(self):
self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')
def _two_local_commits(self):
self._one_local_commit()
self._second_local_commit()
def _three_local_commits(self):
self._local_commit('test_file_commit0', 'more test content', 'another test commit')
self._two_local_commits()
def test_locally_commit_all_working_copy_changes(self):
self._local_commit('test_file', 'test content', 'test commit')
write_into_file_at_path('test_file', 'changed test content')
self.assertTrue(self.scm.has_working_directory_changes())
self.scm.commit_locally_with_message('all working copy changes')
self.assertFalse(self.scm.has_working_directory_changes())
def test_locally_commit_no_working_copy_changes(self):
self._local_commit('test_file', 'test content', 'test commit')
write_into_file_at_path('test_file', 'changed test content')
self.assertTrue(self.scm.has_working_directory_changes())
self.assertRaises(ScriptError, self.scm.commit_locally_with_message, 'no working copy changes', False)
def test_locally_commit_selected_working_copy_changes(self):
self._local_commit('test_file_1', 'test content 1', 'test commit 1')
self._local_commit('test_file_2', 'test content 2', 'test commit 2')
write_into_file_at_path('test_file_1', 'changed test content 1')
write_into_file_at_path('test_file_2', 'changed test content 2')
self.assertTrue(self.scm.has_working_directory_changes())
run_command(['git', 'add', 'test_file_1'])
self.scm.commit_locally_with_message('selected working copy changes', commit_all_working_directory_changes=False)
self.assertTrue(self.scm.has_working_directory_changes())
self.assertTrue(self.scm.diff_for_file('test_file_1') == '')
self.assertFalse(self.scm.diff_for_file('test_file_2') == '')
def test_revisions_changing_files_with_local_commit(self):
self._one_local_commit()
self.assertItemsEqual(self.scm.revisions_changing_file('test_file_commit1'), [])
def test_commit_with_message(self):
self._one_local_commit_plus_working_copy_changes()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit(self):
self._two_local_commits()
commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD^")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
self.assertNotRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_git_commit_range(self):
self._three_local_commits()
commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertNotRegexpMatches(svn_log, r'test_file_commit0')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
self.assertRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_only_local_commit(self):
self._one_local_commit()
commit_text = self.scm.commit_with_message("another test commit")
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", git_commit="HEAD^")
def test_commit_with_message_multiple_local_commits_always_squash(self):
run_command(['git', 'config', 'webkit-patch.commit-should-always-squash', 'true'])
self._two_local_commits()
commit_text = self.scm.commit_with_message("yet another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits(self):
self._two_local_commits()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertNotRegexpMatches(svn_log, r'test_file2')
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced_with_conflict(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._local_commit('test_file2', 'asdf', 'asdf commit')
# There's a conflict between trunk and the test_file2 modification.
self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", force_squash=True)
def test_upstream_branch(self):
run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self.assertEqual(self.scm._upstream_branch(), 'my-branch')
def test_remote_branch_ref(self):
self.assertEqual(self.scm.remote_branch_ref(), 'refs/remotes/trunk')
def test_reverse_diff(self):
self._shared_test_reverse_diff()
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
def test_svn_apply_git_patch(self):
self._shared_test_svn_apply_git_patch()
def test_create_patch_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_after_merge(self):
run_command(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
self._one_local_commit()
run_command(['git', 'merge', 'trunk'])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_with_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(changed_files=['test_file_commit2'])
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_with_rm_and_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
os.remove('test_file_commit1')
patch = self.scm.create_patch()
patch_with_changed_files = self.scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
self.assertEqual(patch, patch_with_changed_files)
def test_create_patch_git_commit(self):
self._two_local_commits()
patch = self.scm.create_patch(git_commit="HEAD^")
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertNotRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_git_commit_range(self):
self._three_local_commits()
patch = self.scm.create_patch(git_commit="HEAD~2..HEAD")
self.assertNotRegexpMatches(patch, r'test_file_commit0')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(git_commit="HEAD....")
self.assertNotRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_multiple_local_commits(self):
self._two_local_commits()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
patch = self.scm.create_patch()
self.assertNotRegexpMatches(patch, r'test_file2')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_binary_patch(self):
# Create a git binary patch and check the contents.
test_file_name = 'binary_file'
test_file_path = os.path.join(self.git_checkout_path, test_file_name)
file_contents = ''.join(map(chr, range(256)))
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'\nliteral 0\n')
self.assertRegexpMatches(patch, r'\nliteral 256\n')
# Check if we can apply the created patch.
run_command(['git', 'rm', '-f', test_file_name])
self._setup_webkittools_scripts_symlink(self.scm)
self.checkout.apply_patch(self._create_patch(patch))
self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None))
# Check if we can create a patch from a local commit.
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
run_command(['git', 'commit', '-m', 'binary diff'])
patch_from_local_commit = self.scm.create_patch('HEAD')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 0\n')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 256\n')
def test_changed_files_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files()
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
# working copy should *not* be in the list.
files = self.scm.changed_files('trunk..')
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
# working copy *should* be in the list.
files = self.scm.changed_files('trunk....')
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_git_commit(self):
self._two_local_commits()
files = self.scm.changed_files(git_commit="HEAD^")
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
def test_changed_files_git_commit_range(self):
self._three_local_commits()
files = self.scm.changed_files(git_commit="HEAD~2..HEAD")
self.assertNotIn('test_file_commit0', files)
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files(git_commit="HEAD....")
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_multiple_local_commits(self):
self._two_local_commits()
files = self.scm.changed_files()
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
files = self.scm.changed_files()
self.assertNotIn('test_file2', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files(self):
self._shared_test_changed_files()
def test_changed_files_for_revision(self):
self._shared_test_changed_files_for_revision()
def test_changed_files_upstream(self):
run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
self._one_local_commit()
run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self._second_local_commit()
write_into_file_at_path('test_file_commit0', 'more test content')
run_command(['git', 'add', 'test_file_commit0'])
        # equivalent to 'git diff my-branch..HEAD', should not include working changes
files = self.scm.changed_files(git_commit='UPSTREAM..')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertNotIn('test_file_commit0', files)
# equivalent to 'git diff my-branch', *should* include working changes
files = self.scm.changed_files(git_commit='UPSTREAM....')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit0', files)
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
def test_revisions_changing_file(self):
self._shared_test_revisions_changing_file()
def test_added_files(self):
self._shared_test_added_files()
def test_committer_email_for_revision(self):
self._shared_test_committer_email_for_revision()
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
self._two_local_commits()
self.scm.delete('test_file_commit1')
self.assertIn("test_file_commit1", self.scm.deleted_files())
def test_delete_list(self):
self._two_local_commits()
self.scm.delete_list(["test_file_commit1", "test_file_commit2"])
self.assertIn("test_file_commit1", self.scm.deleted_files())
self.assertIn("test_file_commit2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_head_svn_revision(self):
self._shared_test_head_svn_revision()
def test_move(self):
self._shared_test_move()
def test_move_recursive(self):
self._shared_test_move_recursive()
def test_to_object_name(self):
relpath = 'test_file_commit1'
fullpath = os.path.realpath(os.path.join(self.git_checkout_path, relpath))
self.assertEqual(relpath, self.scm.to_object_name(fullpath))
def test_show_head(self):
self._two_local_commits()
self.assertEqual("more test content", self.scm.show_head('test_file_commit1'))
def test_show_head_binary(self):
self._two_local_commits()
data = "\244"
write_into_file_at_path("binary_file", data, encoding=None)
self.scm.add("binary_file")
self.scm.commit_locally_with_message("a test commit")
self.assertEqual(data, self.scm.show_head('binary_file'))
def test_diff_for_file(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', "Updated", encoding=None)
diff = self.scm.diff_for_file('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertIn("+Updated", diff)
self.assertIn("-more test content", diff)
self.scm.add('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertIn("+Updated", cached_diff)
self.assertIn("-more test content", cached_diff)
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_locally_with_message)
# We need to split off more of these SCM tests to use mocks instead of the filesystem.
# This class is the first part of that.
class GitTestWithMock(unittest.TestCase):
maxDiff = None
def make_scm(self, logging_executive=False):
# We do this should_log dance to avoid logging when Git.__init__ runs sysctl on mac to check for 64-bit support.
scm = Git(cwd=".", executive=MockExecutive(), filesystem=MockFileSystem())
scm.read_git_config = lambda *args, **kw: "MOCKKEY:MOCKVALUE"
scm._executive._should_log = logging_executive
return scm
def test_create_patch(self):
scm = self.make_scm(logging_executive=True)
expected_stderr = """\
MOCK run_command: ['git', 'merge-base', 'MOCKVALUE', 'HEAD'], cwd=%(checkout)s
MOCK run_command: ['git', 'diff', '--binary', '--no-color', '--no-ext-diff', '--full-index', '--no-renames', '', 'MOCK output of child process', '--'], cwd=%(checkout)s
MOCK run_command: ['git', 'rev-parse', '--show-toplevel'], cwd=%(checkout)s
MOCK run_command: ['git', 'log', '-1', '--grep=git-svn-id:', '--date=iso', './MOCK output of child process/MOCK output of child process'], cwd=%(checkout)s
""" % {'checkout': scm.checkout_root}
OutputCapture().assert_outputs(self, scm.create_patch, expected_logs=expected_stderr)
def test_push_local_commits_to_server_with_username_and_password(self):
self.assertEqual(self.make_scm().push_local_commits_to_server(username='[email protected]', password='blah'), "MOCK output of child process")
def test_push_local_commits_to_server_without_username_and_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server)
def test_push_local_commits_to_server_with_username_and_without_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'username': '[email protected]'})
def test_push_local_commits_to_server_without_username_and_with_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'password': 'blah'})
def test_timestamp_of_revision(self):
scm = self.make_scm()
scm.find_checkout_root = lambda path: ''
scm._run_git = lambda args: 'Date: 2013-02-08 08:05:49 +0000'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T08:05:49Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:02:03 +0130'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-07T23:32:03Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:55:21 -0800'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T09:55:21Z')
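# Editor's illustrative sketch (not part of the original test suite): the UTC
# conversions asserted in test_timestamp_of_revision() above can be reproduced with
# the standard library alone; the helper name below is made up for the example.
def _git_date_to_utc_zulu(git_date):
    from datetime import datetime, timedelta
    # git_date looks like 'Date: 2013-02-08 01:02:03 +0130'
    _, date_part, time_part, offset = git_date.split()
    local = datetime.strptime(date_part + ' ' + time_part, '%Y-%m-%d %H:%M:%S')
    sign = -1 if offset.startswith('-') else 1
    delta = timedelta(hours=int(offset[1:3]), minutes=int(offset[3:5])) * sign
    return (local - delta).strftime('%Y-%m-%dT%H:%M:%SZ')
# e.g. _git_date_to_utc_zulu('Date: 2013-02-08 01:02:03 +0130') == '2013-02-07T23:32:03Z'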
| apache-2.0 | -2,000,034,110,292,578,300 | 43.905832 | 211 | 0.663235 | false |
Gabriel439/pants | src/python/pants/option/options_bootstrapper.py | 1 | 5139 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import sys
from pants.base.config import Config
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.option_util import is_boolean_flag
from pants.option.options import Options
class OptionsBootstrapper(object):
"""An object that knows how to create options in two stages: bootstrap, and then full options."""
def __init__(self, env=None, configpath=None, args=None):
self._env = env if env is not None else os.environ.copy()
self._configpath = configpath
self._post_bootstrap_config = None # Will be set later.
self._args = sys.argv if args is None else args
self._bootstrap_options = None # We memoize the bootstrap options here.
self._full_options = {} # We memoize the full options here.
def get_bootstrap_options(self):
""":returns: an Options instance that only knows about the bootstrap options.
:rtype: :class:`Options`
"""
if not self._bootstrap_options:
flags = set()
short_flags = set()
def capture_the_flags(*args, **kwargs):
for arg in args:
flags.add(arg)
if len(arg) == 2:
short_flags.add(arg)
elif is_boolean_flag(kwargs):
flags.add('--no-{}'.format(arg[2:]))
GlobalOptionsRegistrar.register_bootstrap_options(capture_the_flags)
def is_bootstrap_option(arg):
components = arg.split('=', 1)
if components[0] in flags:
return True
for flag in short_flags:
if arg.startswith(flag):
return True
return False
# Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
# Stop before '--' since args after that are pass-through and may have duplicate names to our
# bootstrap options.
bargs = filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != '--', self._args))
configpaths = [self._configpath] if self._configpath else None
pre_bootstrap_config = Config.load(configpaths)
def bootstrap_options_from_config(config):
bootstrap_options = Options.create(env=self._env, config=config,
known_scope_infos=[GlobalOptionsRegistrar.get_scope_info()], args=bargs)
def register_global(*args, **kwargs):
bootstrap_options.register(GLOBAL_SCOPE, *args, **kwargs)
GlobalOptionsRegistrar.register_bootstrap_options(register_global)
return bootstrap_options
initial_bootstrap_options = bootstrap_options_from_config(pre_bootstrap_config)
bootstrap_option_values = initial_bootstrap_options.for_global_scope()
# Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
# from (typically pants.ini), then config override, then rcfiles.
full_configpaths = pre_bootstrap_config.sources()
if bootstrap_option_values.config_override:
full_configpaths.extend(bootstrap_option_values.config_override)
if bootstrap_option_values.pantsrc:
rcfiles = [os.path.expanduser(rcfile) for rcfile in bootstrap_option_values.pantsrc_files]
existing_rcfiles = filter(os.path.exists, rcfiles)
full_configpaths.extend(existing_rcfiles)
self._post_bootstrap_config = Config.load(full_configpaths,
seed_values=bootstrap_option_values)
# Now recompute the bootstrap options with the full config. This allows us to pick up
# bootstrap values (such as backends) from a config override file, for example.
self._bootstrap_options = bootstrap_options_from_config(self._post_bootstrap_config)
return self._bootstrap_options
def get_full_options(self, known_scope_infos):
"""Get the full Options instance bootstrapped by this object for the given known scopes.
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:returns: A bootrapped Options instance that also carries options for all the supplied known
scopes.
:rtype: :class:`Options`
"""
key = frozenset(sorted(known_scope_infos))
if key not in self._full_options:
# Note: Don't inline this into the Options() call, as this populates
# self._post_bootstrap_config, which is another argument to that call.
bootstrap_option_values = self.get_bootstrap_options().for_global_scope()
self._full_options[key] = Options.create(self._env,
self._post_bootstrap_config,
known_scope_infos,
args=self._args,
bootstrap_option_values=bootstrap_option_values)
return self._full_options[key]
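# Editor's note: an illustrative, self-contained sketch (not part of this module) of
# the bootstrap-arg filtering idea used in get_bootstrap_options() above -- only
# recognised bootstrap flags are kept, and everything after '--' is ignored.  The
# flag names below are made up for the example.
def _example_bootstrap_arg_filtering():
  import itertools
  bootstrap_flags = {'--pants-config-files', '--pants-version'}
  def is_bootstrap_option(arg):
    return arg.split('=', 1)[0] in bootstrap_flags
  args = ['--pants-version=1.0', '--level=debug', '--', '--pants-version=ignored']
  return list(filter(is_bootstrap_option,
                     itertools.takewhile(lambda arg: arg != '--', args)))
# _example_bootstrap_arg_filtering() == ['--pants-version=1.0']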
| apache-2.0 | -1,450,630,030,796,535,800 | 43.686957 | 99 | 0.664915 | false |
geoffsmiller/RetroTechClub | retrotechclub/views/game_views.py | 1 | 5726 | from flask import flash, redirect, render_template, url_for
from flask_login import login_required
from retrotechclub import app, db
from retrotechclub.models import Company, GameMaster, GameRelease, Platform
from retrotechclub.forms import GameMasterForm, GameReleaseForm
@app.route('/games')
def game_masters_list():
game_masters = GameMaster.query.all()
return render_template('game_masters_list.html', game_masters=game_masters)
@app.route('/games/<master_id>')
def game_master_view(master_id):
game_master = GameMaster.query.get_or_404(master_id)
game_releases = GameRelease.query.filter_by(game_master_id=master_id)
return render_template('game_master_view.html', game_master=game_master,
game_releases=game_releases)
@app.route('/games/add', methods=['POST', 'GET'])
@login_required
def game_master_add():
form = GameMasterForm()
if form.validate_on_submit():
game_master = GameMaster(
form.name.data,
form.release_date.data,
form.description.data
)
db.session.add(game_master)
db.session.commit()
flash('New game master added', 'alert-success')
return render_template('game_master_add.html', form=form)
@app.route('/games/<master_id>/edit', methods=['POST', 'GET'])
@login_required
def game_master_edit(master_id):
form = GameMasterForm()
game_master = GameMaster.query.get_or_404(master_id)
game_releases = GameRelease.query.filter_by(game_master_id=master_id)
if not form.is_submitted():
form.name.data = game_master.name
form.release_date.data = game_master.release_date
form.description.data = game_master.description
if form.validate_on_submit():
game_master.name = form.name.data
game_master.release_date = form.release_date.data
game_master.description = form.description.data
db.session.commit()
flash('Game master edited', 'alert-success')
return render_template(
'game_master_edit.html',
form=form,
game_master=game_master,
game_releases=game_releases
)
@app.route('/games/<master_id>/delete')
@login_required
def game_master_delete(master_id):
game_master = GameMaster.query.get_or_404(master_id)
db.session.delete(game_master)
db.session.commit()
flash('Game master deleted', 'alert-success')
return redirect(url_for('game_masters_list'))
@app.route('/games/<master_id>/release/<release_id>')
def game_release_view(master_id, release_id):
game_master = GameMaster.query.get_or_404(master_id)
game_release = GameRelease.query.get_or_404(release_id)
return render_template('game_release_view.html', game_master=game_master,
game_release=game_release)
@app.route('/games/<master_id>/release/add', methods=['POST', 'GET'])
@login_required
def game_release_add(master_id):
game_master = GameMaster.query.get_or_404(master_id)
platforms = Platform.query.all()
companies = Company.query.all()
form = GameReleaseForm()
company_choices = [(c.id, c.name) for c in companies]
form.publisher.choices = company_choices
form.developer.choices = company_choices
form.platform.choices = [(p.id, p.name) for p in platforms]
if form.validate_on_submit():
game_release = GameRelease(
form.name.data,
form.release_date.data,
master_id,
form.publisher.data,
form.developer.data,
form.platform.data,
form.description.data
)
db.session.add(game_release)
db.session.commit()
flash('New game release added', 'alert-success')
return render_template('game_release_edit.html', form=form,
game_master=game_master)
@app.route('/games/<master_id>/release/<release_id>/edit',
methods=['POST', 'GET'])
@login_required
def game_release_edit(master_id, release_id):
game_master = GameMaster.query.get_or_404(master_id)
platforms = Platform.query.all()
companies = Company.query.all()
game_release = GameRelease.query.get_or_404(release_id)
form = GameReleaseForm()
company_choices = [(c.id, c.name) for c in companies]
form.publisher.choices = company_choices
form.developer.choices = company_choices
form.platform.choices = [(p.id, p.name) for p in platforms]
if not form.is_submitted():
form.name.data = game_release.name
form.release_date.data = game_release.release_date
form.publisher.data = game_release.publisher_id
form.developer.data = game_release.developer_id
form.platform.data = game_release.platform_id
form.description.data = game_release.description
if form.validate_on_submit():
game_release.name = form.name.data
game_release.release_date = form.release_date.data
game_release.publisher_id = form.publisher.data
game_release.developer_id = form.developer.data
game_release.description = form.description.data
game_release.platform_id = form.platform.data
db.session.commit()
flash('Game release edited', 'alert-success')
return render_template('game_release_edit.html', form=form,
game_release=game_release, game_master=game_master)
@app.route('/games/<master_id>/release/<release_id>/delete')
@login_required
def game_release_delete(master_id, release_id):
game_release = GameRelease.query.get_or_404(release_id)
db.session.delete(game_release)
db.session.commit()
flash('Game release deleted', 'alert-success')
return redirect(url_for('game_master_view', master_id=master_id))
| mit | -1,783,189,908,252,373,000 | 37.689189 | 79 | 0.66853 | false |
Grumpy-Mike/Mikes-Pi-Bakery | Santa's-Run/software/santa's_run.py | 1 | 8982 | # Santa's Run - a Christmas game
# By Mike Cook - October 2020
import pygame
import time
import os
import random
import RPi.GPIO as io
def main():
global restart, santaState, coverTrack, santaDistance, targetRect, disInc
global santaL_R, delivered, santaHeight, drop, lastDistance, throwCount, distance
init()
initGPIO()
print("Santa's Run")
while True:
if restart:
distance = 0 ; lastDistance = 0 ; santaDistance = 0
santaState = 0 ; coverTrack=[] ; drop = False ; throwCount = 0
delivered = 0 ; santaL_R = 0 ; santaHeight = rigel - 150
targetRect = []
setUpGround()
restart = False
showPicture(distance)
waitNextFrame()
distance = santaDistance * 4
showPicture(distance)
santaHeight += 0.5 # normal loss of height
if santaHeight >= rigel : santaHeight = rigel # peg the lowest he can get
if santaHeight <= 0 : santaHeight = 0.0 # peg the highest he can get
if santaDistance >= 1150 : santaL_R = 1 # reverse run at end of screen
if santaDistance < 0 or throwCount >= 100: # stop at end of screen or when magazines run out
gameSound[3].play() # end
drawWords("Finished "+str(delivered)+" MagPi magazines delivered ",400,258)
drawWords("Type return for another run", 467, 300)
pygame.display.update()
while not restart:
checkForEvent()
def init():
global textHeight, font, restart, santaState, screen
global soundEffects, santaFrames, background, chimney
global cover, gameSound, snowLine, snowLineShort, drop
global groundPlotType, groundPlotY, groundTypeW, coverTrack
global targetRect, coverDrop, santaL_R, groundSpeed, rigel
global dropVel, groundLine, frame
pygame.init() # initialise graphics interface
pygame.mixer.quit()
pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=512)
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
pygame.display.set_caption("Santa's Run")
pygame.event.set_allowed(None)
pygame.event.set_allowed([pygame.KEYDOWN, pygame.QUIT, pygame.MOUSEBUTTONDOWN])
screen = pygame.display.set_mode([1250,526],0,32)
textHeight = 36
font = pygame.font.Font(None, textHeight)
random.seed()
restart = True ; santaState = 0 ; drop = False
santaFrames = [[0,0] for _ in range(9) ]
frames1 = [ pygame.image.load("images/Santa/Santa"+str(frame)+".png").convert_alpha()
for frame in range(1,10)]
frames2 = [ pygame.transform.flip (pygame.image.load("images/Santa/Santa"+str(frame)+".png").convert_alpha(), True, False)
for frame in range(1,10)]
santaL_R = 0 # santa image flip 0 l to r, 1 r to l
frame = 0
for i in range(9) :
santaFrames[i][0] = frames1[i]
santaFrames[i][1] = frames2[i]
background = pygame.image.load("images/stars.png").convert_alpha()
chimney = pygame.image.load("images/chimney.png").convert_alpha()
cover = [pygame.image.load("images/covers/"+str(cov)+"-Cover1.png").convert_alpha()
for cov in range(1,101) ]
soundEffects = ["throw","hit","miss","end"]
gameSound = [ pygame.mixer.Sound("sounds/"+soundEffects[sound]+".wav")
for sound in range(0,4)]
snowLine = pygame.image.load("images/snow_line.png").convert_alpha()
snowLineShort = pygame.image.load("images/snow_line_short.png").convert_alpha()
groundSpeed = 4
groundPlotType = [chimney, snowLine, snowLineShort]
groundPlotY = [466, 517, 517]
groundTypeW = [130, 130, 65] # width of each type of ground
coverTrack = []
targetRect = []
coverDrop = [0, 0, 0]
rigel = 312
dropVel = 0
# define what we fly over 0 = double chimney 1 = long snow line 2 = short snow line
groundLine = [1, 1, 0, 2, 0, 2, 0, 2, 1, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 2, 2, 0, 2, 0,
1, 0, 1, 0, 0, 0, 1, 2, 0, 1, 1, 0, 2, 0, 2, 0, 1, 0, 2, 0, 0, 1, 2, 0,
2, 0, 0, 1, 1, 1, 1]
def setUpGround():
global coverTrack, targetRect
targetRect = []
coverTrack = []
length = 0
for i in range(len(groundLine)) :
part = groundLine[i]
if part == 0 :
targetRect.append(pygame.Rect(length + 18, 481, 93, 32))
if part == 2 :
length += 65
else :
length += 130
#print("ground line length",length)
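# Editor's illustrative helper (not used by the game): the pixel length of the
# scrolling ground implied by a groundLine list, assuming the widths in
# groundTypeW above (130 px for types 0 and 1, 65 px for the short snow line).
def _track_length(line):
    return sum(65 if part == 2 else 130 for part in line)
# e.g. _track_length([1, 0, 2]) == 325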
def initGPIO():
io.setwarnings(False)
io.setmode(io.BCM)
io.setup(2, io.IN)
io.add_event_detect(2, io.FALLING, callback = shakeDetect, bouncetime = 30)
def showPicture(distance):
global coverDrop, drop, dropVel, santaDistance
screen.blit(background,[0,0])
showGround(distance)
''' # uncomment to see catching rectangles
for t in range(len(targetRect)) :
pygame.draw.rect(screen, (0,128,0), targetRect[t], 0)
'''
if drop :
if dropVel != 0 :
dropVel += 1
else :
dropVel = 2
screen.blit(cover[coverDrop[0]], [ coverDrop[1], coverDrop[2] ])
if santaL_R :
coverDrop[1] -= 4
else:
coverDrop[1] += 4
coverDrop[2] += dropVel
if coverDrop[2] > 526: gameSound[2].play() ; drop = False ; dropVel = 0
if catch(distance) :
gameSound[1].play()
drop = False
dropVel = 0
santaDistance += disInc * 8 # give a little kick
screen.blit(santaFrames[frame][santaL_R],[santaDistance, santaHeight])
pygame.display.update()
def showGround(scroll):
global lastDistance
if scroll != 0:
delta = scroll - lastDistance
for t in range(len(targetRect)):
targetRect[t] = targetRect[t].move(-delta, 0)
lastDistance = scroll
length = - scroll
chunk = 0
while length < 1250 :
if length > -130 :
screen.blit(groundPlotType[groundLine[chunk]],[length, groundPlotY[groundLine[chunk]]])
length += groundTypeW[groundLine[chunk]]
chunk += 1
for coverCount in range(len(coverTrack)) :
screen.blit(cover[coverTrack[coverCount][0]], [coverTrack[coverCount][1] - scroll,
413] )
def catch(offset) : # dropping cover collide with chimney catch rectangle
global coverTrack, delivered
caught = False
for r in range(len(targetRect)):
if targetRect[r].collidepoint((coverDrop[1], coverDrop[2] + 66)) or targetRect[r].collidepoint((coverDrop[1] + 50, coverDrop[2] + 66)):
caught = True ; delivered += 1
coverTrack.append([coverDrop[0], coverDrop[1] + offset, coverDrop[2]])
#print("coverTrack list",coverTrack)
return caught
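# Editor's illustrative sketch (not used by the game): each entry of targetRect is a
# chimney-mouth rectangle built in setUpGround(), and Rect.collidepoint() is what
# decides whether a falling cover is caught.
def _demo_catch_rect():
    rect = pygame.Rect(18, 481, 93, 32)
    return rect.collidepoint((50, 500)), rect.collidepoint((10, 500))  # (truthy, falsy)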
def drawWords(words,x,y) :
textSurface = pygame.Surface((14,textHeight))
textRect = textSurface.get_rect()
textRect.left = x
textRect.top = y
pygame.draw.rect(screen,(102,204,255), (x,y,14,textHeight-10), 0)
textSurface = font.render(words, True, (255,255,255), (102,204,255))
screen.blit(textSurface, textRect)
def shakeDetect(pin):
global frame, coverDrop, throwCount, disInc, santaDistance
global santaHeight
frame = frame + 1
if frame >= 9: frame = 0 # frame of animation
disInc = 2
if santaL_R : disInc = -2
if drop :
santaHeight -= 2 # go up
else :
santaDistance += disInc
def throw():
global santaHeight, drop, coverDrop, throwCount
if drop : return
else:
if santaHeight >= rigel : # boost up if too low
santaHeight = 30.0
else :
drop = True
if drop:
if santaL_R :
coverDrop = [throwCount, 100 + santaDistance, int(santaHeight)]
else :
coverDrop = [throwCount, santaDistance, int(santaHeight)]
throwCount += 1 # number of covers thrown for next time
gameSound[0].play() # throw
def waitNextFrame():
autoTime = time.time()
while time.time() - autoTime < 0.04:
checkForEvent()
def terminate(): # close down the program
print("Closing down")
io.remove_event_detect(2)
pygame.mixer.quit()
pygame.quit() # close pygame
os._exit(1)
def checkForEvent(): # see if we need to quit
global restart
event = pygame.event.poll()
if event.type == pygame.QUIT :
terminate()
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_ESCAPE :
terminate()
if event.key == pygame.K_SPACE :
throw()
if event.key == pygame.K_RETURN :
restart = True
print("New Run")
if event.type == pygame.MOUSEBUTTONDOWN :
pass
#print(pygame.mouse.get_pos())
#os.system("scrot")
# Main program logic:
if __name__ == '__main__':
main()
| gpl-2.0 | 4,538,330,641,443,869,000 | 36.739496 | 143 | 0.5943 | false |
kubeflow/kfp-tekton | sdk/python/tests/compiler/testdata/withparam_global.py | 1 | 1802 | # Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
from kfp_tekton.compiler import TektonCompiler
class Coder:
def empty(self):
return ""
TektonCompiler._get_unique_id_code = Coder.empty
@dsl.pipeline(name='withparam-global')
def pipeline(loopidy_doop: list = [3, 5, 7, 9]):
op0 = dsl.ContainerOp(
name="my-out-cop0",
image='python:alpine3.6',
command=["sh", "-c"],
arguments=[
'python -c "import json; import sys; json.dump([i for i in range(20, 31)], open(\'/tmp/out.json\', \'w\'))"'],
file_outputs={'out': '/tmp/out.json'},
)
with dsl.ParallelFor(loopidy_doop) as item:
op1 = dsl.ContainerOp(
name="my-in-cop1",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo no output global op1, item: %s" % item],
).after(op0)
op_out = dsl.ContainerOp(
name="my-out-cop2",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo no output global op2, outp: %s" % op0.output],
).after(op1)
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(pipeline, __file__.replace('.py', '.yaml'))
| apache-2.0 | -853,971,105,150,103,200 | 31.178571 | 122 | 0.629301 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/available_endpoint_services_operations.py | 1 | 4491 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class AvailableEndpointServicesOperations(object):
"""AvailableEndpointServicesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-08-01"
self.config = config
def list(
self, location, custom_headers=None, raw=False, **operation_config):
"""List what values of endpoint services are available for use.
:param location: The location to check available endpoint services.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of EndpointServiceResult
:rtype:
~azure.mgmt.network.v2017_08_01.models.EndpointServiceResultPaged[~azure.mgmt.network.v2017_08_01.models.EndpointServiceResult]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.EndpointServiceResultPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.EndpointServiceResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/virtualNetworkAvailableEndpointServices'}
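# Editor's usage sketch (not part of the generated client): list() returns a paged,
# iterator-like object, so callers simply loop over it.  How the operations object is
# reached is an assumption here -- in practice it hangs off the service client (for
# example network_client.available_endpoint_services) rather than being built directly,
# and each returned item is assumed to expose a `name` attribute.
def _print_available_endpoint_services(network_client, location):
    for service in network_client.available_endpoint_services.list(location):
        print(service.name)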
| mit | 6,466,520,176,057,215,000 | 41.367925 | 151 | 0.625696 | false |
Scitator/rl-course-experiments | GEN/genetic_gym.py | 1 | 5386 | #!/usr/bin/python
import gym
from gym import wrappers
import argparse
import numpy as np
import random
from tqdm import trange
def get_random_policy(env):
"""
Build a numpy array representing agent policy.
    This array must have one element per environment state.
    Each element must be an integer from 0 to n_actions - 1, representing the
    action to take from that state.
"""
return np.random.randint(0, int(env.action_space.n), int(env.observation_space.n))
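# Editor's illustrative sketch (not part of the original script): a tabular policy is
# consumed simply by indexing it with the current state, as sample_reward() below does.
# Assumes a discrete-state environment such as the FrozenLake8x8-v0 default used here.
def _demo_policy_lookup(env):
    policy = get_random_policy(env)
    state = env.reset()
    return policy[state]  # the action the policy prescribes for the initial state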
def sample_reward(env, policy, t_max=100):
"""
Interact with an environment, return sum of all rewards.
If game doesn't end on t_max (e.g. agent walks into a wall),
force end the game and return whatever reward you got so far.
Tip: see signature of env.step(...) method above.
"""
s = env.reset()
total_reward = 0
for _ in range(t_max):
action = policy[s]
s, reward, done, info = env.step(action)
total_reward += reward
if done:
break
return total_reward
def evaluate(sample_func, env, policy, n_times=100):
"""Run several evaluations and average the score the policy gets."""
rewards = [sample_func(env, policy) for _ in range(n_times)]
return float(np.mean(rewards))
def crossover(env, policy1, policy2, p=0.5, prioritize_func=None):
"""
for each state, with probability p take action from policy1, else policy2
"""
if prioritize_func is not None:
p = prioritize_func(env, policy1, policy2, p)
    # index 1 in np.choose (drawn with probability p) selects policy1; index 0 selects policy2
    return np.choose(
        (np.random.random_sample(policy1.shape[0]) <= p).astype(int), [policy2, policy1])
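# Editor's illustrative check (not part of the original script): p is the probability
# that each state's action is inherited from policy1, which can be verified empirically
# with two constant dummy "policies" (env is unused when no prioritize_func is given).
def _demo_crossover_fraction(p=0.3, n=10000):
    ones = np.ones(n, dtype=int)
    zeros = np.zeros(n, dtype=int)
    child = crossover(None, ones, zeros, p=p)
    return float(child.mean())  # ~= p, the fraction of entries taken from policy1 (`ones`)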
def mutation(env, policy, p=0.1):
"""
for each state, with probability p replace action with random action
Tip: mutation can be written as crossover with random policy
"""
return crossover(env, get_random_policy(env), policy, p)
def run(env, n_episodes, max_steps,
pool_size, n_crossovers, n_mutations,
seed=42, verbose=False, api_key=None):
random.seed(seed)
np.random.seed(seed)
env_name = env
env = gym.make(env).env
env.reset()
if api_key is not None:
env = gym.wrappers.Monitor(env, "/tmp/" + env_name, force=True)
if verbose:
print("initializing...")
pool = [get_random_policy(env) for _ in range(pool_size)]
rewards = np.zeros(n_episodes)
tr = trange(
n_episodes,
desc="best score: {:.4}".format(0.0),
leave=True)
def sample_func(env, policy):
return sample_reward(
env, policy, t_max=max_steps if api_key is None else int(1e10))
def prioritize_func(env, policy1, policy2, p):
return min(
p * evaluate(sample_func, env, policy1) / (evaluate(sample_func, env, policy2) + 0.001),
1.0)
for i_epoch in tr:
crossovered = [
crossover(env, random.choice(pool), random.choice(pool),
prioritize_func=prioritize_func)
for _ in range(n_crossovers)]
mutated = [mutation(env, random.choice(pool)) for _ in range(n_mutations)]
assert type(crossovered) == type(mutated) == list
# add new policies to the pool
pool = pool + crossovered + mutated
pool_scores = list(map(lambda x: evaluate(sample_func, env, x), pool))
# select pool_size best policies
selected_indices = np.argsort(pool_scores)[-pool_size:]
pool = [pool[i] for i in selected_indices]
pool_scores = [pool_scores[i] for i in selected_indices]
# print the best policy so far (last in ascending score order)
tr.set_description("best score: {:.4}".format(pool_scores[-1]))
rewards[i_epoch] = pool_scores[-1]
print("Avg rewards over {} episodes: {:.4f} +/-{:.4f}".format(
n_episodes, np.mean(rewards), np.std(rewards)))
if api_key is not None:
env.close()
gym.upload("/tmp/" + env_name, api_key=api_key)
def _parse_args():
parser = argparse.ArgumentParser(description='Policy iteration example')
parser.add_argument(
'--env',
type=str,
default='FrozenLake8x8-v0',
help='The environment to use')
parser.add_argument(
'--num_episodes',
type=int,
default=200,
help='Number of episodes')
parser.add_argument(
'--max_steps',
type=int,
default=200,
help='Max number per episode')
parser.add_argument(
'--pool_size',
type=int,
default=200,
help='Population size')
parser.add_argument(
'--n_crossovers',
type=int,
default=100,
help='Number of crossovers per episode')
parser.add_argument(
'--n_mutations',
type=int,
default=100,
help='Number of mutations per episode')
parser.add_argument(
'--seed',
type=int,
default=42)
parser.add_argument(
'--verbose',
action='store_true',
default=False)
parser.add_argument(
'--api_key',
type=str,
default=None)
args, _ = parser.parse_known_args()
return args
def main():
args = _parse_args()
run(args.env, args.num_episodes, args.max_steps,
args.pool_size, args.n_crossovers, args.n_mutations,
args.seed, args.verbose, args.api_key)
if __name__ == '__main__':
main()
| mit | 801,298,826,288,472,000 | 28.431694 | 100 | 0.604716 | false |
Adrianacmy/Classic-Interesting-CS-Mini-Programs | old/reverse_dict.py | 1 | 2014 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Sun May 15
@author: Adrianacmy
Create a function reverse that takes in a dictionary and reverses it, such that
all of the values become keys and all of the keys become values. Be careful: we
do not wish to lose any information. Consider what to do if the original
dictionary has lists of values for a particular key, or has duplicate values
for some keys.
'''
# def format_dic_value(dict):
# '''format the single elements value list if it is necessary'''
# nw_dict = {}
# for k, v in dict.items():
# if len(v) == 1 and type(v) == list:
# nw_dict[k] = ''.join(v)
# else:
# nw_dict[k] = v
# return nw_dict
def convert_to_simple_list(lst, nw_list=None):
    '''
    Convert a multi-dimensional list to a one-dimensional list.
    lst: any list
    nw_list: one-dimensional result list, may start as empty or be omitted
    return: a one-dimensional list
    '''
    # use None as the default to avoid sharing one mutable list between calls
    if nw_list is None:
        nw_list = []
    for a in lst:
        if type(a) == list:
            # pass nw_list down so nested items land in the caller's list
            convert_to_simple_list(a, nw_list)
        else:
            nw_list.append(a)
    return nw_list
# lst = ['a', 'b', 'c', [1,2,3], 'abc']
# print(convert_to_simple_list(lst))
def add_dic_val(dic, k, v):
'''
add elements or values to a dictionary.
dic: an empty dictionary
k: a key
v: a value
'''
dic[k] = dic.get(k, [])
if not v in dic[k]:
dic[k].append(v)
def reverse_dict(d):
'''reverse keys and values in a dictionary'''
r = {} #reversed dictionary
for k, v in d.items():
nw_lst = []
if type(v) == list:
value_list = convert_to_simple_list(v, nw_lst)
# if value_list:
for val in value_list:
add_dic_val(r, val, k)
else:
add_dic_val(r, v, k)
return r
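# Editor's illustrative example (not part of the original module): duplicate values
# collapse onto one key without losing information, and list values are flattened, e.g.
# reverse_dict({1: 'a', 2: 'a', 3: ['x', 'y']}) -> {'a': [1, 2], 'x': [3], 'y': [3]}
def _reverse_dict_example():
    return reverse_dict({1: 'a', 2: 'a', 3: ['x', 'y']})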
def main():
d = {1: 'a', 4: ['abc', 'egf'], 5: '',(1, 6): 'abc', 2:[1, 2, 3, [1, 2]], 8: ['', 2]}
print(reverse_dict(d))
if __name__ == "__main__":
main()
| mit | -2,144,025,257,464,626,000 | 22.149425 | 89 | 0.541708 | false |
cloudbau/nova | nova/tests/conductor/test_conductor.py | 1 | 81806 | # Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import mox
from nova.api.ec2 import ec2utils
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.compute import test_compute
from nova.tests import fake_instance
from nova.tests import fake_instance_actions
from nova.tests import fake_notifier
from nova.tests.objects import test_migration
from nova import utils
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
        if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
migration_p = jsonutils.to_primitive(migration)
migration = self.conductor.migration_update(self.context, migration_p,
'finished')
self.assertEqual(migration['status'], 'finished')
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'])
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertTrue(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertFalse(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get(self):
aggregate_ref = self._setup_aggregate_with_host()
aggregate = self.conductor.aggregate_get(self.context,
aggregate_ref['id'])
self.assertEqual(jsonutils.to_primitive(aggregate_ref), aggregate)
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get_by_host(self):
self._setup_aggregate_with_host()
aggregates = self.conductor.aggregate_get_by_host(self.context, 'bar')
self.assertEqual(aggregates[0]['availability_zone'], 'foo')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args)
self.assertEqual(result, 'foo')
def test_security_group_get_by_instance(self):
fake_inst = {'uuid': 'fake-instance'}
self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
db.security_group_get_by_instance(
self.context, fake_inst['uuid']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_get_by_instance(self.context,
fake_inst)
self.assertEqual(result, 'it worked')
def test_security_group_rule_get_by_security_group(self):
fake_secgroup = {'id': 'fake-secgroup'}
self.mox.StubOutWithMock(db,
'security_group_rule_get_by_security_group')
db.security_group_rule_get_by_security_group(
self.context, fake_secgroup['id']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_rule_get_by_security_group(
self.context, fake_secgroup)
self.assertEqual(result, 'it worked')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_destroy(self.context, {'uuid': 'fake-uuid'})
def test_instance_info_cache_delete(self):
self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
db.instance_info_cache_delete(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_info_cache_delete(self.context,
{'uuid': 'fake-uuid'})
def test_flavor_get(self):
self.mox.StubOutWithMock(db, 'flavor_get')
db.flavor_get(self.context, 'fake-id').AndReturn('fake-type')
self.mox.ReplayAll()
result = self.conductor.instance_type_get(self.context, 'fake-id')
self.assertEqual(result, 'fake-type')
def test_vol_get_usage_by_time(self):
self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
'fake-usage')
self.mox.ReplayAll()
result = self.conductor.vol_get_usage_by_time(self.context,
'fake-time')
self.assertEqual(result, 'fake-usage')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst,
'fake-update-time', False)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('conductor.%s' % self.conductor_manager.host,
msg.publisher_id)
self.assertEqual('volume.usage', msg.event_type)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], 'fake-values',
False).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
'fake-values', False)
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertEqual(result, None)
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host')
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
def test_notify_usage_exists(self):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = {
'system_metadata': [],
}
self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
notifications.bandwidth_usage(instance, 'start', True).AndReturn(
'bw_usage')
notifier = self.conductor_manager.notifier
compute_utils.notify_about_instance_usage(notifier,
self.context, instance,
'exists',
system_metadata={},
extra_usage_info=info)
self.mox.ReplayAll()
self.conductor.notify_usage_exists(self.context, instance,
system_metadata={},
extra_usage_info=dict(extra='info'))
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_quota_commit(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
quota.QUOTAS.commit(self.context, 'reservations', project_id=None,
user_id=None)
quota.QUOTAS.commit(self.context, 'reservations', project_id='proj',
user_id='user')
self.mox.ReplayAll()
self.conductor.quota_commit(self.context, 'reservations')
self.conductor.quota_commit(self.context, 'reservations', 'proj',
'user')
def test_quota_rollback(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.rollback(self.context, 'reservations', project_id=None,
user_id=None)
quota.QUOTAS.rollback(self.context, 'reservations', project_id='proj',
user_id='user')
self.mox.ReplayAll()
self.conductor.quota_rollback(self.context, 'reservations')
self.conductor.quota_rollback(self.context, 'reservations', 'proj',
'user')
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_instance_info_cache_update(self):
fake_values = {'key1': 'val1', 'key2': 'val2'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_info_cache_update(self.context, 'fake-uuid',
fake_values)
self.mox.ReplayAll()
self.conductor.instance_info_cache_update(self.context,
fake_inst,
fake_values)
def test_migration_get(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
self.assertEqual(jsonutils.to_primitive(migration),
self.conductor.migration_get(self.context,
migration['id']))
def test_migration_get_unconfirmed_by_dest_compute(self):
self.mox.StubOutWithMock(db,
'migration_get_unconfirmed_by_dest_compute')
db.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
self.mox.ReplayAll()
self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
def test_compute_confirm_resize(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'confirm_resize')
self.conductor_manager.compute_api.confirm_resize(
self.context, 'instance', migration='migration')
self.mox.ReplayAll()
self.conductor.compute_confirm_resize(self.context, 'instance',
'migration')
def test_migration_create(self):
inst = {'uuid': 'fake-uuid',
'host': 'fake-host',
'node': 'fake-node'}
self.mox.StubOutWithMock(db, 'migration_create')
db.migration_create(self.context.elevated(),
{'instance_uuid': inst['uuid'],
'source_compute': inst['host'],
'source_node': inst['node'],
'fake-key': 'fake-value'}).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.migration_create(self.context, inst,
{'fake-key': 'fake-value'})
self.assertEqual(result, 'result')
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id', 'device_name': 'foo'}
fake_bdm2 = {'id': 'fake-id', 'device_name': 'foo2'}
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context, fake_bdm2,
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context,
fake_bdm2,
create=False)
db.block_device_mapping_update_or_create(
self.context, fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context,
fake_bdm2,
create=None)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_bdm2 = {'id': 'fake-bdm-2',
'instance_uuid': 'fake-uuid2',
'device_name': '',
'volume_id': 'fake-vol-id2'}
fake_inst = {'uuid': 'fake-uuid'}
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
self.mox.StubOutWithMock(cells_rpcapi, 'bdm_destroy_at_top')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
cells_rpcapi.bdm_destroy_at_top(self.context,
fake_bdm['instance_uuid'],
device_name=fake_bdm['device_name'])
db.block_device_mapping_destroy(self.context, 'fake-bdm-2')
cells_rpcapi.bdm_destroy_at_top(self.context,
fake_bdm2['instance_uuid'],
volume_id=fake_bdm2['volume_id'])
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
cells_rpcapi.bdm_destroy_at_top(self.context, fake_inst['uuid'],
device_name='fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
cells_rpcapi.bdm_destroy_at_top(self.context, fake_inst['uuid'],
volume_id='fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
[fake_bdm,
fake_bdm2])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host')
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node')
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
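# Helper (descriptive note): stub out the named db API with the given
# positional args, then verify that conductor.service_get_all_by(**condargs)
# returns the stubbed 'fake-result' (listified if requested), or that a
# stubbed db exception is raised wrapped in ClientException and, via
# ExceptionHelper, re-raised unwrapped.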
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(rpc_common.ClientException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def test_compute_confirm_resize_with_objects(self):
# use an instance object rather than a dict
instance = self._create_fake_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance)
migration = test_migration.fake_db_migration()
mig_obj = migration_obj.Migration._from_db_object(
self.context.elevated(), migration_obj.Migration(),
migration)
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'confirm_resize')
self.conductor_manager.compute_api.confirm_resize(
self.context, inst_obj, migration=mig_obj)
self.mox.ReplayAll()
self.conductor.compute_confirm_resize(self.context, inst_obj,
mig_obj)
def _test_object_action(self, is_classmethod, raise_exception):
class TestObject(obj_base.NovaObject):
def foo(self, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
obj = TestObject()
if is_classmethod:
result = self.conductor.object_class_action(
self.context, TestObject.obj_name(), 'bar', '1.0',
tuple(), {'raise_exception': raise_exception})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', tuple(),
{'raise_exception': raise_exception})
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(rpc_common.ClientException,
self._test_object_action, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(rpc_common.ClientException,
self._test_object_action, True, True)
def test_object_action_copies_object(self):
class TestObject(obj_base.NovaObject):
fields = {'dict': fields.DictOfStringsField()}
def touch_dict(self, context):
self.dict['foo'] = 'bar'
self.obj_reset_changes()
obj = TestObject()
obj.dict = {}
obj.obj_reset_changes()
updates, result = self.conductor.object_action(
self.context, obj, 'touch_dict', tuple(), {})
# NOTE(danms): If conductor did not properly copy the object, then
# the new and reference copies of the nested dict object will be
# the same, and thus 'dict' will not be reported as changed
self.assertIn('dict', updates)
self.assertEqual({'foo': 'bar'}, updates['dict'])
def test_aggregate_metadata_add(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
metadata = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
db.aggregate_metadata_add(
mox.IgnoreArg(), aggregate['id'], metadata, False).AndReturn(
metadata)
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_add(self.context,
aggregate,
metadata)
self.assertEqual(result, metadata)
def test_aggregate_metadata_delete(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
self.mox.ReplayAll()
self.conductor.aggregate_metadata_delete(self.context, aggregate,
'fake')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, fake_bdm)
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
db.block_device_mapping_update_or_create(self.context, fake_bdm)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
bdms=[fake_bdm])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
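# Helper (descriptive note): stub out the named db API and check that the
# RPC API's service_get_all_by(**condargs) returns the stubbed result
# (listified if requested) or re-raises the stubbed db exception class.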
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
# NOTE(danms): the public API takes actual keyword arguments,
# so override the base class here to make the call correctly
return self.conductor.instance_update(self.context, instance_uuid,
**updates)
def test_bw_usage_get(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_get(*get_args)
self.assertEqual(result, 'foo')
def test_block_device_mapping_update_or_create(self):
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, 'fake-bdm')
db.block_device_mapping_update(self.context,
'fake-id', {'id': 'fake-id'})
db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
self.mox.ReplayAll()
self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
self.conductor.block_device_mapping_update_or_create(self.context,
'fake-bdm')
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context, [fake_bdm])
self.conductor.block_device_mapping_destroy_by_instance_and_device(
self.context, fake_inst, 'fake-device')
self.conductor.block_device_mapping_destroy_by_instance_and_volume(
self.context, fake_inst, 'fake-volume')
def _test_stubbed(self, name, *args, **kwargs):
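# Helper (descriptive note): stub out the named db API and verify that the
# same-named method on the conductor API passes the arguments through,
# returning 'fake-result' (or None when returns=False) or raising the
# stubbed db exception.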
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
args = args[1:]
else:
ctxt = self.context
db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(ctxt, *args).AndRaise(db_exception)
else:
getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
# TODO(russellb) This is a hack ... setUp() starts the conductor
# service. There is a cleanup step that runs after this test which
# also deletes the associated service record. This involves a call
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
getattr(self.conductor, name),
self.context, *args)
else:
result = getattr(self.conductor, name)(self.context, *args)
self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host', 'host',
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args', 'host', 'binary',
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_service_create(self):
self._test_stubbed('service_create', {})
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
ctxt = self.context
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(ctxt, '', {}).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_update(self.context, {'id': ''}, {})
self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
self._test_stubbed('instance_get_all_by_host_and_node',
self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(), 'host',
None).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context,
'host')
self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
timeouts = []
calls = dict(count=0)
def fake_ping(context, message, timeout):
timeouts.append(timeout)
calls['count'] += 1
if calls['count'] < 15:
raise rpc_common.Timeout("fake")
self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
self.conductor.wait_until_ready(self.context)
self.assertEqual(timeouts.count(10), 10)
self.assertIn(None, timeouts)
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
def test_client_exceptions(self):
instance = self._create_fake_instance()
# NOTE(danms): The LocalAPI should not raise exceptions wrapped
# in ClientException. KeyError should be raised if an invalid
# update key is passed, so use that to validate.
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.LocalComputeTaskAPI)
def test_import_conductor_rpc(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.API)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.ComputeTaskAPI)
def test_import_conductor_override_to_local(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(use_local=True),
conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
def fake_db_instance_update(self, *args, **kwargs):
return None, None
self.stubs.Set(db, 'instance_update_and_get_original',
fake_db_instance_update)
ctxt = context.RequestContext('fake-user', 'fake-project')
conductor = conductor_api.LocalAPI()
updates = {}
for key in conductor_manager.allowed_updates:
if key in conductor_manager.datetime_fields:
updates[key] = timeutils.utcnow()
else:
updates[key] = 'foo'
conductor.instance_update(ctxt, 'fake-instance', **updates)
def test_allowed_keys_are_real(self):
instance = models.Instance()
keys = list(conductor_manager.allowed_updates)
# NOTE(danms): expected_task_state is a parameter that gets
# passed to the db layer, but is not actually an instance attribute
del keys[keys.index('expected_task_state')]
for key in keys:
self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
def setUp(self):
super(_BaseTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_instance_actions.stub_out_action_events(self.stubs)
def test_live_migrate(self):
inst = fake_instance.fake_db_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
live_migrate.execute(self.context,
mox.IsA(instance_obj.Instance),
'destination',
'block_migration',
'disk_over_commit')
self.mox.ReplayAll()
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'live_migrate_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.live_migrate_instance(self.context, inst_obj,
'destination', 'block_migration', 'disk_over_commit')
else:
self.conductor.migrate_server(self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
def test_cold_migrate(self):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(
self.conductor_manager.compute_rpcapi, 'prep_resize')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'select_destinations')
inst = fake_instance.fake_db_instance(image_ref='image_ref')
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst, [])
flavor = flavors.get_default_flavor()
flavor['extra_specs'] = 'extra_specs'
request_spec = {'instance_type': flavor}
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'image_ref', mox.IsA(instance_obj.Instance)).AndReturn('image')
scheduler_utils.build_request_spec(
self.context, 'image',
[mox.IsA(instance_obj.Instance)],
instance_type=flavor).AndReturn(request_spec)
hosts = [dict(host='host1', nodename=None, limits={})]
self.conductor_manager.scheduler_rpcapi.select_destinations(
self.context, request_spec, {}).AndReturn(hosts)
filter_properties = {'limits': {}}
self.conductor_manager.compute_rpcapi.prep_resize(
self.context, 'image', mox.IsA(instance_obj.Instance),
mox.IsA(dict), 'host1', [], request_spec=request_spec,
filter_properties=filter_properties, node=None)
self.mox.ReplayAll()
scheduler_hint = {'filter_properties': {}}
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'resize_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.resize_instance(
self.context, inst_obj, {}, scheduler_hint, flavor, [])
else:
self.conductor.migrate_server(
self.context, inst_obj, scheduler_hint,
False, False, flavor, None, None, [])
def test_build_instances(self):
instance_type = flavors.get_default_flavor()
system_metadata = flavors.save_flavor_info({}, instance_type)
# NOTE(alaski): instance_type -> system_metadata -> instance_type
# loses some data (extra_specs). This build process is using
# scheduler/utils:build_request_spec() which extracts flavor from
# system_metadata and will re-query the DB for extra_specs, so
# we need to test this properly.
expected_instance_type = flavors.extract_flavor(
{'system_metadata': system_metadata})
expected_instance_type['extra_specs'] = 'fake-specs'
self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'run_instance')
db.flavor_extra_specs_get(
self.context,
instance_type['flavorid']).AndReturn('fake-specs')
self.conductor_manager.scheduler_rpcapi.run_instance(self.context,
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': {'system_metadata': system_metadata,
'uuid': 'fakeuuid'},
'instance_type': expected_instance_type,
'instance_uuids': ['fakeuuid', 'fakeuuid2'],
'block_device_mapping': 'block_device_mapping',
'security_group': 'security_groups',
'num_instances': 2},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks', is_first_time=True,
filter_properties={}, legacy_bdm_in_spec=False)
self.mox.ReplayAll()
self.conductor.build_instances(self.context,
instances=[{'uuid': 'fakeuuid',
'system_metadata': system_metadata},
{'uuid': 'fakeuuid2'}],
image={'fake_data': 'should_pass_silently'},
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_unshelve_instance_on_host(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'start_instance')
self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.compute_rpcapi.start_instance(self.context,
instance)
self.conductor_manager._delete_image(self.context,
'fake_image_id')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager, '_get_image')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager._get_image(self.context,
'fake_image_id').AndReturn('fake_image')
self.conductor_manager._schedule_instances(self.context,
'fake_image', filter_properties, instance).AndReturn(
[{'host': 'fake_host'}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', 'fake_image')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager, '_get_image')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager._get_image(self.context,
'fake_image_id').AndReturn(None)
self.conductor_manager._schedule_instances(self.context,
None, filter_properties, instance).AndReturn(
[{'host': 'fake_host'}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', None)
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
def setUp(self):
super(ConductorTaskTestCase, self).setUp()
self.conductor = conductor_manager.ComputeTaskManager()
self.conductor_manager = self.conductor
def test_migrate_server_fails_with_rebuild(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, "dummy", None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
def test_migrate_server_deals_with_expected_exceptions(self):
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = exc.DestinationHypervisorTooOld()
live_migrate.execute(self.context, mox.IsA(instance_obj.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.DestinationHypervisorTooOld,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_migrate_server_deals_with_unexpected_exceptions(self):
instance = fake_instance.fake_db_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = IOError()
live_migrate.execute(self.context, mox.IsA(instance_obj.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ERROR},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(IOError,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_set_vm_state_and_notify(self):
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
scheduler_utils.set_vm_state_and_notify(
self.context, 'compute_task', 'method', 'updates',
'ex', 'request_spec', self.conductor.db)
self.mox.ReplayAll()
self.conductor._set_vm_state_and_notify(
self.context, 'method', 'updates', 'ex', 'request_spec')
def test_cold_migrate_no_valid_host_back_in_active_state(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref')
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()))
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor.quotas, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_rpcapi.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.ACTIVE,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
self.conductor.quotas.rollback(self.context, resvs)
self.mox.ReplayAll()
self.conductor._cold_migrate(self.context, inst_obj,
'flavor', filter_props, resvs)
def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED)
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()))
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor.quotas, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_rpcapi.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
request_spec)
self.conductor.quotas.rollback(self.context, resvs)
self.mox.ReplayAll()
self.conductor._cold_migrate(self.context, inst_obj,
'flavor', filter_props, resvs)
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref')
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()))
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
hosts = [dict(host='host1', nodename=None, limits={})]
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(self.conductor.scheduler_rpcapi,
'select_destinations')
self.mox.StubOutWithMock(scheduler_utils,
'populate_filter_properties')
self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
'prep_resize')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor.quotas, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_service,
'fake-image_ref', mox.IsA(instance_obj.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_rpcapi.select_destinations(
self.context, request_spec, filter_props).AndReturn(hosts)
scheduler_utils.populate_filter_properties(filter_props,
hosts[0])
# context popped
expected_filter_props = dict()
# extra_specs popped
expected_request_spec = dict(instance_type=dict())
exc_info = test.TestingException('something happened')
self.conductor.compute_rpcapi.prep_resize(
self.context, image, inst_obj,
'flavor', hosts[0]['host'], resvs,
request_spec=expected_request_spec,
filter_properties=expected_filter_props,
node=hosts[0]['nodename']).AndRaise(exc_info)
updates = {'vm_state': vm_states.ERROR,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
'migrate_server',
updates, exc_info,
expected_request_spec)
self.conductor.quotas.rollback(self.context, resvs)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.conductor._cold_migrate,
self.context, inst_obj, 'flavor',
filter_props, resvs)
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
test_compute.BaseTestCase):
"""Conductor compute_task RPC namespace Tests."""
def setUp(self):
super(ConductorTaskRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_rpcapi.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
def setUp(self):
super(ConductorTaskAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
"""Conductor LocalComputeTaskAPI Tests."""
def setUp(self):
super(ConductorLocalComputeTaskAPITestCase, self).setUp()
self.conductor = conductor_api.LocalComputeTaskAPI()
self.conductor_manager = self.conductor._manager._target
| apache-2.0 | -7,662,453,531,337,650,000 | 45.218079 | 79 | 0.558688 | false |
tkaitchuck/nupic | py/nupic/research/fdrutilities.py | 1 | 62245 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from numpy import *
import random
import copy
import sys
import itertools
random.seed(42)
numpy.random.seed(42)
from nupic.bindings.math import (SM32, SparseBinaryMatrix)
###############################################################################
def setRandomSeed(seed):
""" Set the random seeds. Helpful to make unit tests repeatable"""
random.seed(seed)
numpy.random.seed(seed)
###############################################################################
def addNoise(input, noise=0.1, doForeground=True, doBackground=True):
"""
Add noise to the given input.
Parameters:
-----------------------------------------------
input: the input to add noise to
noise: how much noise to add
doForeground: If true, turn off some of the 1 bits in the input
doBackground: If true, turn on some of the 0 bits in the input
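Example (illustrative values only):
  noisy = addNoise(numpy.array([1, 0, 1, 0, 1]), noise=0.2)
  # With both doForeground and doBackground enabled, each bit is flipped
  # with probability ~0.2.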
"""
if doForeground and doBackground:
return numpy.abs(input - (numpy.random.random(input.shape) < noise))
else:
if doForeground:
return numpy.logical_and(input, numpy.random.random(input.shape) > noise)
if doBackground:
return numpy.logical_or(input, numpy.random.random(input.shape) < noise)
return input
################################################################################
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
"""
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
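Example (illustrative values only): build a 20 x 1000 sparse matrix in
which every row has exactly 40 randomly placed ones:
  coincs = generateCoincMatrix(nCoinc=20, length=1000, activity=40)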
"""
coincMatrix0 = SM32(int(nCoinc), int(length))
theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
for rowIdx in xrange(nCoinc):
coinc = numpy.array(random.sample(xrange(length),
activity), dtype=numpy.uint32)
coinc.sort()
coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
# This is the right code to use, it's faster, but it derails the unit
# testing of the pooling for now.
coincMatrix = SM32(int(nCoinc), int(length))
coincMatrix.initializeWithFixedNNZR(activity)
return coincMatrix0
###############################################################################
def generateVectors(numVectors=100, length=500, activity=50):
"""
Generate a list of random sparse distributed vectors. This is used to generate
training vectors for the spatial or temporal learner and to compare the predicted
output against.
It generates a list of 'numVectors' elements, each element has length 'length'
and has a total of 'activity' bits on.
Parameters:
-----------------------------------------------
numVectors: the number of vectors to generate
length: the length of each row
activity: the number of ones to put into each row.
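Example (illustrative values only): generate 10 dense numpy vectors of
length 100, each with 10 active bits:
  vectors = generateVectors(numVectors=10, length=100, activity=10)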
"""
vectors = []
coinc = numpy.zeros(length, dtype='int32')
indexList = range(length)
for i in xrange(numVectors):
coinc[:] = 0
coinc[random.sample(indexList, activity)] = 1
vectors.append(coinc.copy())
return vectors
###############################################################################
def generateSimpleSequences(nCoinc=10, seqLength=[5,6,7], nSeq=100):
"""
Generate a set of simple sequences. The elements of the sequences will be
integers from 0 to 'nCoinc'-1. The length of each sequence will be
randomly chosen from the 'seqLength' list.
Parameters:
-----------------------------------------------
nCoinc: the number of elements available to use in the sequences
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSeq: The number of sequences to generate
retval: a list of sequences. Each sequence is itself a list
containing the coincidence indices for that sequence.
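Example (illustrative values only): 20 sequences over elements 0..9, each
with a length drawn from (3, 4, 5):
  seqs = generateSimpleSequences(nCoinc=10, seqLength=[3, 4, 5], nSeq=20)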
"""
coincList = range(nCoinc)
seqList = []
for i in xrange(nSeq):
if max(seqLength) <= nCoinc:
seqList.append(random.sample(coincList, random.choice(seqLength)))
else:
length = random.choice(seqLength)
seq = []
for x in xrange(length):
seq.append(random.choice(coincList))
seqList.append(seq)
return seqList
###############################################################################
def generateHubSequences(nCoinc=10, hubs = [2,6], seqLength=[5,6,7], nSeq=100):
"""
Generate a set of hub sequences. These are sequences which contain a hub
element in the middle. The elements of the sequences will be integers
from 0 to 'nCoinc'-1. The hub elements will only appear in the middle of
each sequence. The length of each sequence will be randomly chosen from the
'seqLength' list.
Parameters:
-----------------------------------------------
nCoinc: the number of elements available to use in the sequences
hubs: which of the elements will be used as hubs.
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSeq: The number of sequences to generate
retval: a list of sequences. Each sequence is itself a list
containing the coincidence indices for that sequence.
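Example (illustrative values only): each sequence gets hub 4 or 7 inserted
at its midpoint; the other elements are drawn from the non-hub set:
  seqs = generateHubSequences(nCoinc=10, hubs=[4, 7], seqLength=[5, 7], nSeq=20)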
"""
coincList = range(nCoinc)
for hub in hubs:
coincList.remove(hub)
seqList = []
for i in xrange(nSeq):
length = random.choice(seqLength)-1
seq = random.sample(coincList,length)
seq.insert(length//2, random.choice(hubs))
seqList.append(seq)
return seqList
def genTestSeqsForLookback(nPatterns=10, patternLen=500, patternActivity=50,
seqLength=[5,6,7], nSequences=50):
"""
Generate two sets of sequences. The first set of sequences is used to train
the sequence learner till it fills up capacity. The second set is then used
to further train the system to test its generalization capability using the
one-step look-back idea. The second set of sequences is generated by modifying
the first set.
Parameters:
-----------------------------------------------
nPatterns: the number of patterns to use in the sequences.
patternLen: The number of elements in each pattern
patternActivity: The number of elements that should be active in
each pattern
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSequences: The number of simple sequences in the first set
retval: (seqList1, seqList2, patterns)
seqList1, seqList2: a list of sequences. Each sequence is itself a list
containing the input pattern indices for that sequence.
patterns: the input patterns used in the seqList.
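Example (illustrative values only):
  (trainSeqs, testSeqs, patterns) = genTestSeqsForLookback(nPatterns=10,
      patternLen=500, patternActivity=50, seqLength=[5, 6, 7], nSequences=50)
  # testSeqs differs from trainSeqs only in the first element of each sequence.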
"""
# Create the input patterns
patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
activity=patternActivity)
#patterns = generateSimpleCoincMatrix(nCoinc=nPatterns, length=patternLen,
# activity=patternActivity)
similarity = []
for i in xrange(nPatterns):
similarity.append(patterns.rightVecProd(patterns.getRow(i)))
similarity = numpy.array(similarity, dtype='int32')
print similarity
# Create the raw sequences
seqList1 = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
nSeq=nSequences)
#The second set of sequences are obtained by replacing just the first
#element in each sequence.
seqList2 = copy.deepcopy(seqList1)
for i in range(0,len(seqList2)):
seqList2[i][0] = random.randint(0,nPatterns-1)
#return ([range(6),[5,4,1,3,4]],[[7,1,2,3,4,5]],patterns)
return (seqList1, seqList2, patterns)
################################################################################
def generateSimpleCoincMatrix(nCoinc=10, length=500, activity=50):
"""
Generate a non-overlapping coincidence matrix. This is used to generate random
inputs to the temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
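Example (illustrative values only): nCoinc * activity must not exceed
length, otherwise the assertion below fails:
  coincs = generateSimpleCoincMatrix(nCoinc=5, length=500, activity=50)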
"""
assert nCoinc*activity<=length, "can't generate non-overlapping coincidences"
coincMatrix = SM32(0, length)
coinc = numpy.zeros(length, dtype='int32')
for i in xrange(nCoinc):
coinc[:] = 0
coinc[i*activity:(i+1)*activity] = 1
coincMatrix.addRow(coinc)
return coincMatrix
###############################################################################
def generateSequences(nPatterns=10, patternLen=500, patternActivity=50,
hubs=[2,6], seqLength=[5,6,7],
nSimpleSequences=50, nHubSequences=50):
"""
Generate a set of simple and hub sequences. A simple sequence contains
a randomly chosen set of elements from 0 to 'nCoinc-1'. A hub sequence
always contains a hub element in the middle of it.
Parameters:
-----------------------------------------------
nPatterns: the number of patterns to use in the sequences.
patternLen: The number of elements in each pattern
patternActivity: The number of elements that should be active in
each pattern
hubs: which of the elements will be used as hubs.
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSimpleSequences: The number of simple sequences to generate
nHubSequences: The number of hub sequences to generate
retval: (seqList, patterns)
seqList: a list of sequences. Each sequence is itself a list
containing the input pattern indices for that sequence.
patterns: the input patterns used in the seqList.
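Example (illustrative values only):
  (seqList, patterns) = generateSequences(nPatterns=10, patternLen=500,
      patternActivity=50, hubs=[2, 6], seqLength=[5, 6, 7],
      nSimpleSequences=20, nHubSequences=20)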
"""
# Create the input patterns
patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
activity=patternActivity)
# Create the raw sequences
seqList = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
nSeq=nSimpleSequences) + \
generateHubSequences(nCoinc=nPatterns, hubs=hubs, seqLength=seqLength,
nSeq=nHubSequences)
# Return results
return (seqList, patterns)
###############################################################################
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
nL1SimpleSequences=50, nL1HubSequences=50,
l1Pooling=4, perfectStability=False, spHysteresisFactor=1.0,
patternLen=500, patternActivity=50):
"""
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal pooler pair. The average on-time
of the outputs from the simulated TP is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal pooler and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TP. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
  spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList.
"""
# First, generate the L1 sequences
l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
nSeq=nL1SimpleSequences) + \
generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
seqLength=l1SeqLength, nSeq=nL1HubSequences)
# Generate the L2 SP output from those
spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
poolingTimeBelow=l1Pooling, outputWidth=patternLen,
activity=patternActivity, perfectStability=perfectStability,
spHysteresisFactor=spHysteresisFactor)
# Map the spOutput patterns into indices into a pattern matrix which we
# generate now.
outSeq = None
outSeqList = []
outPatterns = SM32(0, patternLen)
for pattern in spOutput:
# If we have a reset vector start a new sequence
if pattern.sum() == 0:
if outSeq is not None:
outSeqList.append(outSeq)
outSeq = []
continue
# See if this vector matches a pattern we've already seen before
patternIdx = None
if outPatterns.nRows() > 0:
      # Find the row with the most matching 1's.
matches = outPatterns.rightVecSumAtNZ(pattern)
outCoinc = matches.argmax().astype('uint32')
# See if its number of 1's is the same in the pattern and in the
# coincidence row. If so, it is an exact match
numOnes = pattern.sum()
if matches[outCoinc] == numOnes \
and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
patternIdx = outCoinc
# If no match, add this pattern to our matrix
if patternIdx is None:
outPatterns.addRow(pattern)
patternIdx = outPatterns.nRows() - 1
# Store the pattern index into the sequence
outSeq.append(patternIdx)
# Put in last finished sequence
if outSeq is not None:
outSeqList.append(outSeq)
# Return with the seqList and patterns matrix
return (outSeqList, outPatterns)
###############################################################################
def vectorsFromSeqList(seqList, patternMatrix):
"""
  Convert a list of sequences of pattern indices, and a pattern lookup table,
  into an array of patterns.
  Parameters:
  -----------------------------------------------
  seqList:       the sequences, given as indices into the patternMatrix
  patternMatrix: a SparseMatrix containing the possible patterns used in
                 the sequences.
"""
totalLen = 0
for seq in seqList:
totalLen += len(seq)
vectors = numpy.zeros((totalLen, patternMatrix.shape[1]), dtype='bool')
vecOffset = 0
for seq in seqList:
seq = numpy.array(seq, dtype='uint32')
for idx,coinc in enumerate(seq):
vectors[vecOffset] = patternMatrix.getRow(int(coinc))
vecOffset += 1
return vectors
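# A hypothetical round trip (sketch only, added example, not part of the
# original API): build a small non-overlapping pattern matrix, then expand two
# index sequences into a dense boolean array with one row per sequence element.
def _exampleVectorsFromSeqList():
  """Sketch only."""
  patterns = generateSimpleCoincMatrix(nCoinc=4, length=20, activity=3)
  seqList = [[0, 1, 2], [3, 2]]
  return vectorsFromSeqList(seqList, patterns)  # shape (5, 20)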
###############################################################################
# The following three functions are used in tests to compare two different
# TP instances.
def sameTPParams(tp1, tp2):
"""Given two TP instances, see if any parameters are different."""
result = True
for param in ["numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm",
"minThreshold", "newSynapseCount", "permanenceInc", "permanenceDec",
"permanenceMax", "globalDecay", "activationThreshold",
"doPooling", "segUpdateValidDuration", "seed",
"burnIn", "pamLength", "maxAge"]:
if getattr(tp1, param) != getattr(tp2,param):
print param,"is different"
print getattr(tp1, param), "vs", getattr(tp2,param)
result = False
return result
def sameSynapse(syn, synapses):
"""Given a synapse and a list of synapses, check whether this synapse
  exists in the list. A synapse is represented as [col, cell, permanence].
A synapse matches if col and cell are identical and the permanence value is
within 0.001."""
for s in synapses:
if (s[0]==syn[0]) and (s[1]==syn[1]) and (abs(s[2]-syn[2]) <= 0.001):
return True
return False
def sameSegment(seg1, seg2):
"""Return True if seg1 and seg2 are identical, ignoring order of synapses"""
result = True
# check sequence segment, total activations etc. In case any are floats,
# check that they are within 0.001.
for field in [1, 2, 3, 4, 5, 6]:
if abs(seg1[0][field] - seg2[0][field]) > 0.001:
result = False
# Compare number of synapses
if len(seg1[1:]) != len(seg2[1:]):
result = False
  # Now compare synapses, ignoring order of synapses. First flag any synapse
  # in seg2 with a non-positive permanence.
  for syn in seg2[1:]:
    if syn[2] <= 0:
      print "A synapse with zero permanence encountered"
      result = False
if result == True:
for syn in seg1[1:]:
if syn[2] <= 0:
print "A synapse with zero permanence encountered"
result = False
res = sameSynapse(syn, seg2[1:])
if res == False:
result = False
return result
def tpDiff(tp1, tp2, verbosity = 0, relaxSegmentTests =True):
"""
  Given two TP instances, list the differences between them and return False
  if there is a difference. This function checks the major parameters. If this
  passes, it checks the number of segments on each cell. If this passes, it
  checks each synapse on each segment.
When comparing C++ and Py, the segments are usually in different orders in the
cells. tpDiff ignores segment order when comparing TP's.
"""
# First check basic parameters. If we fail here, don't continue
if sameTPParams(tp1, tp2) == False:
print "Two TP's have different parameters"
return False
result = True
# Compare states at t first, they usually diverge before the structure of the
# cells starts diverging
if (tp1.activeState['t'] != tp2.activeState['t']).any():
print 'Active states diverge', numpy.where(tp1.activeState['t'] != tp2.activeState['t'])
result = False
if (tp1.predictedState['t'] - tp2.predictedState['t']).any():
print 'Predicted states diverge', numpy.where(tp1.predictedState['t'] != tp2.predictedState['t'])
result = False
# TODO: check confidence at T (confT)
# Now check some high level learned parameters.
if tp1.getNumSegments() != tp2.getNumSegments():
print "Number of segments are different", tp1.getNumSegments(), tp2.getNumSegments()
result = False
if tp1.getNumSynapses() != tp2.getNumSynapses():
print "Number of synapses are different", tp1.getNumSynapses(), tp2.getNumSynapses()
tp1.printCells()
tp2.printCells()
result = False
# Check that each cell has the same number of segments and synapses
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
if tp1.getNumSegmentsInCell(c, i) != tp2.getNumSegmentsInCell(c, i):
print "Num segments different in cell:",c,i,
print tp1.getNumSegmentsInCell(c, i), tp2.getNumSegmentsInCell(c, i)
result = False
# If the above tests pass, then check each segment and report differences
# Note that segments in tp1 can be in a different order than tp2. Here we
# make sure that, for each segment in tp1, there is an identical segment
# in tp2.
if result == True and not relaxSegmentTests:
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
nSegs = tp1.getNumSegmentsInCell(c, i)
for segIdx in xrange(nSegs):
tp1seg = tp1.getSegmentOnCell(c, i, segIdx)
# Loop through all segments in tp2seg and see if any of them match tp1seg
res = False
for tp2segIdx in xrange(nSegs):
tp2seg = tp2.getSegmentOnCell(c, i, tp2segIdx)
if sameSegment(tp1seg, tp2seg) == True:
res = True
break
if res == False:
print "\nSegments are different for cell:",c,i
if verbosity >= 1:
print "C++"
tp1.printCell(c,i)
print "Py"
tp2.printCell(c,i)
result = False
if result == True and (verbosity > 1):
print "TP's match"
return result
def tpDiff2(tp1, tp2, verbosity = 0, relaxSegmentTests =True,
checkLearn = True, checkStates = True):
"""
  Given two TP instances, list the differences between them and return False
  if there is a difference. This function checks the major parameters. If this
  passes (and checkLearn is true) it checks the number of segments on each cell.
  If this passes, it checks each synapse on each segment.
When comparing C++ and Py, the segments are usually in different orders in the
cells. tpDiff ignores segment order when comparing TP's.
If checkLearn is True, will check learn states as well as all the segments
If checkStates is True, will check the various state arrays
"""
# First check basic parameters. If we fail here, don't continue
if sameTPParams(tp1, tp2) == False:
print "Two TP's have different parameters"
return False
tp1Label = "<tp_1 (%s)>" % tp1.__class__.__name__
tp2Label = "<tp_2 (%s)>" % tp2.__class__.__name__
result = True
if checkStates:
# Compare states at t first, they usually diverge before the structure of the
# cells starts diverging
if (tp1.infActiveState['t'] != tp2.infActiveState['t']).any():
print 'Active states diverged', numpy.where(tp1.infActiveState['t'] != tp2.infActiveState['t'])
result = False
if (tp1.infPredictedState['t'] - tp2.infPredictedState['t']).any():
print 'Predicted states diverged', numpy.where(tp1.infPredictedState['t'] != tp2.infPredictedState['t'])
result = False
if checkLearn and (tp1.lrnActiveState['t'] - tp2.lrnActiveState['t']).any():
print 'lrnActiveState[t] diverged', numpy.where(tp1.lrnActiveState['t'] != tp2.lrnActiveState['t'])
result = False
if checkLearn and (tp1.lrnPredictedState['t'] - tp2.lrnPredictedState['t']).any():
print 'lrnPredictedState[t] diverged', numpy.where(tp1.lrnPredictedState['t'] != tp2.lrnPredictedState['t'])
result = False
if checkLearn and abs(tp1.getAvgLearnedSeqLength() - tp2.getAvgLearnedSeqLength()) > 0.01:
print "Average learned sequence lengths differ: ",
print tp1.getAvgLearnedSeqLength()," vs ", tp2.getAvgLearnedSeqLength()
result = False
# TODO: check confidence at T (confT)
# Now check some high level learned parameters.
if tp1.getNumSegments() != tp2.getNumSegments():
print "Number of segments are different", tp1.getNumSegments(), tp2.getNumSegments()
result = False
if tp1.getNumSynapses() != tp2.getNumSynapses():
print "Number of synapses are different", tp1.getNumSynapses(), tp2.getNumSynapses()
if verbosity >= 3:
print "%s: " % tp1Label,
tp1.printCells()
print "\n%s : " % tp2Label,
tp2.printCells()
#result = False
# Check that each cell has the same number of segments and synapses
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
if tp1.getNumSegmentsInCell(c, i) != tp2.getNumSegmentsInCell(c, i):
print "Num segments different in cell:",c,i,
print tp1.getNumSegmentsInCell(c, i), tp2.getNumSegmentsInCell(c, i)
result = False
# If the above tests pass, then check each segment and report differences
# Note that segments in tp1 can be in a different order than tp2. Here we
# make sure that, for each segment in tp1, there is an identical segment
# in tp2.
if result == True and not relaxSegmentTests and checkLearn:
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
nSegs = tp1.getNumSegmentsInCell(c, i)
for segIdx in xrange(nSegs):
tp1seg = tp1.getSegmentOnCell(c, i, segIdx)
# Loop through all segments in tp2seg and see if any of them match tp1seg
res = False
for tp2segIdx in xrange(nSegs):
tp2seg = tp2.getSegmentOnCell(c, i, tp2segIdx)
if sameSegment(tp1seg, tp2seg) == True:
res = True
break
if res == False:
print "\nSegments are different for cell:",c,i
result = False
if verbosity >= 0:
print "%s : " % tp1Label,
tp1.printCell(c,i)
print "\n%s : " % tp2Label,
tp2.printCell(c,i)
if result == True and (verbosity > 1):
print "TP's match"
return result
###############################################################################
def spDiff(SP1,SP2):
"""
Function that compares two spatial pooler instances. Compares the
static variables between the two poolers to make sure that they are equivalent.
Parameters
-----------------------------------------
SP1 first spatial pooler to be compared
SP2 second spatial pooler to be compared
To establish equality, this function does the following:
    1. Compare the connected synapse matrices for each coincidence
    2. Compare the potential synapse matrices for each coincidence
    3. Compare the permanence matrices for each coincidence
    4. Compare the firing boosts between the two poolers.
    5. Compare the duty cycles before and after inhibition for both poolers
"""
if(len(SP1._masterConnectedM)!=len(SP2._masterConnectedM)):
print "Connected synapse matrices are different sizes"
return False
if(len(SP1._masterPotentialM)!=len(SP2._masterPotentialM)):
print "Potential synapse matrices are different sizes"
return False
if(len(SP1._masterPermanenceM)!=len(SP2._masterPermanenceM)):
print "Permanence matrices are different sizes"
return False
#iterate over cells
for i in range(0,len(SP1._masterConnectedM)):
#grab the Coincidence Matrices and compare them
connected1 = SP1._masterConnectedM[i]
connected2 = SP2._masterConnectedM[i]
if(connected1!=connected2):
print "Connected Matrices for cell %d different" % (i)
return False
#grab permanence Matrices and compare them
permanences1 = SP1._masterPermanenceM[i];
permanences2 = SP2._masterPermanenceM[i];
if(permanences1!=permanences2):
print "Permanence Matrices for cell %d different" % (i)
return False
#grab the potential connection Matrices and compare them
potential1 = SP1._masterPotentialM[i];
potential2 = SP2._masterPotentialM[i];
if(potential1!=potential2):
print "Potential Matrices for cell %d different" % (i)
return False
#Check firing boosts
if(not numpy.array_equal(SP1._firingBoostFactors,SP2._firingBoostFactors)):
print "Firing boost factors are different between spatial poolers"
return False
  #Check duty cycles after inhibition
if(not numpy.array_equal(SP1._dutyCycleAfterInh,SP2._dutyCycleAfterInh)):
print "Duty cycles after inhibition are different between spatial poolers"
return False
#Check duty cycles before inhibition
if(not numpy.array_equal(SP1._dutyCycleBeforeInh,SP2._dutyCycleBeforeInh)):
print "Duty cycles before inhibition are different between spatial poolers"
return False
print("Spatial Poolers are equivalent")
return True
###############################################################################
def removeSeqStarts(vectors, resets, numSteps=1):
"""
  Given a data matrix of sequences and the associated reset signal, return a
  copy of the data with the first 'numSteps' samples at the start of each
  sequence removed.
Parameters:
-----------------------------------------------
vectors: the data vectors. Row 0 contains the outputs from time
step 0, row 1 from time step 1, etc.
resets: the reset signal. This is a vector of booleans
the same length as the number of rows in 'vectors'. It
has a 1 where a sequence started and a 0 otherwise. The
first 'numSteps' rows of 'vectors' of each sequence will
not be included in the return result.
numSteps Number of samples to remove from the start of each sequence
retval: copy of vectors, with the first 'numSteps' samples at the
start of each sequence removed.
"""
# Do nothing if numSteps is 0
if numSteps == 0:
return vectors
resetIndices = resets.nonzero()[0]
removeRows = resetIndices
for i in range(numSteps-1):
removeRows = numpy.hstack((removeRows, resetIndices+i+1))
return numpy.delete(vectors, removeRows, axis=0)
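# A hypothetical example for removeSeqStarts (sketch only, added example, not
# part of the original API): two sequences start at rows 0 and 3, so with
# numSteps=1 those two rows are dropped and rows 1, 2 and 4 remain.
def _exampleRemoveSeqStarts():
  """Sketch only."""
  vectors = numpy.arange(10).reshape(5, 2)
  resets = numpy.array([1, 0, 0, 1, 0])
  return removeSeqStarts(vectors, resets, numSteps=1)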
###############################################################################
def _accumulateFrequencyCounts(values, freqCounts=None):
"""
Accumulate a list of values 'values' into the frequency counts 'freqCounts',
and return the updated frequency counts
For example, if values contained the following: [1,1,3,5,1,3,5], and the initial
freqCounts was None, then the return value would be:
[0,3,0,2,0,2]
which corresponds to how many of each value we saw in the input, i.e. there
were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's.
If freqCounts is not None, the values will be added to the existing counts and
  the length of the frequency counts will be automatically extended as necessary.
Parameters:
-----------------------------------------------
values: The values to accumulate into the frequency counts
freqCounts: Accumulated frequency counts so far, or none
"""
# How big does our freqCounts vector need to be?
values = numpy.array(values)
numEntries = values.max() + 1
if freqCounts is not None:
numEntries = max(numEntries, freqCounts.size)
# Where do we accumulate the results?
if freqCounts is not None:
if freqCounts.size != numEntries:
newCounts = numpy.zeros(numEntries, dtype='int32')
newCounts[0:freqCounts.size] = freqCounts
else:
newCounts = freqCounts
else:
newCounts = numpy.zeros(numEntries, dtype='int32')
# Accumulate the new values
for v in values:
newCounts[v] += 1
return newCounts
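# A hypothetical two-pass accumulation matching the docstring example above
# (sketch only, added example, not part of the original API).
def _exampleAccumulateFrequencyCounts():
  """Sketch only."""
  counts = _accumulateFrequencyCounts([1, 1, 3, 5, 1, 3, 5])  # -> [0, 3, 0, 2, 0, 2]
  counts = _accumulateFrequencyCounts([6, 6], counts)         # grows to length 7
  return counts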
###############################################################################
def _listOfOnTimesInVec(vector):
"""
Returns 3 things for a vector:
* the total on time
* the number of runs
* a list of the durations of each run.
  Example:
-----------------------------------------------
input stream: 11100000001100000000011111100000
return value: (11, 3, [3, 2, 6])
"""
# init counters
durations = []
numOnTimes = 0
totalOnTime = 0
# Find where the nonzeros are
nonzeros = numpy.array(vector).nonzero()[0]
# Nothing to do if vector is empty
if len(nonzeros) == 0:
return (0, 0, [])
# Special case of only 1 on bit
if len(nonzeros) == 1:
return (1, 1, [1])
# Count the consecutive non-zeros
prev = nonzeros[0]
onTime = 1
endIdx = nonzeros[-1]
for idx in nonzeros[1:]:
if idx != prev+1:
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
onTime = 1
else:
onTime += 1
prev = idx
# Add in the last one
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
return (totalOnTime, numOnTimes, durations)
###############################################################################
def _fillInOnTimes(vector, durations):
"""
Helper function used by averageOnTimePerTimestep. 'durations' is a vector
  which must be the same length as 'vector'. For each "on" in vector, it fills in
the corresponding element of duration with the duration of that "on" signal
up until that time
Parameters:
-----------------------------------------------
vector: vector of output values over time
durations: vector same length as 'vector', initialized to 0's.
                 This is filled in with the durations of each "on" signal.
Example:
vector: 11100000001100000000011111100000
durations: 12300000001200000000012345600000
"""
# Find where the nonzeros are
nonzeros = numpy.array(vector).nonzero()[0]
# Nothing to do if vector is empty
if len(nonzeros) == 0:
return
# Special case of only 1 on bit
if len(nonzeros) == 1:
durations[nonzeros[0]] = 1
return
# Count the consecutive non-zeros
prev = nonzeros[0]
onTime = 1
onStartIdx = prev
endIdx = nonzeros[-1]
for idx in nonzeros[1:]:
if idx != prev+1:
# Fill in the durations
durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
onTime = 1
onStartIdx = idx
else:
onTime += 1
prev = idx
# Fill in the last one
durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
###############################################################################
def averageOnTimePerTimestep(vectors, numSamples=None):
"""
Computes the average on-time of the outputs that are on at each time step, and
then averages this over all time steps.
  This metric is resilient to the number of outputs that are on at each time
step. That is, if time step 0 has many more outputs on than time step 100, it
won't skew the results. This is particularly useful when measuring the
average on-time of things like the temporal pooler output where you might
have many columns bursting at the start of a sequence - you don't want those
start of sequence bursts to over-influence the calculated average on-time.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns (scalar average on-time over all time steps,
list containing frequency counts of each encountered on-time)
"""
# Special case given a 1 dimensional vector: it represents a single column
if vectors.ndim == 1:
vectors.shape = (-1,1)
numTimeSteps = len(vectors)
numElements = len(vectors[0])
# How many samples will we look at?
if numSamples is not None:
import pdb; pdb.set_trace() # Test this....
countOn = numpy.random.randint(0, numElements, numSamples)
vectors = vectors[:, countOn]
# Fill in each non-zero of vectors with the on-time that that output was
# on for.
durations = numpy.zeros(vectors.shape, dtype='int32')
for col in xrange(vectors.shape[1]):
_fillInOnTimes(vectors[:,col], durations[:,col])
# Compute the average on time for each time step
sums = vectors.sum(axis=1)
sums.clip(min=1, max=numpy.inf, out=sums)
avgDurations = durations.sum(axis=1, dtype='float64') / sums
avgOnTime = avgDurations.sum() / (avgDurations > 0).sum()
# Generate the frequency counts for each duration
freqCounts = _accumulateFrequencyCounts(avgDurations)
return (avgOnTime, freqCounts)
###############################################################################
def averageOnTime(vectors, numSamples=None):
"""
Returns the average on-time, averaged over all on-time runs.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns: (scalar average on-time of all outputs,
list containing frequency counts of each encountered on-time)
"""
# Special case given a 1 dimensional vector: it represents a single column
if vectors.ndim == 1:
vectors.shape = (-1,1)
numTimeSteps = len(vectors)
numElements = len(vectors[0])
# How many samples will we look at?
if numSamples is None:
numSamples = numElements
countOn = range(numElements)
else:
countOn = numpy.random.randint(0, numElements, numSamples)
# Compute the on-times and accumulate the frequency counts of each on-time
# encountered
sumOfLengths = 0.0
onTimeFreqCounts = None
n = 0
for i in countOn:
(onTime, segments, durations) = _listOfOnTimesInVec(vectors[:,i])
if onTime != 0.0:
sumOfLengths += onTime
n += segments
onTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts)
# Return the average on time of each element that was on.
if n > 0:
return (sumOfLengths/n, onTimeFreqCounts)
else:
return (0.0, onTimeFreqCounts)
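# A hypothetical input for averageOnTime (sketch only, added example, not part
# of the original API): 5 time steps x 3 outputs; columns 0 and 1 each have one
# run of length 2 and column 2 has one run of length 3, so the average is 7/3.
def _exampleAverageOnTime():
  """Sketch only."""
  vectors = numpy.array([[1, 0, 1],
                         [1, 0, 1],
                         [0, 0, 1],
                         [0, 1, 0],
                         [0, 1, 0]], dtype='int32')
  return averageOnTime(vectors)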
###############################################################################
def plotOutputsOverTime(vectors, buVectors=None, title='On-times'):
"""
Generate a figure that shows each output over time. Time goes left to right,
and each output is plotted on a different line, allowing you to see the overlap
in the outputs, when they turn on/off, etc.
Parameters:
------------------------------------------------------------
vectors: the vectors to plot
buVectors: These are normally specified when plotting the pooling
outputs of the temporal pooler over time. The 'buVectors'
are the sequence outputs and the 'vectors' are the
pooling outputs. The buVector (sequence) outputs will be drawn
in a darker color than the vector (pooling) outputs to
distinguish where the cell is outputting due to pooling vs.
sequence memory.
title: title for the plot
"""
# Produce the plot
import pylab
pylab.ion()
pylab.figure()
imData = vectors.transpose()
if buVectors is not None:
assert(buVectors.shape == vectors.shape)
imData = imData.copy()
imData[buVectors.transpose().astype('bool')] = 2
pylab.imshow(imData, aspect='auto', cmap=pylab.cm.gray_r,
interpolation='nearest')
pylab.title(title)
###############################################################################
def plotHistogram(freqCounts, title='On-Times Histogram', xLabel='On-Time'):
"""
This is usually used to display a histogram of the on-times encountered
in a particular output.
  The freqCounts is a vector containing the frequency counts of each on-time
(starting at an on-time of 0 and going to an on-time = len(freqCounts)-1)
The freqCounts are typically generated from the averageOnTimePerTimestep
or averageOnTime methods of this module.
Parameters:
-----------------------------------------------
freqCounts: The frequency counts to plot
title: Title of the plot
"""
import pylab
pylab.ion()
pylab.figure()
pylab.bar(numpy.arange(len(freqCounts)) - 0.5, freqCounts)
pylab.title(title)
pylab.xlabel(xLabel)
###############################################################################
def populationStability(vectors, numSamples=None):
"""
Returns the stability for the population averaged over multiple time steps
Parameters:
-----------------------------------------------
vectors: the vectors for which the stability is calculated
  numSamples:  the number of time steps over which stability is counted
  At each time step, count the fraction of the active elements which are stable
  from the previous step, then average these fractions over all sampled steps.
"""
# ----------------------------------------------------------------------
# Calculate the stability
numVectors = len(vectors)
if numSamples is None:
numSamples = numVectors-1
countOn = range(numVectors-1)
else:
countOn = numpy.random.randint(0, numVectors-1, numSamples)
sigmap = 0.0
for i in countOn:
match = checkMatch(vectors[i], vectors[i+1], sparse=False)
# Ignore reset vectors (all 0's)
if match[1] != 0:
sigmap += float(match[0])/match[1]
return sigmap / numSamples
###############################################################################
def percentOutputsStableOverNTimeSteps(vectors, numSamples=None):
"""
Returns the percent of the outputs that remain completely stable over
N time steps.
Parameters:
-----------------------------------------------
vectors: the vectors for which the stability is calculated
numSamples: the number of time steps where stability is counted
For each window of numSamples, count how many outputs are active during
the entire window.
"""
# ----------------------------------------------------------------------
# Calculate the stability
totalSamples = len(vectors)
windowSize = numSamples
# Process each window
numWindows = 0
pctStable = 0
for wStart in range(0, totalSamples-windowSize+1):
# Count how many elements are active for the entire time
data = vectors[wStart:wStart+windowSize]
outputSums = data.sum(axis=0)
stableOutputs = (outputSums == windowSize).sum()
# Accumulated
samplePctStable = float(stableOutputs) / data[0].sum()
print samplePctStable
pctStable += samplePctStable
numWindows += 1
# Return percent average over all possible windows
return float(pctStable) / numWindows
###########################################################################
def computeSaturationLevels(outputs, outputsShape, sparseForm=False):
"""
Compute the saturation for a continuous level. This breaks the level into
multiple regions and computes the saturation level for each region.
Parameters:
--------------------------------------------
outputs: output of the level. If sparseForm is True, this is a list of
the non-zeros. If sparseForm is False, it is the dense
representation
outputsShape: The shape of the outputs of the level (height, width)
retval: (sat, innerSat):
sat: list of the saturation levels of each non-empty
region of the level (each 0 -> 1.0)
innerSat: list of the saturation level of each non-empty region
that is not near an edge (each 0 -> 1.0)
"""
# Get the outputs into a SparseBinaryMatrix
if not sparseForm:
outputs = outputs.reshape(outputsShape)
spOut = SM32(outputs)
else:
if len(outputs) > 0:
assert (outputs.max() < outputsShape[0] * outputsShape[1])
spOut = SM32(1, outputsShape[0] * outputsShape[1])
spOut.setRowFromSparse(0, outputs, [1]*len(outputs))
spOut.reshape(outputsShape[0], outputsShape[1])
# Get the activity in each local region using the nNonZerosPerBox method
# This method takes a list of the end row indices and a list of the end
# column indices.
# We will use regions that are 15x15, which give us about a 1/225 (.4%) resolution
# on saturation.
regionSize = 15
rows = xrange(regionSize+1, outputsShape[0]+1, regionSize)
cols = xrange(regionSize+1, outputsShape[1]+1, regionSize)
regionSums = spOut.nNonZerosPerBox(rows, cols)
# Get all the nonzeros out - those are our saturation sums
(locations, values) = regionSums.tolist()
values /= float(regionSize * regionSize)
sat = list(values)
# Now, to compute which are the inner regions, we will only take the ones that
# are surrounded by activity above, below, left and right
innerSat = []
locationSet = set(locations)
for (location, value) in itertools.izip(locations, values):
(row, col) = location
if (row-1,col) in locationSet and (row, col-1) in locationSet \
and (row+1, col) in locationSet and (row, col+1) in locationSet:
innerSat.append(value)
return (sat, innerSat)
################################################################################
def checkMatch(input, prediction, sparse=True, verbosity=0):
"""
Compares the actual input with the predicted input and returns results
Parameters:
-----------------------------------------------
input: The actual input
prediction: the predicted input
verbosity: If > 0, print debugging messages
sparse: If true, they are in sparse form (list of
active indices)
retval (foundInInput, totalActiveInInput, missingFromInput,
totalActiveInPrediction)
foundInInput: The number of predicted active elements that were
found in the actual input
totalActiveInInput: The total number of active elements in the input.
missingFromInput: The number of predicted active elements that were not
found in the actual input
totalActiveInPrediction: The total number of active elements in the prediction
"""
if sparse:
activeElementsInInput = set(input)
activeElementsInPrediction = set(prediction)
else:
activeElementsInInput = set(input.nonzero()[0])
activeElementsInPrediction = set(prediction.nonzero()[0])
totalActiveInPrediction = len(activeElementsInPrediction)
totalActiveInInput = len(activeElementsInInput)
foundInInput = len(activeElementsInPrediction.intersection(activeElementsInInput))
missingFromInput = len(activeElementsInPrediction.difference(activeElementsInInput))
missingFromPrediction = len(activeElementsInInput.difference(activeElementsInPrediction))
if verbosity >= 1:
print "preds. found in input:", foundInInput, "out of", totalActiveInPrediction,
print "; preds. missing from input:", missingFromInput, "out of", \
totalActiveInPrediction,
print "; unexpected active in input:", missingFromPrediction, "out of", \
totalActiveInInput
return (foundInInput, totalActiveInInput, missingFromInput,
totalActiveInPrediction)
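# A hypothetical sparse-form comparison for checkMatch (sketch only, added
# example, not part of the original API): two of the three predicted indices
# appear in the input, one is missing, and the input has four active elements,
# giving (2, 4, 1, 3).
def _exampleCheckMatch():
  """Sketch only."""
  return checkMatch([1, 2, 3, 4], [2, 3, 5], sparse=True)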
###############################################################################
def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0):
"""
Computes the predictive ability of a temporal pooler (TP). This routine returns
a value which is the average number of time steps of prediction provided
by the TP. It accepts as input the inputs, outputs, and resets provided to
  the TP as well as a 'minOverlapPct' used to evaluate whether or not a
  prediction is a good enough match to the actual input.
  The 'outputs' are the pooling outputs of the TP. This routine treats each output
  as a "manifold" that includes the active columns that should be present in the
  next N inputs. It then looks at each successive input and sees if its active
  columns are within the manifold. For each output sample, it computes how
  many time steps it can go forward on the input before the input overlap with
  the manifold is less than 'minOverlapPct'. It returns the average number of
time steps calculated for each output.
Parameters:
-----------------------------------------------
inputs: The inputs to the TP. Row 0 contains the inputs from time
step 0, row 1 from time step 1, etc.
resets: The reset input to the TP. Element 0 contains the reset from
time step 0, element 1 from time step 1, etc.
outputs: The pooling outputs from the TP. Row 0 contains the outputs
from time step 0, row 1 from time step 1, etc.
minOverlapPct: How much each input's columns must overlap with the pooling
output's columns to be considered a valid prediction.
retval: (Average number of time steps of prediction over all output
samples,
Average number of time steps of prediction when we aren't
cut short by the end of the sequence,
List containing frequency counts of each encountered
prediction time)
"""
# List of how many times we encountered each prediction amount. Element 0
# is how many times we successfully predicted 0 steps in advance, element 1
# is how many times we predicted 1 step in advance, etc.
predCounts = None
# Total steps of prediction over all samples
predTotal = 0
# Total number of samples
nSamples = len(outputs)
# Total steps of prediction for samples at the start of the sequence, or
# for samples whose prediction runs aren't cut short by the end of the
# sequence.
predTotalNotLimited = 0
nSamplesNotLimited = 0
# Compute how many cells/column we have
nCols = len(inputs[0])
nCellsPerCol = len(outputs[0]) // nCols
  # Evaluate prediction for each output sample
for idx in xrange(nSamples):
# What are the active columns for this output?
activeCols = outputs[idx].reshape(nCols, nCellsPerCol).max(axis=1)
# How many steps of prediction do we have?
steps = 0
while (idx+steps+1 < nSamples) and (resets[idx+steps+1] == 0):
overlap = numpy.logical_and(inputs[idx+steps+1], activeCols)
overlapPct = 100.0 * float(overlap.sum()) / inputs[idx+steps+1].sum()
if overlapPct >= minOverlapPct:
steps += 1
else:
break
# print "idx:", idx, "steps:", steps
# Accumulate into our total
predCounts = _accumulateFrequencyCounts([steps], predCounts)
predTotal += steps
# If this sample was not cut short by the end of the sequence, include
# it into the "NotLimited" runs
if resets[idx] or \
((idx+steps+1 < nSamples) and (not resets[idx+steps+1])):
predTotalNotLimited += steps
nSamplesNotLimited += 1
# Return results
return (float(predTotal) / nSamples,
float(predTotalNotLimited) / nSamplesNotLimited,
predCounts)
###############################################################################
def getCentreAndSpreadOffsets(spaceShape,
spreadShape,
stepSize=1):
"""
Generates centre offsets and spread offsets for block-mode based training
regimes - star, cross, block.
Parameters:
-----------------------------------------------
spaceShape: The (height, width) of the 2-D space to explore. This
sets the number of center-points.
spreadShape: The shape (height, width) of the area around each center-point
to explore.
stepSize: The step size. How big each step is, in pixels. This controls
*both* the spacing of the center-points within the block and the
points we explore around each center-point
retval: (centreOffsets, spreadOffsets)
"""
from nupic.math.cross import cross
# =====================================================================
# Init data structures
# What is the range on the X and Y offsets of the center points?
shape = spaceShape
# If the shape is (1,1), special case of just 1 center point
if shape[0] == 1 and shape[1] == 1:
centerOffsets = [(0,0)]
else:
xMin = -1 * (shape[1] // 2)
xMax = xMin + shape[1] - 1
xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)
yMin = -1 * (shape[0] // 2)
yMax = yMin + shape[0] - 1
yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)
centerOffsets = list(cross(yPositions, xPositions))
numCenterOffsets = len(centerOffsets)
print "centerOffsets:", centerOffsets
# What is the range on the X and Y offsets of the spread points?
shape = spreadShape
# If the shape is (1,1), special case of no spreading around each center
# point
if shape[0] == 1 and shape[1] == 1:
spreadOffsets = [(0,0)]
else:
xMin = -1 * (shape[1] // 2)
xMax = xMin + shape[1] - 1
xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)
yMin = -1 * (shape[0] // 2)
yMax = yMin + shape[0] - 1
yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)
spreadOffsets = list(cross(yPositions, xPositions))
# Put the (0,0) entry first
spreadOffsets.remove((0,0))
spreadOffsets.insert(0, (0,0))
numSpreadOffsets = len(spreadOffsets)
print "spreadOffsets:", spreadOffsets
return centerOffsets, spreadOffsets
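# A hypothetical call to getCentreAndSpreadOffsets (sketch only, added example,
# not part of the original API): a 2x2 grid of centre points, each explored over
# a 3x3 spread around it, with the (0,0) spread offset listed first.
def _exampleGetCentreAndSpreadOffsets():
  """Sketch only."""
  return getCentreAndSpreadOffsets(spaceShape=(2, 2), spreadShape=(3, 3),
                                   stepSize=1)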
###############################################################################
def makeCloneMap(columnsShape, outputCloningWidth, outputCloningHeight=-1):
"""Make a two-dimensional clone map mapping columns to clone master.
This makes a map that is (numColumnsHigh, numColumnsWide) big that can
be used to figure out which clone master to use for each column. Here are
a few sample calls
>>> makeCloneMap(columnsShape=(10, 6), outputCloningWidth=4)
(array([[ 0, 1, 2, 3, 0, 1],
[ 4, 5, 6, 7, 4, 5],
[ 8, 9, 10, 11, 8, 9],
[12, 13, 14, 15, 12, 13],
[ 0, 1, 2, 3, 0, 1],
[ 4, 5, 6, 7, 4, 5],
[ 8, 9, 10, 11, 8, 9],
[12, 13, 14, 15, 12, 13],
[ 0, 1, 2, 3, 0, 1],
[ 4, 5, 6, 7, 4, 5]], dtype=uint32), 16)
>>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3)
(array([[0, 1, 2, 0, 1, 2, 0, 1],
[3, 4, 5, 3, 4, 5, 3, 4],
[6, 7, 8, 6, 7, 8, 6, 7],
[0, 1, 2, 0, 1, 2, 0, 1],
[3, 4, 5, 3, 4, 5, 3, 4],
[6, 7, 8, 6, 7, 8, 6, 7],
[0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9)
>>> makeCloneMap(columnsShape=(7, 11), outputCloningWidth=5)
(array([[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],
[ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5],
[10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10],
[15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15],
[20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20],
[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],
[ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5]], dtype=uint32), 25)
>>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4)
(array([[ 0, 1, 2, 0, 1, 2, 0, 1],
[ 3, 4, 5, 3, 4, 5, 3, 4],
[ 6, 7, 8, 6, 7, 8, 6, 7],
[ 9, 10, 11, 9, 10, 11, 9, 10],
[ 0, 1, 2, 0, 1, 2, 0, 1],
[ 3, 4, 5, 3, 4, 5, 3, 4],
[ 6, 7, 8, 6, 7, 8, 6, 7]], dtype=uint32), 12)
The basic idea with this map is that, if you imagine things stretching off
to infinity, every instance of a given clone master is seeing the exact
same thing in all directions. That includes:
- All neighbors must be the same
- The "meaning" of the input to each of the instances of the same clone
master must be the same. If input is pixels and we have translation
invariance--this is easy. At higher levels where input is the output
of lower levels, this can be much harder.
- The "meaning" of the inputs to neighbors of a clone master must be the
same for each instance of the same clone master.
The best way to think of this might be in terms of 'inputCloningWidth' and
'outputCloningWidth'.
- The 'outputCloningWidth' is the number of columns you'd have to move
    horizontally (or vertically) before you get back to the same
clone that you started with. MUST BE INTEGRAL!
- The 'inputCloningWidth' is the 'outputCloningWidth' of the node below us.
    If we're getting input from a sensor where every element just represents
a shift of every other element, this is 1.
At a conceptual level, it means that if two different inputs are shown
to the node and the only difference between them is that one is shifted
horizontally (or vertically) by this many pixels, it means we are looking
at the exact same real world input, but shifted by some number of pixels
(doesn't have to be 1). MUST BE INTEGRAL!
At level 1, I think you could have this:
* inputCloningWidth = 1
* sqrt(coincToInputRatio^2) = 2.5
* outputCloningWidth = 5
...in this case, you'd end up with 25 masters.
Let's think about this case:
input: - - - 0 1 2 3 4 5 - - - - -
columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4
...in other words, input 0 is fed to both column 0 and column 1. Input 1
is fed to columns 2, 3, and 4, etc. Hopefully, you can see that you'll
get the exact same output (except shifted) with:
input: - - - - - 0 1 2 3 4 5 - - -
columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4
...in other words, we've shifted the input 2 spaces and the output shifted
5 spaces.
*** The outputCloningWidth MUST ALWAYS be an integral multiple of the ***
*** inputCloningWidth in order for all of our rules to apply. ***
*** NOTE: inputCloningWidth isn't passed here, so it's the caller's ***
*** responsibility to ensure that this is true. ***
*** The outputCloningWidth MUST ALWAYS be an integral multiple of ***
*** sqrt(coincToInputRatio^2), too. ***
@param columnsShape The shape (height, width) of the columns.
@param outputCloningWidth See docstring above.
@param outputCloningHeight If non-negative, can be used to make
rectangular (instead of square) cloning fields.
@return cloneMap An array (numColumnsHigh, numColumnsWide) that
contains the clone index to use for each
column.
@return numDistinctClones The number of distinct clones in the map. This
is just outputCloningWidth*outputCloningHeight.
"""
if outputCloningHeight < 0:
outputCloningHeight = outputCloningWidth
columnsHeight, columnsWidth = columnsShape
numDistinctMasters = outputCloningWidth * outputCloningHeight
a = numpy.empty((columnsHeight, columnsWidth), 'uint32')
for row in xrange(columnsHeight):
for col in xrange(columnsWidth):
a[row, col] = (col % outputCloningWidth) + \
(row % outputCloningHeight) * outputCloningWidth
return a, numDistinctMasters
##############################################################################
def numpyStr(array, format='%f', includeIndices=False, includeZeros=True):
""" Pretty print a numpy matrix using the given format string for each
value. Return the string representation
Parameters:
------------------------------------------------------------
array: The numpy array to print. This can be either a 1D vector or 2D matrix
format: The format string to use for each value
includeIndices: If true, include [row,col] label for each value
includeZeros: Can only be set to False if includeIndices is on.
If True, include 0 values in the print-out
If False, exclude 0 values from the print-out.
"""
shape = array.shape
assert (len(shape) <= 2)
items = ['[']
if len(shape) == 1:
if includeIndices:
format = '%d:' + format
if includeZeros:
rowItems = [format % (c,x) for (c,x) in enumerate(array)]
else:
rowItems = [format % (c,x) for (c,x) in enumerate(array) if x != 0]
else:
rowItems = [format % (x) for x in array]
items.extend(rowItems)
else:
(rows, cols) = shape
if includeIndices:
format = '%d,%d:' + format
for r in xrange(rows):
if includeIndices:
rowItems = [format % (r,c,x) for c,x in enumerate(array[r])]
else:
rowItems = [format % (x) for x in array[r]]
if r > 0:
items.append('')
items.append('[')
items.extend(rowItems)
if r < rows-1:
items.append(']\n')
else:
items.append(']')
items.append(']')
return ' '.join(items)
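# A hypothetical formatting call for numpyStr (sketch only, added example, not
# part of the original API): indices are included and zeros are skipped, so the
# result is the string '[ 1:2 3:5 ]'.
def _exampleNumpyStr():
  """Sketch only."""
  return numpyStr(numpy.array([0, 2, 0, 5]), format='%d',
                  includeIndices=True, includeZeros=False)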
###############################################################################
if __name__=='__main__':
testStability(numOrigVectors=10, length=500, activity=50,morphTime=3)
from IPython.Shell import IPShellEmbed; IPShellEmbed()() | gpl-3.0 | -4,347,953,636,782,061,600 | 36.429946 | 114 | 0.6196 | false |
googleapis/python-monitoring-dashboards | noxfile.py | 1 | 6959 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
"unit",
"system",
"cover",
"lint",
"lint_setup_py",
"blacken",
"docs",
]
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black", "--check", *BLACK_PATHS,
)
session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
"""Run black. Format code to uniform standard."""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
session.install("asyncmock", "pytest-asyncio", "-c", constraints_path)
session.install("mock", "pytest", "pytest-cov", "-c", constraints_path)
session.install("-e", ".", "-c", constraints_path)
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google/cloud",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Install pyopenssl for mTLS testing.
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
session.install("pyopenssl")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
session.install("-e", ".", "-c", constraints_path)
# Run py.test against the system tests.
if system_test_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_path,
*session.posargs,
)
if system_test_folder_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_folder_path,
*session.posargs,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=96")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx==4.0.1", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
# "-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install(
"sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
)
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| apache-2.0 | 7,822,627,594,197,761,000 | 29.125541 | 87 | 0.615318 | false |
blancltd/django-latest-tweets | latest_tweets/utils.py | 1 | 6404 | from __future__ import unicode_literals
from datetime import datetime
import hashlib
from tempfile import TemporaryFile
from django.core.files import File
from django.utils.six.moves import html_parser
from django.utils.timezone import utc
from PIL import Image
import requests
from .models import Hashtag, Like, Photo, Tweet
HASHTAG_HTML = '<a href="https://twitter.com/hashtag/{text}" target="_blank">#{text}</a>'
URL_HTML = '<a href="{expanded_url}" target="_blank">{display_url}</a>'
MENTION_HTML = '<a href="https://twitter.com/{screen_name}" target="_blank">@{screen_name}</a>'
SYMBOL_HTML = '<a href="https://twitter.com/search?q=%24{text}" target="_blank">${text}</a>'
def tweet_html_entities(tweet, **kwargs):
text = list(tweet)
for hashtag in kwargs.get('hashtags', []):
start, end = hashtag['indices']
text[start] = HASHTAG_HTML.format(**hashtag)
text[start + 1:end] = [''] * (end - start - 1)
for url in kwargs.get('urls', []):
start, end = url['indices']
text[start] = URL_HTML.format(**url)
text[start + 1:end] = [''] * (end - start - 1)
for mention in kwargs.get('user_mentions', []):
start, end = mention['indices']
text[start] = MENTION_HTML.format(**mention)
text[start + 1:end] = [''] * (end - start - 1)
for symbol in kwargs.get('symbols', []):
start, end = symbol['indices']
text[start] = SYMBOL_HTML.format(**symbol)
text[start + 1:end] = [''] * (end - start - 1)
for media in kwargs.get('media', []):
start, end = media['indices']
text[start] = URL_HTML.format(**media)
text[start + 1:end] = [''] * (end - start - 1)
return ''.join(text)
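# A hypothetical call showing how tweet_html_entities replaces an entity's index
# span with an HTML link (sketch only, added example, not part of the original
# module); the tweet text and entity dict below are made up.
def _example_tweet_html_entities():
    """Sketch only."""
    text = 'hello #python'
    entities = {'hashtags': [{'text': 'python', 'indices': [6, 13]}]}
    return tweet_html_entities(text, **entities)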
def tweet_hashtags(tweet, hashtags):
for hashtag in hashtags:
text = hashtag['text'].lower()
tag, created = Hashtag.objects.get_or_create(text=text)
tweet.hashtags.add(tag)
def tweet_photos(tweet, media, download):
for photo in media:
# Only photos
if photo['type'] != 'photo':
continue
photo_id = photo['id']
large = photo['sizes']['large']
obj, created = Photo.objects.get_or_create(tweet=tweet, photo_id=photo_id, defaults={
'text': photo['display_url'],
'text_index': photo['indices'][0],
'url': photo['url'],
'media_url': photo['media_url_https'],
'large_width': int(large['w']),
'large_height': int(large['h']),
})
if download and not obj.image_file:
with TemporaryFile() as temp_file:
image_file = File(temp_file)
# Download the file
r = requests.get(obj.media_url, stream=True)
r.raise_for_status()
for chunk in r.iter_content(4096):
image_file.write(chunk)
# Get Pillow to look at it
image_file.seek(0)
pil_image = Image.open(image_file)
image_name = '%s.%s' % (
hashlib.md5(obj.media_url.encode()).hexdigest(), pil_image.format.lower())
# Save the file
image_file.seek(0)
obj.image_file.save(image_name, image_file, save=True)
def update_tweets(tweet_list, tweet_entities=tweet_html_entities, download=False):
# Need to escape HTML entities
htmlparser = html_parser.HTMLParser()
unescape = htmlparser.unescape
obj_list = []
for tweet in tweet_list:
tweet_id = tweet['id']
tweet_username = tweet['user']['screen_name']
tweet_name = tweet['user']['name']
tweet_created = datetime.strptime(
tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'
).replace(tzinfo=utc)
tweet_is_reply = tweet['in_reply_to_screen_name'] is not None
if 'retweeted_status' in tweet:
retweeted_username = tweet['retweeted_status']['user']['screen_name']
retweeted_name = tweet['retweeted_status']['user']['name']
retweeted_tweet_id = tweet['retweeted_status']['id']
tweet_text = tweet['retweeted_status']['text']
tweet_html = tweet_entities(tweet_text, **tweet['retweeted_status']['entities'])
favorite_count = tweet['retweeted_status']['favorite_count']
retweet_count = tweet['retweeted_status']['retweet_count']
else:
retweeted_username = ''
retweeted_name = ''
retweeted_tweet_id = None
tweet_text = tweet['text']
tweet_html = tweet_entities(tweet_text, **tweet['entities'])
favorite_count = tweet['favorite_count']
retweet_count = tweet['retweet_count']
tweet_text = unescape(tweet_text)
obj, created = Tweet.objects.get_or_create(tweet_id=tweet_id, defaults={
'user': tweet_username,
'name': tweet_name,
'text': tweet_text,
'html': tweet_html,
'favorite_count': favorite_count,
'retweet_count': retweet_count,
'retweeted_username': retweeted_username,
'retweeted_name': retweeted_name,
'retweeted_tweet_id': retweeted_tweet_id,
'is_reply': tweet_is_reply,
'created': tweet_created,
})
if created:
# Add hashtags
tweet_hashtags(tweet=obj, hashtags=tweet['entities'].get('hashtags', []))
# Add any photos
tweet_photos(tweet=obj, media=tweet['entities'].get('media', []), download=download)
else:
# Update counts, but try to avoid excessive updates
update_fields = []
if obj.favorite_count != favorite_count:
obj.favorite_count = favorite_count
update_fields.append('favorite_count')
if obj.retweet_count != retweet_count:
obj.retweet_count = retweet_count
update_fields.append('retweet_count')
if update_fields:
obj.save(update_fields=update_fields)
obj_list.append(obj)
return obj_list
def update_likes(user, tweet_list, download=False):
obj_list = update_tweets(tweet_list=tweet_list, download=download)
for tweet in obj_list:
Like.objects.get_or_create(user=user, tweet=tweet)
return obj_list
| bsd-3-clause | -4,917,055,748,261,835,000 | 33.994536 | 96 | 0.570581 | false |
bk1285/rpi_wordclock | wordclock_interfaces/event_handler.py | 1 | 2667 | import threading
from monotonic import monotonic as _time
class next_action:
NEXT_PLUGIN = 1
GOTO_MENU = 2
RUN_DEFAULT_PLUGIN = 3
class event_handler:
EVENT_INVALID = -1
EVENT_BUTTON_LEFT = 0
EVENT_BUTTON_RIGHT = 1
EVENT_BUTTON_RETURN = 2
EVENT_EXIT_PLUGIN = 3
EVENT_NEXT_PLUGIN_REQUESTED = 4
BUTTONS = {'left': EVENT_BUTTON_LEFT, 'right': EVENT_BUTTON_RIGHT, 'return': EVENT_BUTTON_RETURN}
def __init__(self):
self.condition = threading.Condition()
self.event = self.EVENT_INVALID
self.lock_time = 0.1
self.nextAction = next_action.RUN_DEFAULT_PLUGIN
def getNextAction(self, evt):
if evt == self.EVENT_NEXT_PLUGIN_REQUESTED:
self.nextAction = next_action.NEXT_PLUGIN
elif evt == self.EVENT_BUTTON_RETURN:
self.nextAction = next_action.GOTO_MENU
else:
self.nextAction = next_action.RUN_DEFAULT_PLUGIN
def waitForEvent(self, seconds=None):
self.condition.acquire()
self.__wait_for(lambda: self.event != self.EVENT_INVALID, seconds)
evt = self.event
self.getNextAction(evt)
self.event = self.EVENT_INVALID
self.condition.release()
return evt
def setEvent(self, evt):
self.condition.acquire()
if self.event != self.EVENT_EXIT_PLUGIN and self.event != self.EVENT_NEXT_PLUGIN_REQUESTED:
self.event = evt
self.condition.notifyAll()
self.condition.release()
def waitForExit(self, seconds=None):
self.condition.acquire()
exitWasTriggered = self.__wait_for(
lambda: self.event == self.EVENT_EXIT_PLUGIN or self.event == self.EVENT_NEXT_PLUGIN_REQUESTED, seconds)
self.getNextAction(self.event)
self.event = self.EVENT_INVALID
self.condition.release()
return True if exitWasTriggered else False
def __wait_for(self, predicate, timeout=None):
"""
Wait until a condition evaluates to True.
        predicate should be a callable whose result will be interpreted as a
        boolean value. A timeout may be provided, giving the maximum time to
        wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.condition.wait(waittime)
result = predicate()
return result
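# Illustrative usage sketch (added for clarity, not part of the original file):
# a GPIO callback thread posts button presses while the plugin loop waits for
# them; the 'left' key is just one of the BUTTONS defined above.
def _example_usage():
    handler = event_handler()
    handler.setEvent(event_handler.BUTTONS['left'])   # e.g. from a button callback
    evt = handler.waitForEvent(5)                     # plugin loop, 5 second timeout
    return evt, handler.nextAction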
| gpl-3.0 | 370,705,147,843,874,800 | 31.13253 | 116 | 0.600675 | false |
saikia81/the-number-game | 4/number_game.py | 2 | 11834 | #!/usr/bin/python
# version 4
# april 2012
# this was written by saikia81 and is copyrighted under the GNU general public license 3
# it was written in notepad++, a program I recommend!
# whitespace ftw!
#import random, system and operating system possibilities.
import os, sys
import random, time #time module
#pickling for data2file
import cPickle as pickle
#introducing the player
def instructions():
print 'welcome to the guess my number game V4'
print "I'll think of a number and you have to guess it\n"
#making a list of all possible numbers for every difficulty
def list_numbers():
list_easy = []
list_medium = []
list_hard = []
for n in range(1,101):
list_easy.append(n)
list_medium.append(n)
list_hard.append(n)
for n in range(101,201):
list_medium.append(n)
list_hard.append(n)
for n in range(-201,0):
n += 1
list_hard.append(n)
return list_easy, list_medium, list_hard
#does the player want to change the difficulty
def change_dificulty(dificulty):
if dificulty == None:
dificulty = choose_dificulty()
return dificulty
if raw_input("do you want to change dificulty? yes/no: ") == 'yes':
dificulty = choose_dificulty()
return dificulty
else:
return dificulty
#the difficulty the player wants to choose
def choose_dificulty():
    print '\nwhat difficulty do you want to play in?'
dificulty = raw_input('choose between "easy", "medium" or "hard":\n')
dificulties = 'easy', 'medium', 'hard'
#if anybody tries to be smart: help them get it right
wrong = -1
if dificulty in dificulties: wrong = 0
elif dificulty not in dificulties:
wrong += 1
for n in (1,2,3):
if n == 3:
print "\nseems like you can't handle choosing a dificulty..."
dificulty = "easy"
time.sleep(2)
print ""
elif (dificulty not in dificulties):
print 'something went wrong!!! please try again\n'
dificulty = raw_input('choose between "easy", "medium" or "hard":\n')
wrong += 1
elif dificulty in dificulties:
print "\nalright so let's get started :D\n"
break
else:
print "you're doing something wrong! I'll chooce a dificulty for you\a\a\a\a\n"
dificulty = 'easy'
print "ERROR: 008"
time.sleep(2)
else:
print '\a\a\asomething went wrong the program will shutdown.'
print "ERROR: 009"
time.sleep(2.5)
sys.exit()
return dificulty
#so here a random number will be chosen depending on the difficulty
def random_number(dificulty, list_easy, list_medium, list_hard):
if dificulty == 'easy':
NUMBER = random.randrange(100) + 1
print "you have choosen the dificulty easy."
number_range = '1 and 100: '
numbers = list_easy
elif dificulty == 'medium':
NUMBER = random.randrange(200) + 1
print "you have choosen the dificulty medium."
number_range = '1 and 200: '
numbers = list_medium
elif dificulty =='hard':
NUMBER = random.randrange(-200,201)
print "you have choosen the dificulty hard."
number_range = '-200 and 200: '
numbers = list_hard
else:
print "dificulty malfunction"
print "ERROR: 003"
time.sleep(2.5)
exit()
return NUMBER, number_range, numbers
# if the guess != "the (predefined) number": loop.
def game(dificulty, NUMBER, number_range, numbers):
time.sleep(2.5)
os.system('cls')
guesses=0
guess='nothing'
while guess != NUMBER:
if guess == 'nothing':
print 'guess a number between', number_range
try:
guess = input()
except:
print "\nsomething went wrong\nyou're getting another try\n\n"
continue
guesses += 1
elif guess == 'cheater':
guess = NUMBER
elif guess not in numbers:
print "\nthe guess you made isn't in the range of valid numbers.\nAre you sure you want to make this guess?"
answ = raw_input("'yes'/'no' \n")
if answ == 'yes':
print "it's your funeral"
                print '\nguess a number between', number_range
guesses += 1
elif answ == 'no':
print "good choice"
print '\nguess a number between', number_range
try:
guess = input()
except:
print "something went wrong\nyou're getting another try\n"
continue
else:
print "that isn't a valid option"
print "let's continue\n"
#if the number is higher than the guess
elif guess < NUMBER:
print 'higher...'
print '\nguess a number between', number_range
try:
guess = input()
except:
print "something went wrong\nyou're getting another try\n"
continue
guesses += 1
continue
#if the number is 'lower...'
elif guess > NUMBER:
print 'lower...'
print '\nguess a number between', number_range
try:
guess = input()
except:
print "something went wrong\n you'll get another try"
continue
            guesses += 1
#this is actually an error that will never occur... but better safe than sorry.
else:
print '\a\a\asorry, something went wrong. The game will now end itself.'
sys.exit()
print
    print 'you did it! the NUMBER was: ', NUMBER,
    print 'it cost you ', guesses, 'guesses to get it right', 'on difficulty', dificulty
print
return guesses
##Here I will use the 'os' module to keep a highscore system
#in the default appdata of the users profile.
#everything here is to see if everything is alright in it's place.
def highscore(dificulty,guesses):
FOLDER_LOCALAPPDATA = os.environ['LOCALAPPDATA']
FOLDER_NUMBER_GAME = FOLDER_LOCALAPPDATA + '\\Number_game'
#deciding if a new highscore file and/or dir is needed
if os.access(FOLDER_NUMBER_GAME, 0) == False: #dir
try:
os.mkdir(FOLDER_NUMBER_GAME)
except:
os.system('cls')
print 'creating folder: ERROR\nError code: 002'
os.system('pause')
sys.exit()
try:
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "w+")
easy_highscores={}
medium_highscores={}
hard_highscores={}
all_highscores = [easy_highscores,medium_highscores,hard_highscores]
pickle.dump(all_highscores,HIGHSCORES_DAT)
HIGHSCORES_DAT.close()
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "r+")
unpickled_file = pickle.load(HIGHSCORES_DAT)
except:
os.system('cls')
print 'loading file: ERROR\nError code: 001'
os.system('pause')
sys.exit()
else:
HIGHSCORES_DAT.close()
#done with file and folder creation
#
#showing highscores
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "r")
try:
unpickled_file = pickle.load(HIGHSCORES_DAT)
except:
print "couldn't locate or unpickle file"
print "ERROR: 005"
print "\n if this was your first run of the game: this is common"
print "if not, please send a message at [email protected], thank you"
time.sleep(1)
print "everything went worse then expected. shutting down"
time.sleep(2.5)
sys.exit()
else:
HIGHSCORES_DAT.close()
if dificulty == "easy": l=0
if dificulty == "medium": l=1
if dificulty == "hard": l=2
highscores = unpickled_file[l]
#creating your highscore...
your_name = raw_input('what is your name?: ')
try:
if highscores[your_name]>guesses:
os.system('cls')
print "congratulations, new highscore!!"
if raw_input('do you want to replace your score yes/no: ') =="yes": highscores[your_name]=guesses
except:
print "new user"
highscores[your_name]=guesses
list_keys= highscores.keys()
list_values= highscores.values()
list_values.sort()
time.sleep(4)
os.system('cls')
#deeply annoying part
#highscore display
print" ---HIGHSCORE---"
print "highscores in", dificulty,"dificulty"
print"\nname attempts"
print"----------------------------------------"
i=0
#for values in sorted values list
for n in list_values:
#reset found to find next highscore
found = False
#set p to 0: to try different keys
p=0
#while the matching key and value not found keep looking
while found != True:
#m = the next key in list
m=list_keys[p]
if highscores[m] == n: found=True
p+=1
b=len(m)
b=21-b
print m,' '*b,highscores[m]
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "r")
unpickled_file = pickle.load(HIGHSCORES_DAT)
HIGHSCORES_DAT.close()
if l==0: unpickled_file[0]=highscores
if l==1: unpickled_file[1]=highscores
if l==2: unpickled_file[2]=highscores
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "w")
pickle.dump(unpickled_file,HIGHSCORES_DAT)
HIGHSCORES_DAT.close()
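# Note added for clarity (not in the original): highscores.dat stores a pickled
# list of three dicts -- [easy, medium, hard] -- each mapping a player's name to
# their best (lowest) guess count, e.g. [{'alice': 7}, {}, {}].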
def end():
time.sleep(1)
print('''
The number Game V4
Copyright (C) 2012 Saikia81
''')
time.sleep(5)
os.system('cls')
print("""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
""")
time.sleep(7)
try:
if pygame.mixer.get_busy()>0:
try:
pygame.mixer.music.fadeout(3000)
except:
print "ERROR: 012"
except:
pass
time.sleep(3)
os.system('pause')
sys.exit()
def main():
#initializing
ask_music = raw_input('music "on"?: ')
if (ask_music == 'on') or (ask_music == 'yes'):
try:
import pygame.mixer
pygame.mixer.init()
pygame.mixer.music.load("song.mp3")
pygame.mixer.music.play(-1)
except:
print "pygame not working!\nError: 013"
os.system('cls')
list_easy, list_medium, list_hard = list_numbers()
dificulty = None
instructions()
while 1:
dificulty=change_dificulty(dificulty)
NUMBER, number_range, numbers = random_number(dificulty, list_easy, list_medium, list_hard)
guesses = game(dificulty, NUMBER, number_range, numbers)
highscore(dificulty,guesses)
ask_again = raw_input('\ndo you want to play again? yes/no: ')
os.system('cls')
if ask_again == 'no': end()
#start
main()
| gpl-3.0 | -2,437,525,922,798,313,000 | 32.908309 | 120 | 0.577489 | false |
akusok/website-ibc | wibc_old/utils/copy_hdf5.py | 1 | 6601 | # -*- coding: utf-8 -*-
"""
Utilities for copying huge HDF5 files.
Created on Thu Jun 20 14:02:59 2013
"""
#from ../modules/hdf5_creator import create_empty_hdf5
from tables import openFile
import numpy as np
import time
def copy_hdf5_newindex(data, new):
"""Copying a part of data, updating indexes.
Copying process is image-based.
Websites currently stays the same, as they are a tiny table.
Only full-file copy, no appends!
"""
def modify_data(imgrow, regs, descrs, rli):
"""Modifying data before writing it back.
Returns an empty tuple if an image is to be deleted.
"""
        K = 0.8 # coefficient: regions are kept only if their normalised centre lies outside this radius
regs1 = []
descrs1 = []
rli1 = rli
x,y = imgrow[9]
for i in xrange(len(regs)):
rg = regs[i]
ds = descrs[i]
xr,yr = rg[4]
# centering and normalizing
xr = (float(xr)/x - 0.5)*2
yr = (float(yr)/y - 0.5)*2
# check required condition
if (xr**2 + yr**2 >= K**2):
rg[0] = rli1 # self index
ds[0] = rli1 # self index
rli1 += 1
regs1.append(rg)
descrs1.append(ds)
if len(regs1) == 0:
return ()
else:
return (regs1, descrs1)
print "Opening files"
db0 = openFile(data, "r")
db1 = openFile(new, "a")
i = 0
Ws0 = db0.root.Websites
Img0 = db0.root.Images
Reg0 = db0.root.Regions
Des0 = db0.root.Descriptors
Ws1 = db1.root.Websites
Img1 = db1.root.Images
Reg1 = db1.root.Regions
Des1 = db1.root.Descriptors
# websites
print "Copying websites"
batch = 10000
N = Ws0.nrows
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
# just copy rows as they are the same
Ws1.append(Ws0.read(nmin, nmax))
print "ws: %d/%d" % (nmax, N)
Ws1.attrs.last_index = Ws0.attrs.last_index
Ws1.flush()
# image-based copy process
t = time.time()
reg_first = 0
last_index = 0
reg_last_index = 0
nr_in_class = np.zeros((Img0.attrs.nr_in_class.shape[0],))
flush = 0
flushbatch = 1000
N = Img0.nrows
for j in xrange(N):
imgrow = Img0.read(j,j+1)[0]
i0 = imgrow[3]
i1 = i0 + imgrow[4]
regs = Reg0.read(i0, i1)
descrs = Des0.read(i0, i1)
imgrow[0] = last_index
data = modify_data(imgrow, regs, descrs, reg_last_index)
# skipping an image if needed
if data == ():
continue
regs, descrs = data
reg_count = len(regs)
# update image row
imgrow[0] = last_index
imgrow[3] = reg_first
imgrow[4] = reg_count
# writing data - an array of tuples
Img1.append([tuple(imgrow)])
Reg1.append([tuple(r) for r in regs])
Des1.append([tuple(d) for d in descrs])
# update global attributes
nr_in_class[imgrow[1]] += 1
last_index += 1
reg_first += reg_count # updating reg_first for next image
reg_last_index += reg_count
flush += 1
# flushing
if flush >= flushbatch:
dt = time.time() - t
etr = int((float(dt)/(j+1)) * (N-j-1))
print "Images %d/%d, time remaining %d:%02d:%02d" % \
(j+1, N, etr/3600, (etr % 3600)/60, etr % 60)
flush = 0
Img1.attrs.last_index = last_index
Img1.attrs.nr_in_class = nr_in_class
Img1.flush()
Reg1.attrs.last_index = reg_last_index
Reg1.flush()
Des1.attrs.last_index = reg_last_index
Des1.flush()
# final flush
Img1.attrs.last_index = last_index
Img1.attrs.nr_in_class = nr_in_class
Img1.flush()
Reg1.attrs.last_index = reg_last_index
Reg1.flush()
Des1.attrs.last_index = reg_last_index
Des1.flush()
db0.close()
db1.close()
print 'Done copying!'
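# Summary note (added for clarity, not in the original): copy_hdf5_newindex walks
# the source file image by image, keeps only regions whose normalised centre lies
# outside the K=0.8 radius, drops images left with no regions, and rewrites all
# self-indices so the copied file stays internally consistent.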
def copy_hdf5(data, new, batch=100000):
"""Copying all data to modify some columns.
"""
print "Opening files"
db0 = openFile(data, "r")
db1 = openFile(new, "a")
i = 0
Ws0 = db0.root.Websites
Img0 = db0.root.Images
Reg0 = db0.root.Regions
Des0 = db0.root.Descriptors
Ws1 = db1.root.Websites
Img1 = db1.root.Images
Reg1 = db1.root.Regions
Des1 = db1.root.Descriptors
# websites
print "Copying websites"
N = Ws0.nrows
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
# just copy rows as they are the same
Ws1.append(Ws0.read(nmin, nmax))
print "ws: %d/%d" % (nmax, N)
Ws1.attrs.last_index = Ws0.attrs.last_index
Ws1.flush()
# images
print "Copying images"
N = Img0.nrows
img_repr = np.ones((24,), dtype=np.float64) * -1
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
for row in Img0.read(nmin, nmax):
rows.append(tuple(row) + (img_repr,))
Img1.append(rows)
print "img: %d/%d" % (nmax, N)
Img1.attrs.last_index = Img0.attrs.last_index
Img1.attrs.nr_in_class = Img0.attrs.nr_in_class
Img1.flush()
# regions
print "Copying regions"
N = Reg0.nrows
ngb = np.ones((10,2), dtype=np.float64) * -1
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
for tupl in Reg0.read(nmin, nmax):
row = list(tupl)
# format rows here
rows.append(tuple(row[:6] + [ngb] + row[6:]))
Reg1.append(rows)
print "reg: %d/%d" % (nmax, N)
Reg1.attrs.last_index = Reg0.attrs.last_index
Reg1.flush()
# descriptors
print "Copying descriptors"
N = Des0.nrows
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
Des1.append(Des0.read(nmin, nmax))
print "des: %d/%d" % (nmax, N)
Des1.attrs.last_index = Des0.attrs.last_index
Des1.flush()
db0.close()
db1.close()
print 'Done copying!'
if __name__ == "__main__":
copy_hdf5_newindex("/data/spiiras/spiiras.h5",
"/users/akusoka1/local/spiiras_border.h5")
| gpl-2.0 | -4,039,075,724,023,822,300 | 22.242958 | 67 | 0.518709 | false |
bdfoster/blumate | blumate/components/sensor/mfi.py | 1 | 3158 | """
Support for Ubiquiti mFi sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mfi/
"""
import logging
import requests
from blumate.components.sensor import DOMAIN
from blumate.const import CONF_PASSWORD, CONF_USERNAME, TEMP_CELSIUS
from blumate.helpers import validate_config
from blumate.helpers.entity import Entity
REQUIREMENTS = ['mficlient==0.3.0']
_LOGGER = logging.getLogger(__name__)
STATE_ON = 'on'
STATE_OFF = 'off'
DIGITS = {
'volts': 1,
'amps': 1,
'active_power': 0,
'temperature': 1,
}
SENSOR_MODELS = [
'Ubiquiti mFi-THS',
'Ubiquiti mFi-CS',
'Outlet',
'Input Analog',
'Input Digital',
]
CONF_TLS = 'use_tls'
CONF_VERIFY_TLS = 'verify_tls'
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup mFi sensors."""
if not validate_config({DOMAIN: config},
{DOMAIN: ['host',
CONF_USERNAME,
CONF_PASSWORD]},
_LOGGER):
_LOGGER.error('A host, username, and password are required')
return False
host = config.get('host')
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_tls = bool(config.get(CONF_TLS, True))
verify_tls = bool(config.get(CONF_VERIFY_TLS, True))
default_port = use_tls and 6443 or 6080
port = int(config.get('port', default_port))
from mficlient.client import FailedToLogin, MFiClient
try:
client = MFiClient(host, username, password, port=port,
use_tls=use_tls, verify=verify_tls)
except (FailedToLogin, requests.exceptions.ConnectionError) as ex:
_LOGGER.error('Unable to connect to mFi: %s', str(ex))
return False
add_devices(MfiSensor(port, hass)
for device in client.get_devices()
for port in device.ports.values()
if port.model in SENSOR_MODELS)
class MfiSensor(Entity):
"""Representation of a mFi sensor."""
def __init__(self, port, hass):
"""Initialize the sensor."""
self._port = port
self._hass = hass
@property
def name(self):
"""Return the name of th sensor."""
return self._port.label
@property
def state(self):
"""Return the state of the sensor."""
if self._port.model == 'Input Digital':
return self._port.value > 0 and STATE_ON or STATE_OFF
else:
digits = DIGITS.get(self._port.tag, 0)
return round(self._port.value, digits)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if self._port.tag == 'temperature':
return TEMP_CELSIUS
elif self._port.tag == 'active_pwr':
return 'Watts'
elif self._port.model == 'Input Digital':
return 'State'
return self._port.tag
def update(self):
"""Get the latest data."""
self._port.refresh()
| mit | -4,293,454,322,114,779,000 | 28.240741 | 74 | 0.598797 | false |
qiime2-plugins/feature-table | q2_feature_table/_summarize/_visualizer.py | 1 | 11251 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
import shutil
import biom
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from q2_types.feature_data import DNAIterator
import q2templates
import skbio
import qiime2
import json
from ._vega_spec import vega_spec
_blast_url_template = ("http://www.ncbi.nlm.nih.gov/BLAST/Blast.cgi?"
"ALIGNMENT_VIEW=Pairwise&PROGRAM=blastn&DATABASE"
"=nt&CMD=Put&QUERY=%s")
TEMPLATES = pkg_resources.resource_filename('q2_feature_table', '_summarize')
def tabulate_seqs(output_dir: str, data: DNAIterator) -> None:
sequences = []
seq_lengths = []
with open(os.path.join(output_dir, 'sequences.fasta'), 'w') as fh:
for sequence in data:
skbio.io.write(sequence, format='fasta', into=fh)
str_seq = str(sequence)
seq_len = len(str_seq)
sequences.append({'id': sequence.metadata['id'],
'len': seq_len,
'url': _blast_url_template % str_seq,
'seq': str_seq})
seq_lengths.append(seq_len)
seq_len_stats = _compute_descriptive_stats(seq_lengths)
_write_tsvs_of_descriptive_stats(seq_len_stats, output_dir)
index = os.path.join(TEMPLATES, 'tabulate_seqs_assets', 'index.html')
q2templates.render(index, output_dir, context={'data': sequences,
'stats': seq_len_stats})
js = os.path.join(
TEMPLATES, 'tabulate_seqs_assets', 'js', 'tsorter.min.js')
os.mkdir(os.path.join(output_dir, 'js'))
shutil.copy(js, os.path.join(output_dir, 'js', 'tsorter.min.js'))
def summarize(output_dir: str, table: biom.Table,
sample_metadata: qiime2.Metadata = None) -> None:
number_of_features, number_of_samples = table.shape
sample_summary, sample_frequencies = _frequency_summary(
table, axis='sample')
if number_of_samples > 1:
# Calculate the bin count, with a minimum of 5 bins
IQR = sample_summary['3rd quartile'] - sample_summary['1st quartile']
if IQR == 0.0:
bins = 5
else:
# Freedman–Diaconis rule
bin_width = (2 * IQR) / (number_of_samples ** (1/3))
bins = max((sample_summary['Maximum frequency'] -
sample_summary['Minimum frequency']) / bin_width, 5)
sample_frequencies_ax = sns.distplot(sample_frequencies, kde=False,
rug=True, bins=int(round(bins)))
sample_frequencies_ax.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
sample_frequencies_ax.set_xlabel('Frequency per sample')
sample_frequencies_ax.set_ylabel('Number of samples')
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.pdf'))
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.png'))
plt.gcf().clear()
feature_summary, feature_frequencies = _frequency_summary(
table, axis='observation')
if number_of_features > 1:
feature_frequencies_ax = sns.distplot(feature_frequencies, kde=False,
rug=False)
feature_frequencies_ax.set_xlabel('Frequency per feature')
feature_frequencies_ax.set_ylabel('Number of features')
feature_frequencies_ax.set_xscale('log')
feature_frequencies_ax.set_yscale('log')
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.pdf'))
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.png'))
sample_summary_table = q2templates.df_to_html(
sample_summary.apply('{:,}'.format).to_frame('Frequency'))
feature_summary_table = q2templates.df_to_html(
feature_summary.apply('{:,}'.format).to_frame('Frequency'))
index = os.path.join(TEMPLATES, 'summarize_assets', 'index.html')
context = {
'number_of_samples': number_of_samples,
'number_of_features': number_of_features,
'total_frequencies': int(np.sum(sample_frequencies)),
'sample_summary_table': sample_summary_table,
'feature_summary_table': feature_summary_table,
}
feature_qualitative_data = _compute_qualitative_summary(table)
sample_frequencies.sort_values(inplace=True, ascending=False)
feature_frequencies.sort_values(inplace=True, ascending=False)
sample_frequencies.to_csv(
os.path.join(output_dir, 'sample-frequency-detail.csv'))
feature_frequencies.to_csv(
os.path.join(output_dir, 'feature-frequency-detail.csv'))
feature_frequencies = feature_frequencies.astype(int) \
.apply('{:,}'.format).to_frame('Frequency')
feature_frequencies['# of Samples Observed In'] = \
pd.Series(feature_qualitative_data).astype(int).apply('{:,}'.format)
feature_frequencies_table = q2templates.df_to_html(feature_frequencies)
sample_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'sample-frequency-detail.html')
feature_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'feature-frequency-detail.html')
context.update({'max_count': sample_frequencies.max(),
'feature_frequencies_table': feature_frequencies_table,
'feature_qualitative_data': feature_qualitative_data,
'tabs': [{'url': 'index.html',
'title': 'Overview'},
{'url': 'sample-frequency-detail.html',
'title': 'Interactive Sample Detail'},
{'url': 'feature-frequency-detail.html',
'title': 'Feature Detail'}]})
# Create a JSON object containing the Sample Frequencies to build the
# table in sample-frequency-detail.html
sample_frequencies_json = sample_frequencies.to_json()
templates = [index, sample_frequency_template, feature_frequency_template]
context.update({'frequencies_list':
json.dumps(sorted(sample_frequencies.values.tolist()))})
if sample_metadata is not None:
context.update({'vega_spec':
json.dumps(vega_spec(sample_metadata,
sample_frequencies
))
})
context.update({'sample_frequencies_json': sample_frequencies_json})
q2templates.util.copy_assets(os.path.join(TEMPLATES,
'summarize_assets',
'vega'),
output_dir)
q2templates.render(templates, output_dir, context=context)
def _compute_descriptive_stats(lst: list):
"""Basic descriptive statistics and a (parametric) seven-number summary.
Calculates descriptive statistics for a list of numerical values, including
count, min, max, mean, and a parametric seven-number-summary. This summary
includes values for the lower quartile, median, upper quartile, and
percentiles 2, 9, 91, and 98. If the data is normally distributed, these
seven percentiles will be equally spaced when plotted.
Parameters
----------
lst : list of int or float values
Returns
-------
dict
a dictionary containing the following descriptive statistics:
count
int: the number of items in `lst`
min
int or float: the smallest number in `lst`
max
int or float: the largest number in `lst`
mean
float: the mean of `lst`
range
int or float: the range of values in `lst`
std
float: the standard deviation of values in `lst`
seven_num_summ_percentiles
list of floats: the parameter percentiles used to calculate this
seven-number summary: [2, 9, 25, 50, 75, 91, 98]
seven_num_summ_values
list of floats: the calculated percentile values of the summary
"""
# NOTE: With .describe(), NaN values in passed lst are excluded by default
if len(lst) == 0:
raise ValueError('No values provided.')
seq_lengths = pd.Series(lst)
seven_num_summ_percentiles = [0.02, 0.09, 0.25, 0.5, 0.75, 0.91, 0.98]
descriptive_stats = seq_lengths.describe(
percentiles=seven_num_summ_percentiles)
return {'count': int(descriptive_stats.loc['count']),
'min': descriptive_stats.loc['min'],
'max': descriptive_stats.loc['max'],
'range': descriptive_stats.loc['max'] -
descriptive_stats.loc['min'],
'mean': descriptive_stats.loc['mean'],
'std': descriptive_stats.loc['std'],
'seven_num_summ_percentiles': seven_num_summ_percentiles,
'seven_num_summ_values': descriptive_stats.loc['2%':'98%'].tolist()
}
def _write_tsvs_of_descriptive_stats(dictionary: dict, output_dir: str):
descriptive_stats = ['count', 'min', 'max', 'mean', 'range', 'std']
stat_list = []
for key in descriptive_stats:
stat_list.append(dictionary[key])
descriptive_stats = pd.DataFrame(
{'Statistic': descriptive_stats, 'Value': stat_list})
descriptive_stats.to_csv(
os.path.join(output_dir, 'descriptive_stats.tsv'),
sep='\t', index=False, float_format='%g')
seven_number_summary = pd.DataFrame(
{'Quantile': dictionary['seven_num_summ_percentiles'],
'Value': dictionary['seven_num_summ_values']})
seven_number_summary.to_csv(
os.path.join(output_dir, 'seven_number_summary.tsv'),
sep='\t', index=False, float_format='%g')
def _compute_qualitative_summary(table):
table = table.transpose()
sample_count = {}
for count_vector, feature_id, _ in table.iter():
sample_count[feature_id] = (count_vector != 0).sum()
return sample_count
def _frequencies(table, axis):
return pd.Series(data=table.sum(axis=axis), index=table.ids(axis=axis))
def _frequency_summary(table, axis='sample'):
frequencies = _frequencies(table, axis=axis)
summary = pd.Series([frequencies.min(), frequencies.quantile(0.25),
frequencies.median(), frequencies.quantile(0.75),
frequencies.max(), frequencies.mean()],
index=['Minimum frequency', '1st quartile',
'Median frequency', '3rd quartile',
'Maximum frequency', 'Mean frequency'])
return summary, frequencies
| bsd-3-clause | 6,885,744,701,589,621,000 | 41.449057 | 79 | 0.598631 | false |
AlexanderHaase/eppraise | eppraise.py | 1 | 16057 | #!/usr/bin/python3
from ebaysdk.finding import Connection as Finding
from ebaysdk.exception import ConnectionError
import itertools
import functools
import openpyxl
import sys
import re
import yaml
import argparse
import logging
import json
import datetime
import collections
from flask import Flask, redirect, request, render_template, make_response
from sqlalchemy.ext.declarative import declarative_base, declared_attr, as_declarative
from sqlalchemy import Table, Column, Integer, String, DateTime, ForeignKey, Boolean, create_engine
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
logger = logging.getLogger( __name__ )
def consume(iterator, n = None):
"Advance the iterator n-steps ahead. If n is none, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
        next(itertools.islice(iterator, n, n), None)
def apply( function, iterator ):
consume( map( function, iterator ) )
def unique( iterator ):
    seenValues = set()
    for value in iterator:
        # yield each value only the first time it is seen
        if value not in seenValues:
            seenValues.add( value )
            yield value
def scrub( keywords ):
keys = re.sub( '(\s)', ' ', keywords ).split()
filtered = map( lambda key: re.sub( '(\W)', '', key ), keys )
return ' '.join( filter( None, filtered ) )
#
# Setup SQL Schema
#
@as_declarative()
class SQLBase( object ):
'''Common properties for all sql objects'''
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column( Integer, primary_key = True, autoincrement = True )
def dict( self, append = tuple(), exclude = ('text',) ):
'''Serialize keys via reflection'''
keys = itertools.chain( self.__table__.columns.keys(), append )
keys = filter( lambda key: key not in exclude, keys )
attrs = map( functools.partial( getattr, self ), dir( self ) )
funcs = filter( lambda attr: hasattr( attr, '__serialize__' ) and attr.__serialize__, attrs )
result = collections.OrderedDict( map( lambda key: (key, getattr( self, key )), keys ) )
result.update( collections.OrderedDict( map( lambda func: (func.__name__, func()), funcs ) ) )
return result
def serialize( func ):
func.__serialize__ = True
return func
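# Illustrative note (not part of the original): methods tagged with the
# decorator above are folded into dict() by reflection, e.g.
#
#     @SQLBase.serialize
#     def estimate( self ):
#         ...
#
# shows up as an "estimate" key next to the table columns -- exactly how
# Watch.estimate below reaches the JSON emitted by the web views.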
class JSONProps( object ):
'''Mix-in for text<->json'''
text = Column( String, nullable = False )
@property
def json( self ):
if not hasattr( self, '__jsonCache__'):
self.__jsonCache__ = json.loads( self.text )
return self.__jsonCache__
@json.setter
def json( self, data ):
self.text = json.dumps( data )
self.__jsonCache__ = data # TODO: Deep copy/reload data....
associate_watch_item = Table( 'associate_watch_item', SQLBase.metadata,
Column( "watch_id", Integer, ForeignKey( "watch.id" ), primary_key = True ),
Column( "item_id", Integer, ForeignKey( "item.id" ), primary_key = True )
)
class Watch( SQLBase ):
'''Saved watch for items'''
keywords = Column( String, nullable = False, unique = True )
enabled = Column( Boolean, nullable = False, default = True )
items = relationship("Item", back_populates="watches", secondary = associate_watch_item )
queries = relationship("Query", back_populates="watch")
@SQLBase.serialize
def estimate( self ):
'''Mean sold price'''
(total, qty) = functools.reduce( (lambda accum, item: ( accum[ 0 ] + item.price(), accum[ 1 ] + 1.0 ) ), self.items, ( 0.0, 0.0 ) )
return total / qty if qty > 0 else None
@classmethod
def queryAll( cls, context, connection ):
activeWatches = context.session().query( cls ).filter( cls.enabled == True ).all()
return map( functools.partial( Query.fromWatch, context, connection ), activeWatches )
@classmethod
def fromFile( cls, context, filePath, inputRange ):
wb = openpyxl.load_workbook( filePath )
sheet = wb.active
return map( lambda cell: context.upsert( Watch, keywords = cell.value ), itertools.chain.from_iterable( sheet[ inputRange ] ) )
class Query( SQLBase, JSONProps ):
'''Record of executing a query. Future-proofing our data!'''
watch_id = Column( Integer, ForeignKey( Watch.id ), nullable = False )
watch = relationship( Watch, back_populates = "queries" )
retrieved = Column( DateTime, default = datetime.datetime.utcnow, nullable = False )
keywords = Column( String, nullable = False )
@classmethod
def fromWatch( cls, context, connection, watch ):
'''Create a query from a watch'''
keywords = scrub( watch.keywords )
result = connection.query( keywords )
return context.upsert( cls, keywords = keywords, watch = watch, json = result.dict() )
class Item( SQLBase, JSONProps ):
'''Record from ebay. We're watching completed items, so one per item is enough.'''
ebayID = Column( String, unique = True, nullable = False )
watches = relationship( Watch, back_populates = "items", secondary = associate_watch_item )
@SQLBase.serialize
def date( self ):
return self.json[ 'listingInfo' ][ 'endTime' ]
@SQLBase.serialize
def url( self ):
return self.json[ 'viewItemURL' ]
@SQLBase.serialize
def sold( self ):
return self.json[ 'sellingStatus' ][ 'sellingState' ] == 'EndedWithSales'
@SQLBase.serialize
def price( self ):
'''Fetch an iterator of sold prices'''
return float( self.json[ 'sellingStatus' ][ 'currentPrice' ][ 'value' ] )
@classmethod
def fromQuery( cls, context, query ):
'''Creates NEW objects from query'''
items = query.json[ 'searchResult' ].get( 'item', tuple() )
return map( lambda item: context.upsert( cls, watches = [ query.watch ], json = item, ebayID = item['itemId'] ), items )
class Database( object ):
    def __init__( self, dbURL = 'sqlite:///:memory:', base = SQLBase ):
        # Remember the connection settings so refresh() can rebuild the engine.
        self.dbURL = dbURL
        self.base = base
        self.engine = create_engine( dbURL )
        self.sessionMaker = sessionmaker( self.engine )
        base.metadata.create_all( self.engine )
    def refresh( self ):
        self.engine = create_engine( self.dbURL )
        self.sessionMaker = sessionmaker( self.engine )
        self.base.metadata.create_all( self.engine )
class SessionContext( object ):
def __init__( self, db ):
self.db = db
def session( self ):
return self.activeSession
def __call__( self, func, key = 'context' ):
'''decorator'''
@functools.wraps( func )
def wrapper( *args, **kwargs ):
with self:
kwargs[ key ] = self
return func( *args, **kwargs )
return wrapper
def __enter__( self ):
self.activeSession = self.db.sessionMaker()
return self
def __exit__( self, type, value, traceback ):
if value:
self.activeSession.rollback()
else:
self.activeSession.commit()
self.activeSession.close()
del self.activeSession
def refresh( self ):
self.activeSession.rollback()
self.activeSession.close()
self.activeSession = self.db.sessionMaker()
@staticmethod
def identifyingColumns( cls ):
return filter( lambda column: column.unique or column.primary_key, cls.__table__.columns )
@classmethod
def queryArgs( this, cls, kwargs ):
present = filter( lambda column: column.name in kwargs, this.identifyingColumns( cls ) )
return map( lambda column: getattr( cls, column.name ) == kwargs[ column.name ], present )
@staticmethod
def updateKey( obj, key, value ):
if key in obj.__class__.__table__.columns:
setattr( obj, key, value )
elif hasattr( obj, key ) and isinstance( getattr( obj, key ), list ):
getattr( obj, key ).extend( value )
else:
setattr( obj, key, value )
def upsert( self, cls, **kwargs ):
try:
queryArgs = tuple(self.queryArgs( cls, kwargs) )
if len( queryArgs ) == 0:
raise KeyError( queryArgs )
obj = self.session().query( cls ).filter( *queryArgs ).one()
logger.info( "Already exists: {} {}".format( obj.__class__.__name__, obj.dict() ) )
apply( lambda item: self.updateKey( obj, item[ 0 ], item[ 1 ] ), kwargs.items() )
except (NoResultFound, KeyError):
obj = cls( **kwargs )
self.session().add( obj )
logger.info( "Added new item: {} {}".format( obj.__class__.__name__, obj.dict() ) )
return obj
def commitIfNew( self, obj ):
try:
self.activeSession.add( obj )
self.activeSession.commit()
logger.info( "Added new item: {} {}".format( obj.__class__.__name__, obj.dict() ) )
return True
except IntegrityError as e:
self.activeSession.rollback()
logger.info( "Already exists: {} {}\n{}".format( obj.__class__.__name__, obj.dict(), e ) )
return False
def context( self ):
return self.SessionContext( self )
#
# Ebay connections
#
class Estimate( object ):
'''
Estimate for a single search result, focusing on sold items. Mostly
focuses on dynamically extracting features from data, rather than
statically compution/storing them.
'''
def __init__( self, keyword, result ):
self.raw = result
self.keyword = keyword
self.items = result.dict()[ 'searchResult' ].get( 'item', tuple() )
def sold( self ):
'''Fetch an iterator of sold items'''
return filter( (lambda item: item[ 'sellingStatus' ][ 'sellingState' ] == 'EndedWithSales' ), self.items )
def prices( self ):
'''Fetch an iterator of sold prices'''
return map( (lambda item: item[ 'sellingStatus' ][ 'currentPrice' ][ 'value' ] ), self.sold() )
def mean( self ):
'''Mean sold price'''
(total, qty) = functools.reduce( (lambda accum, price: ( accum[ 0 ] + float(price), accum[ 1 ] + 1.0 ) ), self.prices(), ( 0.0, 0.0 ) )
return total / qty if qty > 0 else None
class Connection( object ):
'''Syntatic sugar for interacting with the ebay sdk'''
def __init__( self, **kwargs ):
self.api = Finding( **kwargs )
def query( self, item ):
return self.api.execute( 'findCompletedItems', {'keywords': item, } )
def estimate( self, item ):
'''Create an estimate for the given item'''
return Estimate( item, self.query( item ) )
def estimateFile( self, file, inputRange, outputRange ):
'''Proof of concept method for dumping this to/from a file'''
wb = openpyxl.load_workbook( file )
sheet = wb.active
ioRange = zip( sheet[ inputRange ], sheet[ outputRange ] )
def handleElement( ioElement ):
keys = re.sub( '(\s)', ' ', ioElement[ 0 ][ 0 ].value ).split()
filtered = map( lambda key: re.sub( '(\W)', '', key ), keys )
key = ' '.join( filter( None, filtered ) )
sys.stderr.write( key )
est = self.estimate( key )
mean = est.mean()
sys.stderr.write( ': {}\n'.format( mean ) )
ioElement[ 1 ][ 0 ].value = mean
functools.reduce( lambda x,y: None, map( handleElement, ioRange ) )
wb.save( file )
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to info
handler = logging.StreamHandler()
handler.setLevel( logging.DEBUG )
handler.setFormatter( logging.Formatter( "%(asctime)s - %(levelname)s\t<%(name)s:%(lineno)d>: %(message)s" ) )
logger.addHandler( handler )
# Setup parser
parser = argparse.ArgumentParser()
parser.add_argument('-d','--database-url', default = 'sqlite:///eppraise.db', help = "Database connection url" )
parser.add_argument( '-v', '--verbose', default = "WARNING", help = "Set logging level in DEBUG, INFO, WARNING, ERROR, CRITICAL" )
subparsers = parser.add_subparsers( help = "command help", dest = "command" )
xlsxParser = subparsers.add_parser( "xlsx", help = "Interact with spreadsheet" )
xlsxParser.add_argument("spreadsheet", help = "Input spreadsheet" )
xlsxParser.add_argument('-i', '--input-range', required = True, help = "Range of items in spreadsheet to estimate, one per cell" )
xlsxParser.add_argument('-o', '--output-range', required = False, help = "Range for output estimates in sheet, same size as input range" )
watchParser = subparsers.add_parser( "watch", help = "Create or modify a watch" )
watchParser.add_argument( "watch", help = "Keywords to watch" )
watchParser.add_argument( "--disable", action='store_true', help = "Disable specified watch" )
watchParser.add_argument( "--estimate", action='store_true', help = "Provide an estimate based on database data" )
queryParser = subparsers.add_parser( "update", help = "Update all active watches" )
queryParser.add_argument('-c','--config', default = "./config.yaml", help = "Configuration for ebay API" )
itemParser = subparsers.add_parser( "item" )
webParser = subparsers.add_parser( "web" )
webParser.add_argument( '-a', '--host', default = "0.0.0.0", help = "Host IP address for binding server" )
webParser.add_argument( '-p', '--port', default = "5000", help = "Host port for binding server", type = int )
#todo webParser..
# parse args
args = parser.parse_args();
# setup logger
logger.setLevel( getattr( logging, args.verbose ) )
logger.debug( args )
# connect to database
logger.debug( "Connecting to database: '{}'...".format( args.database_url ) )
db = Database( args.database_url )
if args.command == 'xlsx':
with db.context() as context:
def updateWatch( inputCell, outputCell ):
watch = context.upsert( Watch, keywords = inputCell.value )
if outputCell:
outputCell.value = watch.estimate()
workbook = openpyxl.load_workbook( args.spreadsheet )
sheet = workbook.active
inputCells = itertools.chain.from_iterable( sheet[ args.input_range ] )
if args.output_range:
outputCells = itertools.chain.from_iterable( sheet[ args.output_range ] )
else:
outputCells = itertools.repeat( None )
consume( itertools.starmap( updateWatch, zip( inputCells, outputCells ) ) )
workbook.save( args.spreadsheet )
#map( lambda cell: context.upsert( Watch, keywords = cell.value ), itertools.chain.from_iterable( sheet[ inputRange ] ) )
#watches = Watch.fromFile( context, args.spreadsheet, args.input_range )
# TODO output range
elif args.command == 'watch':
with db.context() as context:
try:
watch = context.session().query( Watch ).filter( Watch.keywords == args.watch ).one()
except NoResultFound:
watch = Watch( keywords = args.watch )
context.session().add( watch )
watch.enabled = not args.disable
print( watch.dict() )
elif args.command == 'update':
# read config
with open( args.config, 'r' ) as handle:
config = yaml.safe_load( handle )
# connect
con = Connection( config_file = None, appid = config[ 'ebay' ][ 'id' ] )
with db.context() as context:
for query in Watch.queryAll( context, con ):
# Commit and filter new items
apply( context.session().expunge, itertools.filterfalse( Item.sold, Item.fromQuery( context, query ) ) )
elif args.command == 'item':
with db.context() as context:
apply( sys.stdout.write, map( "{}\n".format, map( SQLBase.dict, context.session().query( Item ).all() ) ) )
elif args.command == 'web':
app = Flask( __name__ )
def no_cache( func ):
@functools.wraps( func )
def wrapper( *args, **kwargs ):
response = func( *args, **kwargs )
if isinstance( response, str ):
response = make_response( response, 200 )
response.headers[ 'Cache-Control' ] = 'no-cache, no-store, must-revalidate'
response.headers[ 'Pragma' ] = 'no-cache'
return response
return wrapper
def serialize( iterator, status = 200 ):
response = make_response( json.dumps( list( map( SQLBase.dict, iterator ) ) ), status )
response.headers[ 'Content-Type' ] = 'appliction/json'
return response
@app.route( '/watch' )
@no_cache
@db.context()
def watch( context ):
return serialize( context.session().query( Watch ).all() )
@app.route( '/watch/<int:watchId>/items' )
@no_cache
@db.context()
def watchItems( watchId, context ):
return serialize( context.session().query( Watch ).filter( Watch.id == watchId ).one().items )
@app.route( '/' )
def index():
return render_template( 'index.html' )
@app.route( '/jsonp/<name>' )
@no_cache
def jsonp( name ):
return render_template( name )
app.run( args.host, port = args.port, debug = True )
| gpl-3.0 | 1,612,893,145,415,447,600 | 31.438384 | 139 | 0.667746 | false |
fopina/pyspeedtest | pyspeedtest.py | 1 | 13319 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import bisect
import itertools
import logging
import random
import re
import string
import sys
import platform
from math import sqrt
from threading import currentThread, Thread
from time import time
try:
from httplib import HTTPConnection
except ImportError:
from http.client import HTTPConnection
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
__program__ = 'pyspeedtest'
__version__ = '1.2.7'
__description__ = 'Test your bandwidth speed using Speedtest.net servers.'
__supported_formats__ = ('default', 'json', 'xml')
class SpeedTest(object):
USER_AGENTS = {
'Linux': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0',
'Darwin': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:41.0) Gecko/20100101 Firefox/41.0',
'Windows': 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0',
'Java': 'Java/1.6.0_12',
}
DOWNLOAD_FILES = [
'/speedtest/random350x350.jpg',
'/speedtest/random500x500.jpg',
'/speedtest/random1500x1500.jpg'
]
UPLOAD_FILES = [
132884,
493638
]
ALPHABET = string.digits + string.ascii_letters
def __init__(self, host=None, http_debug=0, runs=2):
self._host = host
self.http_debug = http_debug
self.runs = runs
@property
def host(self):
if not self._host:
self._host = self.chooseserver()
return self._host
@host.setter
def host(self, new_host):
self._host = new_host
def connect(self, url):
try:
connection = HTTPConnection(url)
connection.set_debuglevel(self.http_debug)
connection.connect()
return connection
except:
raise Exception('Unable to connect to %r' % url)
def downloadthread(self, connection, url):
connection.request('GET', url, None, {'Connection': 'Keep-Alive'})
response = connection.getresponse()
self_thread = currentThread()
self_thread.downloaded = len(response.read())
def download(self):
total_downloaded = 0
connections = [
self.connect(self.host) for i in range(self.runs)
]
total_start_time = time()
for current_file in SpeedTest.DOWNLOAD_FILES:
threads = []
for run in range(self.runs):
thread = Thread(
target=self.downloadthread,
args=(connections[run],
'%s?x=%d' % (current_file, int(time() * 1000))))
thread.run_number = run + 1
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
total_downloaded += thread.downloaded
LOG.debug('Run %d for %s finished',
thread.run_number, current_file)
total_ms = (time() - total_start_time) * 1000
for connection in connections:
connection.close()
LOG.info('Took %d ms to download %d bytes',
total_ms, total_downloaded)
return total_downloaded * 8000 / total_ms
def uploadthread(self, connection, data):
url = '/speedtest/upload.php?x=%d' % randint()
connection.request('POST', url, data, {
'Connection': 'Keep-Alive',
'Content-Type': 'application/x-www-form-urlencoded'
})
response = connection.getresponse()
reply = response.read().decode('utf-8')
self_thread = currentThread()
self_thread.uploaded = int(reply.split('=')[1])
def upload(self):
connections = [
self.connect(self.host) for i in range(self.runs)
]
post_data = [
urlencode({'content0': content(s)}) for s in SpeedTest.UPLOAD_FILES
]
total_uploaded = 0
total_start_time = time()
for data in post_data:
threads = []
for run in range(self.runs):
thread = Thread(target=self.uploadthread,
args=(connections[run], data))
thread.run_number = run + 1
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
LOG.debug('Run %d for %d bytes finished',
thread.run_number, thread.uploaded)
total_uploaded += thread.uploaded
total_ms = (time() - total_start_time) * 1000
for connection in connections:
connection.close()
LOG.info('Took %d ms to upload %d bytes',
total_ms, total_uploaded)
return total_uploaded * 8000 / total_ms
def ping(self, server=None):
if not server:
server = self.host
connection = self.connect(server)
times = []
worst = 0
for _ in range(5):
total_start_time = time()
connection.request(
'GET',
'/speedtest/latency.txt?x=%d' % randint(),
None,
{'Connection': 'Keep-Alive'})
response = connection.getresponse()
response.read()
total_ms = time() - total_start_time
times.append(total_ms)
if total_ms > worst:
worst = total_ms
times.remove(worst)
total_ms = sum(times) * 250 # * 1000 / number of tries (4) = 250
connection.close()
LOG.debug('Latency for %s - %d', server, total_ms)
return total_ms
def chooseserver(self):
connection = self.connect('www.speedtest.net')
now = int(time() * 1000)
# really contribute to speedtest.net OS statistics
# maybe they won't block us again...
extra_headers = {
'Connection': 'Keep-Alive',
'User-Agent': self.USER_AGENTS.get(platform.system(), self.USER_AGENTS['Linux'])
}
connection.request(
'GET', '/speedtest-config.php?x=%d' % now, None, extra_headers)
response = connection.getresponse()
reply = response.read().decode('utf-8')
match = re.search(
r'<client ip="([^"]*)" lat="([^"]*)" lon="([^"]*)"', reply)
location = None
if match is None:
LOG.info('Failed to retrieve coordinates')
return None
location = match.groups()
LOG.info('Your IP: %s', location[0])
LOG.info('Your latitude: %s', location[1])
LOG.info('Your longitude: %s', location[2])
connection.request(
'GET', '/speedtest-servers.php?x=%d' % now, None, extra_headers)
response = connection.getresponse()
reply = response.read().decode('utf-8')
server_list = re.findall(
r'<server url="([^"]*)" lat="([^"]*)" lon="([^"]*)"', reply)
my_lat = float(location[1])
my_lon = float(location[2])
sorted_server_list = []
for server in server_list:
s_lat = float(server[1])
s_lon = float(server[2])
distance = sqrt(pow(s_lat - my_lat, 2) + pow(s_lon - my_lon, 2))
bisect.insort_left(sorted_server_list, (distance, server[0]))
best_server = (999999, '')
for server in sorted_server_list[:10]:
LOG.debug(server[1])
match = re.search(
r'http://([^/]+)/speedtest/upload\.php', server[1])
if match is None:
continue
server_host = match.groups()[0]
latency = self.ping(server_host)
if latency < best_server[0]:
best_server = (latency, server_host)
if not best_server[1]:
raise Exception('Cannot find a test server')
LOG.debug('Best server: %s', best_server[1])
return best_server[1]
def content(length):
"""Return alphanumeric string of indicated length."""
cycle = itertools.cycle(SpeedTest.ALPHABET)
return ''.join(next(cycle) for i in range(length))
def init_logging(loglevel=logging.WARNING):
"""Initialize program logger."""
scriptlogger = logging.getLogger(__program__)
# ensure logger is not reconfigured
# it would be nice to use hasHandlers here, but that's Python 3 only
if not scriptlogger.handlers:
# set log level
scriptlogger.setLevel(loglevel)
# log message format
fmt = '%(name)s:%(levelname)s: %(message)s'
# configure terminal log
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(logging.Formatter(fmt))
scriptlogger.addHandler(streamhandler)
def parseargs(args):
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
"""argparse.RawTextHelpFormatter._split_lines"""
if text.startswith('r|'):
return text[2:].splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
def positive_int(value):
try:
ivalue = int(value)
if ivalue < 0:
raise ValueError
return ivalue
except ValueError:
raise argparse.ArgumentTypeError(
'invalid positive int value: %r' % value)
def format_enum(value):
if value.lower() not in __supported_formats__:
raise argparse.ArgumentTypeError(
'output format not supported: %r' % value)
return value
parser = argparse.ArgumentParser(
add_help=False,
description=__description__,
formatter_class=SmartFormatter,
usage='%(prog)s [OPTION]...')
parser.add_argument(
'-d', '--debug',
default=0,
help='set http connection debug level (default is 0)',
metavar='L',
type=positive_int)
parser.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser.add_argument(
'-m', '--mode',
choices=range(1, 8),
default=7,
help='''r|test mode: 1 - download
2 - upload
4 - ping
1 + 2 + 4 = 7 - all (default)''',
metavar='M',
type=int)
parser.add_argument(
'-r', '--runs',
default=2,
help='use N runs (default is 2)',
metavar='N',
type=positive_int)
parser.add_argument(
'-s', '--server',
help='use specific server',
metavar='H')
parser.add_argument(
'-f', '--format',
default='default',
help='output format ' + str(__supported_formats__),
metavar='F',
type=format_enum)
parser.add_argument(
'-v', '--verbose',
action='store_true',
dest='verbose',
help='output additional information')
parser.add_argument(
'--version',
action='version',
version='%(prog)s ' + __version__)
return parser.parse_args(args)
def perform_speedtest(opts):
speedtest = SpeedTest(opts.server, opts.debug, opts.runs)
if opts.format in __supported_formats__:
if opts.format == 'default':
print('Using server: %s' % speedtest.host)
if opts.mode & 4 == 4:
print('Ping: %d ms' % speedtest.ping())
if opts.mode & 1 == 1:
print('Download speed: %s' % pretty_speed(speedtest.download()))
if opts.mode & 2 == 2:
print('Upload speed: %s' % pretty_speed(speedtest.upload()))
else:
stats = dict(server=speedtest.host)
if opts.mode & 4 == 4:
stats['ping'] = speedtest.ping()
if opts.mode & 1 == 1:
stats['download'] = speedtest.download()
if opts.mode & 2 == 2:
stats['upload'] = speedtest.upload()
if opts.format == 'json':
from json import dumps
print(dumps(stats))
elif opts.format == 'xml':
from xml.etree.ElementTree import Element, tostring
xml = Element('data')
for key, val in stats.items():
child = Element(key)
child.text = str(val)
xml.append(child)
print(tostring(xml).decode('utf-8'))
else:
raise Exception('Output format not supported: %s' % opts.format)
def main(args=None):
opts = parseargs(args)
init_logging(logging.DEBUG if opts.verbose else logging.WARNING)
try:
perform_speedtest(opts)
except Exception as e:
if opts.verbose:
LOG.exception(e)
else:
LOG.error(e)
sys.exit(1)
def pretty_speed(speed):
units = ['bps', 'Kbps', 'Mbps', 'Gbps']
unit = 0
while speed >= 1024:
speed /= 1024
unit += 1
return '%0.2f %s' % (speed, units[unit])
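# For example (illustrative): pretty_speed(2500000) returns '2.38 Mbps',
# since 2500000 / 1024 / 1024 is roughly 2.38.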
def randint():
"""Return a random 12 digit integer."""
return random.randint(100000000000, 999999999999)
LOG = logging.getLogger(__program__)
if __name__ == '__main__':
main()
| mit | 3,044,778,514,834,765,000 | 30.863636 | 103 | 0.546362 | false |
ProjetSigma/backend | sigma_chat/models/message.py | 1 | 1271 | # -*- coding: utf-8 -*-
from django.db import models
from sigma_chat.models.chat_member import ChatMember
from sigma_chat.models.chat import Chat
def chat_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
return 'uploads/chats/{0}/{1}'.format(instance.chat_id.id, filename)
class Message(models.Model):
text = models.TextField(blank=True)
chatmember_id = models.ForeignKey(ChatMember, related_name='chatmember_message')
chat_id = models.ForeignKey(Chat, related_name='message')
date = models.DateTimeField(auto_now=True)
attachment = models.FileField(upload_to=chat_directory_path, blank=True)
################################################################
# PERMISSIONS #
################################################################
@staticmethod
def has_read_permission(request):
return True
def has_object_read_permission(self, request):
return request.user.is_member(self.chat)
@staticmethod
def has_write_permission(request):
return True
def has_object_write_permission(self, request):
return request.user == self.chatmember.user and self.chatmember.is_member
| agpl-3.0 | 1,941,828,085,756,653,000 | 34.305556 | 84 | 0.608183 | false |
electrumalt/electrum-ixc | gui/qt/util.py | 1 | 6755 | from electrum_ixc.i18n import _
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import os.path
import time
import traceback
import sys
import threading
import platform
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
class WaitingDialog(QThread):
def __init__(self, parent, message, run_task, on_success=None, on_complete=None):
QThread.__init__(self)
self.parent = parent
self.d = QDialog(parent)
self.d.setWindowTitle('Please wait')
l = QLabel(message)
vbox = QVBoxLayout(self.d)
vbox.addWidget(l)
self.run_task = run_task
self.on_success = on_success
self.on_complete = on_complete
self.d.connect(self.d, SIGNAL('done'), self.close)
self.d.show()
def run(self):
self.error = None
try:
self.result = self.run_task()
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.error = str(e)
self.d.emit(SIGNAL('done'))
def close(self):
self.d.accept()
if self.error:
QMessageBox.warning(self.parent, _('Error'), self.error, _('OK'))
else:
if self.on_success:
if type(self.result) is not tuple:
self.result = (self.result,)
self.on_success(*self.result)
if self.on_complete:
self.on_complete()
class Timer(QThread):
def run(self):
while True:
self.emit(SIGNAL('timersignal'))
time.sleep(0.5)
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
apply(self.func,())
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(20)
self.alt = None
self.clicked.connect(self.onclick)
def set_alt(self, func):
self.alt = func
def onclick(self):
if self.alt:
apply(self.alt)
else:
QMessageBox.information(self, 'Help', self.help_text, 'OK')
def close_button(dialog, label=None):
hbox = QHBoxLayout()
hbox.addStretch(1)
b = QPushButton(label or _("Close"))
hbox.addWidget(b)
b.clicked.connect(dialog.close)
b.setDefault(True)
return hbox
def ok_cancel_buttons2(dialog, ok_label=None, cancel_label=None):
hbox = QHBoxLayout()
hbox.addStretch(1)
b = QPushButton(cancel_label or _('Cancel'))
hbox.addWidget(b)
b.clicked.connect(dialog.reject)
b = QPushButton(ok_label or _("OK"))
hbox.addWidget(b)
b.clicked.connect(dialog.accept)
b.setDefault(True)
return hbox, b
def ok_cancel_buttons(dialog, ok_label=None, cancel_label=None):
hbox, b = ok_cancel_buttons2(dialog, ok_label, cancel_label)
return hbox
def line_dialog(parent, title, label, ok_label, default=None):
dialog = QDialog(parent)
dialog.setMinimumWidth(500)
dialog.setWindowTitle(title)
dialog.setModal(1)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(ok_cancel_buttons(dialog, ok_label))
if dialog.exec_():
return unicode(txt.text())
def text_dialog(parent, title, label, ok_label, default=None):
from qrtextedit import ScanQRTextEdit
dialog = QDialog(parent)
dialog.setMinimumWidth(500)
dialog.setWindowTitle(title)
dialog.setModal(1)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = ScanQRTextEdit(parent)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(ok_cancel_buttons(dialog, ok_label))
if dialog.exec_():
return unicode(txt.toPlainText())
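# Builds a row containing an address line edit plus a button that cycles
# through the supplied addresses.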
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses:
address_e.setText(addresses[0])
def func():
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
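# Builds a CSV/JSON export selector: format radio buttons, a path box with a
# "File" save-dialog button, and a file extension that tracks the chosen
# format. Returns the layout, the path widget and the CSV radio button.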
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', unicode(os.path.expanduser('~')))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = unicode(filename_e.text())
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p = unicode( QFileDialog.getSaveFileName(None, select_msg, text, _filter))
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = unicode(filename_e.text())
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class MyTreeWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self, parent)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.itemActivated.connect(self.on_activated)
def on_activated(self, item):
if not item: return
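        # Probe the viewport in 5-pixel steps to find the item's vertical
        # position, then emit a context-menu request just inside the item so
        # that activating it (Enter/double-click) behaves like a right-click.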
for i in range(0,self.viewport().height()/5):
if self.itemAt(QPoint(0,i*5)) == item:
break
else:
return
for j in range(0,30):
if self.itemAt(QPoint(0,i*5 + j)) != item:
break
self.emit(SIGNAL('customContextMenuRequested(const QPoint&)'), QPoint(50, i*5 + j - 1))
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done", _('OK')))
t.start()
app.exec_()
| gpl-3.0 | -9,210,945,300,034,786,000 | 26.684426 | 133 | 0.610067 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/internals/construction.py | 1 | 26379 | """
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from collections import OrderedDict, abc
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
import pandas.compat as compat
from pandas.compat import PY36, raise_with_traceback
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_extension_type,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndexClass,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
from pandas.core.arrays import Categorical, ExtensionArray, period_array
from pandas.core.index import (
Index,
_get_objs_combined_axis,
_union_indexes,
ensure_index,
)
from pandas.core.indexes import base as ibase
from pandas.core.internals import (
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
from pandas.core.internals.arrays import extract_array
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [ensure_index(columns), index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy()
return mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def init_ndarray(values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype(
dtype
):
if not hasattr(values, "dtype"):
values = prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
elif is_extension_array_dtype(values):
# GH#19157
if columns is None:
columns = [0]
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError(
"failed to cast to '{dtype}' (Exception "
"was: {orig})".format(dtype=dtype, orig=orig)
)
raise_with_traceback(e)
index, columns = _get_axes(*values.shape, index=index, columns=columns)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
if values.ndim == 2 and values.shape[0] != 1:
# transpose and separate blocks
dvals_list = [maybe_infer_to_datetimelike(row) for row in values]
for n in range(len(dvals_list)):
if isinstance(dvals_list[n], np.ndarray):
dvals_list[n] = dvals_list[n].reshape(1, -1)
from pandas.core.internals.blocks import make_block
# TODO: What about re-joining object columns?
block_values = [
make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list))
]
else:
datelike_vals = maybe_infer_to_datetimelike(values)
block_values = [datelike_vals]
else:
block_values = [values]
return create_block_manager_from_blocks(block_values, [columns, index])
def init_dict(data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
from pandas.core.series import Series
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isnull()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or np.issubdtype(dtype, np.flexible):
# GH#1783
nan_dtype = object
else:
nan_dtype = dtype
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
else:
keys = com.dict_keys_to_ordered_list(data)
columns = data_names = Index(keys)
arrays = (com.maybe_iterable_to_list(data[k]) for k in keys)
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
arrays = [
arr if not isinstance(arr, ABCIndexClass) else arr._data for arr in arrays
]
arrays = [
arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
]
return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
# ---------------------------------------------------------------------
def prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, ABCSeries, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], "len"):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except (ValueError, TypeError):
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError("Must pass 2-d input")
return values
def _homogenize(data, index, dtype=None):
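    """
    Align each column (Series, array or dict) to `index` and coerce it to
    `dtype`, returning a list of sanitized 1-D arrays.
    """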
oindex = None
homogenized = []
for val in data:
if isinstance(val, ABCSeries):
if dtype is not None:
val = val.astype(dtype)
if val.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
val = val.reindex(index, copy=False)
else:
if isinstance(val, dict):
if oindex is None:
oindex = index.astype("O")
if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
val = com.dict_compat(val)
else:
val = dict(val)
val = lib.fast_multiget(val, oindex.values, default=np.nan)
val = sanitize_array(
val, index, dtype=dtype, copy=False, raise_cast_failure=False
)
homogenized.append(val)
return homogenized
def extract_index(data):
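    """
    Infer a common index from the passed columns: union the indexes of any
    Series/dicts, otherwise fall back to a default integer index whose length
    must match the raw array lengths.
    """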
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
have_ordered = False
for val in data:
if isinstance(val, ABCSeries):
have_series = True
indexes.append(val.index)
elif isinstance(val, dict):
have_dicts = True
if isinstance(val, OrderedDict):
have_ordered = True
indexes.append(list(val.keys()))
elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(val))
if not indexes and not raw_lengths:
raise ValueError("If using all scalar values, you must pass an index")
if have_series:
index = _union_indexes(indexes)
elif have_dicts:
index = _union_indexes(indexes, sort=not (compat.PY36 or have_ordered))
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError("arrays must all be same length")
if have_dicts:
raise ValueError(
"Mixing dicts with non-Series may lead to ambiguous ordering."
)
if have_series:
if lengths[0] != len(index):
msg = (
"array length {length} does not match index "
"length {idx_len}".format(length=lengths[0], idx_len=len(index))
)
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
def reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (
columns is not None
and len(columns)
and arr_columns is not None
and len(arr_columns)
):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def get_names_from_index(data):
has_some_name = any(getattr(s, "name", None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index = list(range(len(data)))
count = 0
for i, s in enumerate(data):
n = getattr(s, "name", None)
if n is not None:
index[i] = n
else:
index[i] = "Unnamed {count}".format(count=count)
count += 1
return index
def _get_axes(N, K, index, columns):
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns.
"""
if isinstance(data, ABCDataFrame):
if columns is not None:
arrays = [
data._ixs(i, axis=1).values
for i, col in enumerate(data.columns)
if col in columns
]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], abc.Mapping):
return _list_of_dict_to_arrays(
data, columns, coerce_float=coerce_float, dtype=dtype
)
elif isinstance(data[0], ABCSeries):
return _list_of_series_to_arrays(
data, columns, coerce_float=coerce_float, dtype=dtype
)
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif (
isinstance(data, (np.ndarray, ABCSeries, Index))
and data.dtype.names is not None
):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = [tuple(x) for x in data]
return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
# gh-26429 do not raise user-facing AssertionError
try:
result = _convert_object_array(
content, columns, dtype=dtype, coerce_float=coerce_float
)
except AssertionError as e:
raise ValueError(e) from e
return result
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _get_objs_combined_axis(data, sort=False)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, "index", None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = com.values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(
content, columns, dtype=dtype, coerce_float=coerce_float
)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
"""Convert list of dicts to numpy arrays
if `columns` is not passed, column names are inferred from the records
- for OrderedDict and (on Python>=3.6) dicts, the column names match
the key insertion-order from the first record to the last.
- For other kinds of dict-likes, the keys are lexically sorted.
Parameters
----------
data : iterable
collection of records (OrderedDict, dict)
columns: iterables or None
coerce_float : bool
dtype : np.dtype
Returns
-------
tuple
arrays, columns
"""
if columns is None:
gen = (list(x.keys()) for x in data)
types = (dict, OrderedDict) if PY36 else OrderedDict
sort = not any(isinstance(d, types) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(
content, columns, dtype=dtype, coerce_float=coerce_float
)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = ibase.default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
"{col:d} columns passed, passed data had "
"{con} columns".format(col=len(columns), con=len(content))
)
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
# ---------------------------------------------------------------------
# Series-Based
def sanitize_index(data, index, copy=False):
"""
Sanitize an index type to return an ndarray of the underlying, pass
through a non-Index.
"""
if index is None:
return data
if len(data) != len(index):
raise ValueError("Length of values does not match length of index")
if isinstance(data, ABCIndexClass) and not copy:
pass
elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
data = data._values
if copy:
data = data.copy()
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ["M", "m"]:
data = sanitize_array(data, index, copy=copy)
return data
def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""
Sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified.
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
# extract ndarray or ExtensionArray, ensure we have no PandasArray
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
subarr = _try_cast(data, dtype, copy, True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = np.array(data, copy=False)
else:
            # we will try to copy by definition here
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
elif isinstance(data, ExtensionArray):
# it is already ensured above this is not a PandasArray
subarr = data
if dtype is not None:
subarr = subarr.astype(dtype, copy=copy)
elif copy:
subarr = subarr.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
arr = np.arange(data.start, data.stop, data.step, dtype="int64")
subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
else:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
    # This is to prevent a mixed-type Series from being cast entirely to
    # NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, str):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
if (
not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype))
and is_object_dtype(subarr.dtype)
and not is_object_dtype(dtype)
):
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred == "period":
try:
subarr = period_array(subarr)
except IncompatibleFrequency:
pass
return subarr
def _try_cast(arr, dtype, copy, raise_cast_failure):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : array-like
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
"""
# perf shortcut as this is the most common case
if isinstance(arr, np.ndarray):
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
        # GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
subarr = maybe_cast_to_integer_array(arr, dtype)
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (
is_list_like(subarr)
and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_type(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
except OutOfBoundsDatetime:
# in case of out of bound datetime64 -> always raise
raise
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
# that Categorical is the only array type for 'category'.
subarr = Categorical(arr, dtype.categories, ordered=dtype._ordered)
elif is_extension_array_dtype(dtype):
# create an extension array from its dtype
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
| apache-2.0 | -9,052,426,239,437,025,000 | 31.566667 | 88 | 0.585731 | false |
michaelkuty/mbot | mbot/backends/slack.py | 1 | 3363 |
import logging
from mbot.event import Event
from mbot.state import User
from mbot.utils.packages import install_package
from .base import Dispatcher
LOG = logging.getLogger(__name__)
class Slack(Dispatcher):
"""Yet another simple bot
Uses slack RTM and calls middlewares for all messages
:param: slack_token: slack token
"""
@property
def sc(self):
"""Returns slack client"""
if not hasattr(self, "client"):
try:
from slackclient import SlackClient
except ImportError:
install_package("slackclient")
from slackclient import SlackClient
self.client = SlackClient(self.conf['token'])
return self.client
def connect(self):
"""Returns True if connection is successfull"""
try:
return self.sc.rtm_connect()
except Exception as e:
LOG.exception(e)
def upload(self, data, initial_comment=None, channel=None):
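        """Uploads attachment data through the Slack `files.upload` API call;
        accepts either a list of per-file dicts or a single attachments
        payload for the given channel."""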
if isinstance(data, list):
results = []
for datum in data:
results.append(self.sc.api_call(
"files.upload",
channel=channel,
**datum))
return results
response = self.sc.api_call(
"files.upload",
channel=channel,
attachments=data)
LOG.debug(response)
return response
def read(self):
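        # A failed RTM read usually means the websocket dropped; reconnect
        # once and retry before giving up.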
try:
events = self.sc.rtm_read()
except:
self.connect()
try:
events = self.sc.rtm_read()
except Exception as e:
LOG.exception(e)
return self.process_events(events)
def send(self, *args, **kwargs):
return self.sc.rtm_send_message(*args, **kwargs)
def reply(self, message, text, attachments=None, *args, **kwargs):
"""Reply to a message"""
if 'channel' not in message.body:
LOG.error("Cannot reply on message %s" % message)
return
if attachments:
return self.upload(attachments, text, message.body['channel'])
return self.send(message.body['channel'], text, *args, **kwargs)
@property
def author_id(self):
if not hasattr(self, "_author_id"):
self._author_id = self.sc.api_call("auth.test")['user_id']
return self._author_id
def process_events(self, events):
"""Returns new events
"""
_events = []
for event in events:
# skip own events
if 'user' in event and event['user'] == self.author_id:
continue
# skip event types
msg_type = event.get("type", None)
            if msg_type and self.process_types:
                if msg_type not in self.process_types:
continue
_events.append(Event(self.bot, self, event))
return _events
def get_users(self):
"""Returns dictionary of users"""
try:
members = self.sc.api_call("users.list")['members']
except:
members = []
return {u['id']: User(**{
'name': u['name'],
'real_name': u['real_name'],
'is_bot': u['is_bot'],
'id': u['id'],
}) for u in members}
| mit | -8,143,825,461,480,550,000 | 25.480315 | 74 | 0.531966 | false |
hiconversion/spark-ec2 | spark_ec2.py | 1 | 65557 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division, print_function, with_statement
import codecs
import hashlib
import itertools
import logging
import os
import os.path
import pipes
import random
import shutil
import string
from stat import S_IRUSR
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
if sys.version < "3":
from urllib2 import urlopen, Request, HTTPError
else:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
raw_input = input
xrange = range
SPARK_EC2_VERSION = "2.1.1"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
VALID_SPARK_VERSIONS = set([
"0.7.3",
"0.8.0",
"0.8.1",
"0.9.0",
"0.9.1",
"0.9.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.1.0",
"1.1.1",
"1.2.0",
"1.2.1",
"1.3.0",
"1.3.1",
"1.4.0",
"1.4.1",
"1.5.0",
"1.5.1",
"1.5.2",
"1.6.0",
"1.6.1",
"1.6.2",
"1.6.3",
"2.0.0-preview",
"2.0.0",
"2.0.1",
"2.0.2",
"2.1.0",
"2.1.1",
"2.2.0",
"2.2.1",
"2.3.0",
"2.3.1",
"2.3.2",
"2.4.0",
"2.4.1",
"2.4.2",
"2.4.3",
"2.4.4",
"2.4.5",
"2.4.6",
"2.4.7",
"3.0.0",
"3.0.1",
"3.0.2",
"3.1.1"
])
SPARK_TACHYON_MAP = {
"1.0.0": "0.4.1",
"1.0.1": "0.4.1",
"1.0.2": "0.4.1",
"1.1.0": "0.5.0",
"1.1.1": "0.5.0",
"1.2.0": "0.5.0",
"1.2.1": "0.5.0",
"1.3.0": "0.5.0",
"1.3.1": "0.5.0",
"1.4.0": "0.6.4",
"1.4.1": "0.6.4",
"1.5.0": "0.7.1",
"1.5.1": "0.7.1",
"1.5.2": "0.7.1",
"1.6.0": "0.8.2",
"1.6.1": "0.8.2",
"1.6.2": "0.8.2",
"2.0.0-preview": ""
}
DEFAULT_SPARK_VERSION = SPARK_EC2_VERSION
DEFAULT_SPARK_GITHUB_REPO = "https://github.com/apache/spark"
# Default location to get the spark-ec2 scripts (and ami-list) from
DEFAULT_SPARK_EC2_GITHUB_REPO = "https://github.com/amplab/spark-ec2"
DEFAULT_SPARK_EC2_BRANCH = "branch-2.0"
def setup_external_libs(libs):
"""
Download external libraries from PyPI to SPARK_EC2_DIR/lib/ and prepend them to our PATH.
"""
PYPI_URL_PREFIX = "https://pypi.python.org/packages/source"
SPARK_EC2_LIB_DIR = os.path.join(SPARK_EC2_DIR, "lib")
if not os.path.exists(SPARK_EC2_LIB_DIR):
print("Downloading external libraries that spark-ec2 needs from PyPI to {path}...".format(
path=SPARK_EC2_LIB_DIR
))
print("This should be a one-time operation.")
os.mkdir(SPARK_EC2_LIB_DIR)
for lib in libs:
versioned_lib_name = "{n}-{v}".format(n=lib["name"], v=lib["version"])
lib_dir = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name)
if not os.path.isdir(lib_dir):
tgz_file_path = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name + ".tar.gz")
print(" - Downloading {lib}...".format(lib=lib["name"]))
lib_url = lib.get("url",
"{prefix}/{first_letter}/{lib_name}/{lib_name}-{lib_version}.tar.gz".format(
prefix=PYPI_URL_PREFIX,
first_letter=lib["name"][:1],
lib_name=lib["name"],
lib_version=lib["version"])
)
print(lib_url)
download_stream = urlopen(lib_url)
with open(tgz_file_path, "wb") as tgz_file:
tgz_file.write(download_stream.read())
with open(tgz_file_path, "rb") as tar:
if hashlib.md5(tar.read()).hexdigest() != lib["md5"]:
print("ERROR: Got wrong md5sum for {lib}.".format(lib=lib["name"]), file=stderr)
sys.exit(1)
tar = tarfile.open(tgz_file_path)
tar.extractall(path=SPARK_EC2_LIB_DIR)
tar.close()
os.remove(tgz_file_path)
print(" - Finished downloading {lib}.".format(lib=lib["name"]))
sys.path.insert(1, lib_dir)
# Only PyPI libraries are supported.
external_libs = [
{
"name": "boto",
"version": "2.47.0",
"url": "https://pypi.python.org/packages/bc/ee/e674c01b10972765511705dc77b824b550646a30994cbc428087c4910ac3/boto-2.47.0.tar.gz#md5=c7ed986a6f369fe93f04ec62d16299ac",
"md5": "c7ed986a6f369fe93f04ec62d16299ac"
}
]
setup_external_libs(external_libs)
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
pass
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(
prog="spark-ec2",
version="%prog {v}".format(v=SPARK_EC2_VERSION),
usage="%prog [options] <action> <cluster_name>\n\n"
+ "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves")
parser.add_option(
"-s", "--slaves", type="int", default=1,
help="Number of slaves to launch (default: %default)")
parser.add_option(
"-w", "--wait", type="int",
help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
parser.add_option(
"-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option(
"-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option(
"-p", "--profile", default=None,
help="If you have multiple profiles (AWS or boto config), you can configure " +
"additional, named profiles by using this option (default: %default)")
parser.add_option(
"-t", "--instance-type", default="m1.large",
help="Type of instance to launch (default: %default). " +
"WARNING: must be 64-bit; small instances won't work")
parser.add_option(
"-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option(
"-r", "--region", default="us-east-1",
help="EC2 region used to launch instances in, or to find them in (default: %default)")
parser.add_option(
"-z", "--zone", default="",
        help="Availability zone to launch instances in, or 'all' to spread " +
             "slaves across multiple (an additional $0.01/Gb for bandwidth " +
             "between zones applies) (default: a single zone chosen at random)")
parser.add_option(
"-a", "--ami",
help="Amazon Machine Image ID to use")
parser.add_option(
"-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
parser.add_option(
"--spark-git-repo",
default=DEFAULT_SPARK_GITHUB_REPO,
help="Github repo from which to checkout supplied commit hash (default: %default)")
parser.add_option(
"--spark-ec2-git-repo",
default=DEFAULT_SPARK_EC2_GITHUB_REPO,
help="Github repo from which to checkout spark-ec2 (default: %default)")
parser.add_option(
"--spark-ec2-git-branch",
default=DEFAULT_SPARK_EC2_BRANCH,
help="Github repo branch of spark-ec2 to use (default: %default)")
parser.add_option(
"--deploy-root-dir",
default=None,
help="A directory to copy into / on the first master. " +
"Must be absolute. Note that a trailing slash is handled as per rsync: " +
"If you omit it, the last directory of the --deploy-root-dir path will be created " +
"in / before copying its contents. If you append the trailing slash, " +
"the directory is not created and its contents are copied directly into /. " +
"(default: %default).")
parser.add_option(
"--hadoop-major-version", default="yarn",
help="Major version of Hadoop. Valid options are 1 (Hadoop 1.0.4), 2 (CDH 4.2.0), yarn " +
"(Hadoop 2.4.0) (default: %default)")
parser.add_option(
"-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option(
"--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option(
"--ebs-vol-size", metavar="SIZE", type="int", default=200,
help="Size (in GB) of each EBS volume.")
parser.add_option(
"--ebs-vol-type", default="gp2",
help="EBS volume type (e.g. 'gp2', 'standard').")
parser.add_option(
"--ebs-vol-num", type="int", default=1,
help="Number of EBS volumes to attach to each node as /vol[x]. " +
"The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs. " +
"EBS volumes are only attached if --ebs-vol-size > 0. " +
"Only support up to 8 EBS volumes.")
parser.add_option(
"--placement-group", type="string", default=None,
help="Which placement group to try and launch " +
"instances into. Assumes placement group is already " +
"created.")
parser.add_option(
"--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: %default)")
parser.add_option(
"--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option(
"--ganglia", action="store_true", default=True,
help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
"the Ganglia page will be publicly accessible")
parser.add_option(
"--no-ganglia", action="store_false", dest="ganglia",
help="Disable Ganglia monitoring for the cluster")
parser.add_option(
"-u", "--user", default="root",
help="The SSH user you want to connect as (default: %default)")
parser.add_option(
"--delete-groups", action="store_true", default=False,
help="When destroying a cluster, delete the security groups that were created")
parser.add_option(
"--use-existing-master", action="store_true", default=False,
help="Launch fresh slaves, but use an existing stopped master if possible")
parser.add_option(
"--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES. Not used if YARN " +
"is used as Hadoop major version (default: %default)")
parser.add_option(
"--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
"(e.g -Dspark.worker.timeout=180)")
parser.add_option(
"--user-data", type="string", default="",
help="Path to a user-data file (most AMIs interpret this as an initialization script)")
parser.add_option(
"--authorized-address", type="string", default="0.0.0.0/0",
help="Address to authorize on created security groups (default: %default)")
parser.add_option(
"--additional-security-group", type="string", default="",
help="Additional security group to place the machines in")
parser.add_option(
"--additional-tags", type="string", default="",
help="Additional tags to set on the machines; tags are comma-separated, while name and " +
"value are colon separated; ex: \"Task:MySparkProject,Env:production\"")
parser.add_option(
"--tag-volumes", action="store_true", default=False,
help="Apply the tags given in --additional-tags to any EBS volumes " +
"attached to master and slave instances.")
parser.add_option(
"--copy-aws-credentials", action="store_true", default=False,
help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
parser.add_option(
"--subnet-id", default=None,
help="VPC subnet to launch instances in")
parser.add_option(
"--vpc-id", default=None,
help="VPC to launch instances in")
parser.add_option(
"--private-ips", action="store_true", default=False,
help="Use private IPs for instances rather than public if VPC/subnet " +
"requires that.")
parser.add_option(
"--instance-initiated-shutdown-behavior", default="stop",
choices=["stop", "terminate"],
help="Whether instances should terminate when shut down or just stop")
parser.add_option(
"--instance-profile-name", default=None,
help="IAM profile name to launch instances under")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
# Boto config check
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html
home_dir = os.getenv('HOME')
if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
if not os.path.isfile('/etc/boto.cfg'):
# If there is no boto config, check aws credentials
            if home_dir is None or not os.path.isfile(home_dir + '/.aws/credentials'):
if os.getenv('AWS_ACCESS_KEY_ID') is None:
print("ERROR: The environment variable AWS_ACCESS_KEY_ID must be set",
file=stderr)
sys.exit(1)
if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print("ERROR: The environment variable AWS_SECRET_ACCESS_KEY must be set",
file=stderr)
sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name, vpc_id):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print("Creating security group " + name)
return conn.create_security_group(name, "Spark EC2 group", vpc_id)
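# Reject Spark 2.x (and newer) together with a non-"yarn" Hadoop major
# version, since that combination is not supported by these scripts.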
def validate_spark_hadoop_version(spark_version, hadoop_version):
if "." in spark_version:
parts = spark_version.split(".")
if parts[0].isdigit():
spark_major_version = float(parts[0])
if spark_major_version > 1.0 and hadoop_version != "yarn":
print("Spark version: {v}, does not support Hadoop version: {hv}".
format(v=spark_version, hv=hadoop_version), file=stderr)
sys.exit(1)
else:
print("Invalid Spark version: {v}".format(v=spark_version), file=stderr)
sys.exit(1)
def get_validate_spark_version(version, repo):
if "." in version:
# Remove leading v to handle inputs like v1.5.0
version = version.lstrip("v")
if version not in VALID_SPARK_VERSIONS:
print("Don't know about Spark version: {v}".format(v=version), file=stderr)
sys.exit(1)
return version
else:
github_commit_url = "{repo}/commit/{commit_hash}".format(repo=repo, commit_hash=version)
request = Request(github_commit_url)
request.get_method = lambda: 'HEAD'
try:
response = urlopen(request)
except HTTPError as e:
print("Couldn't validate Spark commit: {url}".format(url=github_commit_url),
file=stderr)
print("Received HTTP response code of {code}.".format(code=e.code), file=stderr)
sys.exit(1)
return version
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
EC2_INSTANCE_TYPES = {
"c1.medium": "pvm",
"c1.xlarge": "pvm",
"c3.large": "hvm",
"c3.xlarge": "hvm",
"c3.2xlarge": "hvm",
"c3.4xlarge": "hvm",
"c3.8xlarge": "hvm",
"c4.large": "hvm",
"c4.xlarge": "hvm",
"c4.2xlarge": "hvm",
"c4.4xlarge": "hvm",
"c4.8xlarge": "hvm",
"cc1.4xlarge": "hvm",
"cc2.8xlarge": "hvm",
"cg1.4xlarge": "hvm",
"cr1.8xlarge": "hvm",
"d2.xlarge": "hvm",
"d2.2xlarge": "hvm",
"d2.4xlarge": "hvm",
"d2.8xlarge": "hvm",
"g2.2xlarge": "hvm",
"g2.8xlarge": "hvm",
"hi1.4xlarge": "pvm",
"hs1.8xlarge": "pvm",
"i2.xlarge": "hvm",
"i2.2xlarge": "hvm",
"i2.4xlarge": "hvm",
"i2.8xlarge": "hvm",
"m1.small": "pvm",
"m1.medium": "pvm",
"m1.large": "pvm",
"m1.xlarge": "pvm",
"m2.xlarge": "pvm",
"m2.2xlarge": "pvm",
"m2.4xlarge": "pvm",
"m3.medium": "hvm",
"m3.large": "hvm",
"m3.xlarge": "hvm",
"m3.2xlarge": "hvm",
"m4.large": "hvm",
"m4.xlarge": "hvm",
"m4.2xlarge": "hvm",
"m4.4xlarge": "hvm",
"m4.10xlarge": "hvm",
"r3.large": "hvm",
"r3.xlarge": "hvm",
"r3.2xlarge": "hvm",
"r3.4xlarge": "hvm",
"r3.8xlarge": "hvm",
"r4.large": "hvm",
"r4.xlarge": "hvm",
"r4.2xlarge": "hvm",
"r4.4xlarge": "hvm",
"r4.8xlarge": "hvm",
"r4.16xlarge": "hvm",
"r5.large": "hvm",
"r5.xlarge": "hvm",
"r5.2xlarge": "hvm",
"r5.4xlarge": "hvm",
"r5.8xlarge": "hvm",
"r5.12xlarge": "hvm",
"r5.24xlarge": "hvm",
"r5a.large": "hvm",
"r5a.xlarge": "hvm",
"r5a.2xlarge": "hvm",
"r5a.4xlarge": "hvm",
"r5a.12xlarge": "hvm",
"r5a.24xlarge": "hvm",
"t1.micro": "pvm",
"t2.micro": "hvm",
"t2.small": "hvm",
"t2.medium": "hvm",
"t2.large": "hvm",
"x1.16xlarge": "hvm",
"x1.32xlarge": "hvm"
}
def get_tachyon_version(spark_version):
return SPARK_TACHYON_MAP.get(spark_version, "")
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
def get_spark_ami(opts):
if opts.instance_type in EC2_INSTANCE_TYPES:
instance_type = EC2_INSTANCE_TYPES[opts.instance_type]
else:
instance_type = "pvm"
print("Don't recognize %s, assuming type is pvm" % opts.instance_type, file=stderr)
# URL prefix from which to fetch AMI information
ami_prefix = "{r}/{b}/ami-list".format(
r=opts.spark_ec2_git_repo.replace("https://github.com", "https://raw.github.com", 1),
b=opts.spark_ec2_git_branch)
ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
reader = codecs.getreader("ascii")
try:
ami = reader(urlopen(ami_path)).read().strip()
except:
print("Could not resolve AMI at: " + ami_path, file=stderr)
sys.exit(1)
print("Spark AMI: " + ami)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print("ERROR: Must provide an identity file (-i) for ssh connections.", file=stderr)
sys.exit(1)
if opts.key_pair is None:
print("ERROR: Must provide a key pair name (-k) to use on instances.", file=stderr)
sys.exit(1)
user_data_content = None
if opts.user_data:
with open(opts.user_data) as user_data_file:
user_data_content = user_data_file.read()
print("Setting up security groups... customized")
master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
authorized_address = opts.authorized_address
    ssh_strict_group_name = 'ssh-strict-sg'
ssh_strict_group = get_or_make_group(conn, ssh_strict_group_name, opts.vpc_id)
if master_group.rules == []: # Group was just now created
if opts.vpc_id is None:
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
else:
# master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=master_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=master_group)
# master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=master_group)
# master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=slave_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=slave_group)
# master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=slave_group)
master_group.authorize('tcp', 8080, 8081, authorized_address) # spark master,worker ui
master_group.authorize('tcp', 18080, 18080, authorized_address) # spark history ui
# master_group.authorize('tcp', 19999, 19999, authorized_address) # tachyon
# master_group.authorize('tcp', 50030, 50030, authorized_address) # mapred jobtracker
# master_group.authorize('tcp', 50070, 50070, authorized_address) # hdfs / dfs health
# master_group.authorize('tcp', 60070, 60070, authorized_address) # ???
master_group.authorize('tcp', 4040, 4045, authorized_address) # ??? spark running job/application ui
## Rstudio (GUI for R) needs port 8787 for web access
# master_group.authorize('tcp', 8787, 8787, authorized_address)
## HDFS NFS gateway requires 111,2049,4242 for tcp & udp
# master_group.authorize('tcp', 111, 111, authorized_address)
# master_group.authorize('udp', 111, 111, authorized_address)
# master_group.authorize('tcp', 2049, 2049, authorized_address)
# master_group.authorize('udp', 2049, 2049, authorized_address)
# master_group.authorize('tcp', 4242, 4242, authorized_address)
# master_group.authorize('udp', 4242, 4242, authorized_address)
# RM in YARN mode uses 8088
# master_group.authorize('tcp', 8088, 8088, authorized_address) # hadoop cluster ui
# if opts.ganglia:
# master_group.authorize('tcp', 5080, 5080, authorized_address)
if slave_group.rules == []: # Group was just now created
if opts.vpc_id is None:
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
else:
# slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=master_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=master_group)
# slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=master_group)
# slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=slave_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=slave_group)
# slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=slave_group)
# slave_group.authorize('tcp', 8080, 8081, authorized_address)
# slave_group.authorize('tcp', 50060, 50060, authorized_address)
# slave_group.authorize('tcp', 50075, 50075, authorized_address)
# slave_group.authorize('tcp', 60060, 60060, authorized_address)
# slave_group.authorize('tcp', 60075, 60075, authorized_address)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print("ERROR: There are already instances running in group %s or %s" %
(master_group.name, slave_group.name), file=stderr)
sys.exit(1)
# Figure out Spark AMI
if opts.ami is None:
opts.ami = get_spark_ami(opts)
# we use group ids to work around https://github.com/boto/boto/issues/350
additional_group_ids = []
if opts.additional_security_group:
additional_group_ids = [sg.id
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
# our custom security group
print("Adding custom security group...")
additional_group_ids.append(ssh_strict_group.id)
print("Launching instances...")
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print("Could not find AMI " + opts.ami, file=stderr)
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.ascii_letters[i + 1]
block_map[name] = dev
# Launch slaves
if opts.spot_price is not None:
# Launch spot instances with the requested price
print("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
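            # get_partition spreads opts.slaves as evenly as possible across
            # the available zones.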
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_profile_name=opts.instance_profile_name)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print("Waiting for spot instances to be granted...")
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print("All %d slaves granted" % opts.slaves)
reservations = conn.get_all_reservations(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print("%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves))
except:
print("Canceling spot instance requests")
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print(("WARNING: %d instances are still running" % running), file=stderr)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
slave_nodes += slave_res.instances
print("Launched {s} slave{plural_s} in {z}, regid = {r}".format(
s=num_slaves_this_zone,
plural_s=('' if num_slaves_this_zone == 1 else 's'),
z=zone,
r=slave_res.id))
i += 1
# Launch or resume masters
if existing_masters:
print("Starting master...")
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(
key_name=opts.key_pair,
security_group_ids=[master_group.id] + additional_group_ids,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
master_nodes = master_res.instances
        print("Launched master in %s, regid = %s" % (opts.zone, master_res.id))
# This wait time corresponds to SPARK-4983
print("Waiting for AWS to propagate instance metadata...")
time.sleep(15)
# Give the instances descriptive names and set additional tags
additional_tags = {}
if opts.additional_tags.strip():
additional_tags = dict(
map(str.strip, tag.split(':', 1)) for tag in opts.additional_tags.split(',')
)
print('Applying tags to master nodes')
for master in master_nodes:
master.add_tags(
dict(additional_tags, Name='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
)
print('Applying tags to slave nodes')
for slave in slave_nodes:
slave.add_tags(
dict(additional_tags, Name='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
)
if opts.tag_volumes:
if len(additional_tags) > 0:
print('Applying tags to volumes')
all_instance_ids = [x.id for x in master_nodes + slave_nodes]
volumes = conn.get_all_volumes(filters={'attachment.instance-id': all_instance_ids})
for v in volumes:
v.add_tags(additional_tags)
else:
print('--tag-volumes has no effect without --additional-tags')
# Return all the instances
return (master_nodes, slave_nodes)
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
"""
Get the EC2 instances in an existing cluster if available.
Returns a tuple of lists of EC2 instance objects for the masters and slaves.
"""
print("Searching for existing cluster {c} in region {r}...".format(
c=cluster_name, r=opts.region))
def get_instances(group_names):
"""
Get all non-terminated instances that belong to any of the provided security groups.
EC2 reservation filters and instance states are documented here:
http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
"""
reservations = conn.get_all_reservations(
filters={"instance.group-name": group_names})
instances = itertools.chain.from_iterable(r.instances for r in reservations)
return [i for i in instances if i.state not in ["shutting-down", "terminated"]]
master_instances = get_instances([cluster_name + "-master"])
slave_instances = get_instances([cluster_name + "-slaves"])
if any((master_instances, slave_instances)):
print("Found {m} master{plural_m}, {s} slave{plural_s}.".format(
m=len(master_instances),
plural_m=('' if len(master_instances) == 1 else 's'),
s=len(slave_instances),
plural_s=('' if len(slave_instances) == 1 else 's')))
if not master_instances and die_on_error:
print("ERROR: Could not find a master for cluster {c} in region {r}.".format(
c=cluster_name, r=opts.region), file=sys.stderr)
sys.exit(1)
return (master_instances, slave_instances)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
master = get_dns_name(master_nodes[0], opts.private_ips)
if deploy_ssh_key:
print("Generating cluster's SSH key on master...")
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master, opts, key_setup)
dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
print("Transferring cluster's SSH key to slaves...")
for slave in slave_nodes:
slave_address = get_dns_name(slave, opts.private_ips)
print(slave_address)
ssh_write(slave_address, opts, ['tar', 'x'], dot_ssh_tar)
# orginal full set of modules
# modules = ['spark', 'ephemeral-hdfs', 'persistent-hdfs',
# 'mapreduce', 'spark-standalone', 'tachyon', 'rstudio']
# install minimal set of modules
modules = ['spark', 'ephemeral-hdfs', 'spark-standalone']
if opts.hadoop_major_version == "1":
modules = list(filter(lambda x: x != "mapreduce", modules))
if opts.ganglia:
modules.append('ganglia')
# Clear SPARK_WORKER_INSTANCES if running on YARN
if opts.hadoop_major_version == "yarn":
opts.worker_instances = ""
# NOTE: We should clone the repository before running deploy_files to
# prevent ec2-variables.sh from being overwritten
print("Cloning spark-ec2 scripts from {r}/tree/{b} on master...".format(
r=opts.spark_ec2_git_repo, b=opts.spark_ec2_git_branch))
ssh(
host=master,
opts=opts,
command="rm -rf spark-ec2"
+ " && "
+ "git clone {r} -b {b} spark-ec2".format(r=opts.spark_ec2_git_repo,
b=opts.spark_ec2_git_branch)
)
print("Deploying files to master...")
deploy_files(
conn=conn,
root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
opts=opts,
master_nodes=master_nodes,
slave_nodes=slave_nodes,
modules=modules
)
if opts.deploy_root_dir is not None:
print("Deploying {s} to master...".format(s=opts.deploy_root_dir))
deploy_user_files(
root_dir=opts.deploy_root_dir,
opts=opts,
master_nodes=master_nodes
)
print("Running setup on master...")
setup_spark_cluster(master, opts)
print("Done!")
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
print("Spark standalone cluster started at http://%s:8080" % master)
if opts.ganglia:
print("Ganglia started at http://%s:5080/ganglia" % master)
def is_ssh_available(host, opts, print_ssh_output=True):
"""
Check if SSH is available on a host.
"""
s = subprocess.Popen(
ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
'%s@%s' % (opts.user, host), stringify_command('true')],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT # we pipe stderr through stdout to preserve output order
)
cmd_output = s.communicate()[0] # [1] is stderr, which we redirected to stdout
if s.returncode != 0 and print_ssh_output:
# extra leading newline is for spacing in wait_for_cluster_state()
print(textwrap.dedent("""\n
Warning: SSH connection error. (This could be temporary.)
Host: {h}
SSH return code: {r}
SSH output: {o}
""").format(
h=host,
r=s.returncode,
o=cmd_output.strip()
))
return s.returncode == 0
def is_cluster_ssh_available(cluster_instances, opts):
"""
Check if SSH is available on all the instances in a cluster.
"""
for i in cluster_instances:
dns_name = get_dns_name(i, opts.private_ips)
if not is_ssh_available(host=dns_name, opts=opts):
return False
else:
return True
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
"""
Wait for all the instances in the cluster to reach a designated state.
cluster_instances: a list of boto.ec2.instance.Instance
cluster_state: a string representing the desired state of all the instances in the cluster
value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
'running', 'terminated', etc.
(would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
"""
sys.stdout.write(
"Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
)
sys.stdout.flush()
start_time = datetime.now()
num_attempts = 0
while True:
time.sleep(5 * num_attempts) # seconds
for i in cluster_instances:
i.update()
max_batch = 100
statuses = []
for j in xrange(0, len(cluster_instances), max_batch):
batch = [i.id for i in cluster_instances[j:j + max_batch]]
statuses.extend(conn.get_all_instance_status(instance_ids=batch))
if cluster_state == 'ssh-ready':
if all(i.state == 'running' for i in cluster_instances) and \
all(s.system_status.status == 'ok' for s in statuses) and \
all(s.instance_status.status == 'ok' for s in statuses) and \
is_cluster_ssh_available(cluster_instances, opts):
break
else:
if all(i.state == cluster_state for i in cluster_instances):
break
num_attempts += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
end_time = datetime.now()
print("Cluster is now in '{s}' state. Waited {t} seconds.".format(
s=cluster_state,
t=(end_time - start_time).seconds
))
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
disks_by_instance = {
"c1.medium": 1,
"c1.xlarge": 4,
"c3.large": 2,
"c3.xlarge": 2,
"c3.2xlarge": 2,
"c3.4xlarge": 2,
"c3.8xlarge": 2,
"c4.large": 0,
"c4.xlarge": 0,
"c4.2xlarge": 0,
"c4.4xlarge": 0,
"c4.8xlarge": 0,
"cc1.4xlarge": 2,
"cc2.8xlarge": 4,
"cg1.4xlarge": 2,
"cr1.8xlarge": 2,
"d2.xlarge": 3,
"d2.2xlarge": 6,
"d2.4xlarge": 12,
"d2.8xlarge": 24,
"g2.2xlarge": 1,
"g2.8xlarge": 2,
"hi1.4xlarge": 2,
"hs1.8xlarge": 24,
"i2.xlarge": 1,
"i2.2xlarge": 2,
"i2.4xlarge": 4,
"i2.8xlarge": 8,
"m1.small": 1,
"m1.medium": 1,
"m1.large": 2,
"m1.xlarge": 4,
"m2.xlarge": 1,
"m2.2xlarge": 1,
"m2.4xlarge": 2,
"m3.medium": 1,
"m3.large": 1,
"m3.xlarge": 2,
"m3.2xlarge": 2,
"m4.large": 0,
"m4.xlarge": 0,
"m4.2xlarge": 0,
"m4.4xlarge": 0,
"m4.10xlarge": 0,
"r3.large": 1,
"r3.xlarge": 1,
"r3.2xlarge": 1,
"r3.4xlarge": 1,
"r3.8xlarge": 2,
"r4.large": 0,
"r4.xlarge": 0,
"r4.2xlarge": 0,
"r4.4xlarge": 0,
"r4.8xlarge": 0,
"r4.16xlarge": 0,
"r5.large": 0,
"r5.xlarge": 0,
"r5.2xlarge": 0,
"r5.4xlarge": 0,
"r5.8xlarge": 0,
"r5.12xlarge": 0,
"r5.24xlarge": 0,
"r5a.large": 0,
"r5a.xlarge": 0,
"r5a.2xlarge": 0,
"r5a.4xlarge": 0,
"r5a.12xlarge": 0,
"r5a.24xlarge": 0,
"t1.micro": 0,
"t2.micro": 0,
"t2.small": 0,
"t2.medium": 0,
"t2.large": 0,
"x1.16xlarge": 1,
"x1.32xlarge": 2,
}
if instance_type in disks_by_instance:
return disks_by_instance[instance_type]
else:
print("WARNING: Don't know number of disks on instance type %s; assuming 1"
% instance_type, file=stderr)
return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
active_master = get_dns_name(master_nodes[0], opts.private_ips)
num_disks = get_num_disks(opts.instance_type)
hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
mapred_local_dirs = "/mnt/hadoop/mrlocal"
spark_local_dirs = "/mnt/spark"
if num_disks > 1:
for i in range(2, num_disks + 1):
hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
spark_local_dirs += ",/mnt%d/spark" % i
cluster_url = "%s:7077" % active_master
if "." in opts.spark_version:
# Pre-built Spark deploy
spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
validate_spark_hadoop_version(spark_v, opts.hadoop_major_version)
tachyon_v = get_tachyon_version(spark_v)
else:
# Spark-only custom deploy
spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
tachyon_v = ""
if tachyon_v == "":
print("No valid Tachyon version found; Tachyon won't be set up")
try:
modules.remove("tachyon")
except ValueError:
pass # ignore
master_addresses = [get_dns_name(i, opts.private_ips) for i in master_nodes]
slave_addresses = [get_dns_name(i, opts.private_ips) for i in slave_nodes]
worker_instances_str = "%d" % opts.worker_instances if opts.worker_instances else ""
template_vars = {
"master_list": '\n'.join(master_addresses),
"active_master": active_master,
"slave_list": '\n'.join(slave_addresses),
"cluster_url": cluster_url,
"hdfs_data_dirs": hdfs_data_dirs,
"mapred_local_dirs": mapred_local_dirs,
"spark_local_dirs": spark_local_dirs,
"swap": str(opts.swap),
"modules": '\n'.join(modules),
"spark_version": spark_v,
"tachyon_version": tachyon_v,
"hadoop_major_version": opts.hadoop_major_version,
"spark_worker_instances": worker_instances_str,
"spark_master_opts": opts.master_opts
}
if opts.copy_aws_credentials:
template_vars["aws_access_key_id"] = conn.aws_access_key_id
template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
else:
template_vars["aws_access_key_id"] = ""
template_vars["aws_secret_access_key"] = ""
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
tmp_dir = tempfile.mkdtemp()
for path, dirs, files in os.walk(root_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(root_dir):])
local_dir = tmp_dir + dest_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
local_file = tmp_dir + dest_file
with open(os.path.join(path, filename)) as src:
with open(local_file, "w") as dest:
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key])
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
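# Illustrative sketch (not part of the original spark-ec2 script): deploy_files()
# fills the deploy.generic templates by replacing "{{key}}" placeholders with the
# computed cluster values. The template line and hostname below are hypothetical.
def _example_template_substitution():
    template_vars = {"active_master": "ec2-12-34-56-78.compute-1.amazonaws.com"}
    text = "export EXAMPLE_MASTER='{{active_master}}'"
    for key in template_vars:
        text = text.replace("{{" + key + "}}", template_vars[key])
    return text  # "export EXAMPLE_MASTER='ec2-12-34-56-78.compute-1.amazonaws.com'"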
# Deploy a given local directory to a cluster, WITHOUT parameter substitution.
# Note that unlike deploy_files, this works for binary files.
# Also, it is up to the user to add (or not) the trailing slash in root_dir.
# Files are only deployed to the first master instance in the cluster.
#
# root_dir should be an absolute path.
def deploy_user_files(root_dir, opts, master_nodes):
active_master = get_dns_name(master_nodes[0], opts.private_ips)
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s" % root_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
def stringify_command(parts):
if isinstance(parts, str):
return parts
else:
return ' '.join(map(pipes.quote, parts))
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no']
parts += ['-o', 'UserKnownHostsFile=/dev/null']
if opts.identity_file is not None:
parts += ['-i', opts.identity_file]
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
stringify_command(command)])
except subprocess.CalledProcessError as e:
if tries > 5:
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError(
"Failed to SSH to remote host {0}.\n"
"Please check that you have provided the correct --identity-file and "
"--key-pair parameters and try again.".format(host))
else:
raise e
print("Error executing remote command, retrying after 30 seconds: {0}".format(e),
file=stderr)
time.sleep(30)
tries = tries + 1
# Backported from Python 2.7 for compatibility with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
return _check_output(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)])
def ssh_write(host, opts, command, arguments):
tries = 0
while True:
proc = subprocess.Popen(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
stdin=subprocess.PIPE)
proc.stdin.write(arguments)
proc.stdin.close()
status = proc.wait()
if status == 0:
break
elif tries > 5:
raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
else:
print("Error {0} while executing remote command, retrying after 30 seconds".
format(status), file=stderr)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
if opts.zone == 'all':
zones = [z.name for z in conn.get_all_zones()]
else:
zones = [opts.zone]
return zones
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
num_slaves_this_zone = total // num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
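# Illustrative sketch (not part of the original spark-ec2 script): spreading
# 10 slaves over 3 availability zones with get_partition() assigns the extra
# slave from the remainder to the first zone.
def _example_get_partition():
    counts = [get_partition(10, 3, i) for i in range(3)]
    assert counts == [4, 3, 3]
    return counts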
# Gets the IP address, taking into account the --private-ips flag
def get_ip_address(instance, private_ips=False):
ip = instance.ip_address if not private_ips else \
instance.private_ip_address
return ip
# Gets the DNS name, taking into account the --private-ips flag
def get_dns_name(instance, private_ips=False):
dns = instance.public_dns_name if not private_ips else \
instance.private_ip_address
if not dns:
raise UsageError("Failed to determine hostname of {0}.\n"
"Please check that you provided --private-ips if "
"necessary".format(instance))
return dns
def real_main():
(opts, action, cluster_name) = parse_args()
# Input parameter validation
spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
validate_spark_hadoop_version(spark_v, opts.hadoop_major_version)
if opts.wait is not None:
# NOTE: DeprecationWarnings are silent in 2.7+ by default.
# To show them, run Python with the -Wdefault switch.
# See: https://docs.python.org/3.5/whatsnew/2.7.html
warnings.warn(
"This option is deprecated and has no effect. "
"spark-ec2 automatically waits as long as necessary for clusters to start up.",
DeprecationWarning
)
if opts.identity_file is not None:
if not os.path.exists(opts.identity_file):
print("ERROR: The identity file '{f}' doesn't exist.".format(f=opts.identity_file),
file=stderr)
sys.exit(1)
file_mode = os.stat(opts.identity_file).st_mode
if not (file_mode & S_IRUSR) or not oct(file_mode)[-2:] == '00':
print("ERROR: The identity file must be accessible only by you.", file=stderr)
print('You can fix this with: chmod 400 "{f}"'.format(f=opts.identity_file),
file=stderr)
sys.exit(1)
if opts.instance_type not in EC2_INSTANCE_TYPES:
print("Warning: Unrecognized EC2 instance type for instance-type: {t}".format(
t=opts.instance_type), file=stderr)
if opts.master_instance_type != "":
if opts.master_instance_type not in EC2_INSTANCE_TYPES:
print("Warning: Unrecognized EC2 instance type for master-instance-type: {t}".format(
t=opts.master_instance_type), file=stderr)
# Since we try instance types even if we can't resolve them, we check if they resolve first
# and, if they do, see if they resolve to the same virtualization type.
if opts.instance_type in EC2_INSTANCE_TYPES and \
opts.master_instance_type in EC2_INSTANCE_TYPES:
if EC2_INSTANCE_TYPES[opts.instance_type] != \
EC2_INSTANCE_TYPES[opts.master_instance_type]:
print("Error: spark-ec2 currently does not support having a master and slaves "
"with different AMI virtualization types.", file=stderr)
print("master instance virtualization type: {t}".format(
t=EC2_INSTANCE_TYPES[opts.master_instance_type]), file=stderr)
print("slave instance virtualization type: {t}".format(
t=EC2_INSTANCE_TYPES[opts.instance_type]), file=stderr)
sys.exit(1)
if opts.ebs_vol_num > 8:
print("ebs-vol-num cannot be greater than 8", file=stderr)
sys.exit(1)
# Prevent breaking ami_prefix (/, .git and startswith checks)
# Prevent forks with non spark-ec2 names for now.
if opts.spark_ec2_git_repo.endswith("/") or \
opts.spark_ec2_git_repo.endswith(".git") or \
not opts.spark_ec2_git_repo.startswith("https://github.com") or \
not opts.spark_ec2_git_repo.endswith("spark-ec2"):
print("spark-ec2-git-repo must be a github repo and it must not have a trailing / or .git. "
"Furthermore, we currently only support forks named spark-ec2.", file=stderr)
sys.exit(1)
if not (opts.deploy_root_dir is None or
(os.path.isabs(opts.deploy_root_dir) and
os.path.isdir(opts.deploy_root_dir) and
os.path.exists(opts.deploy_root_dir))):
print("--deploy-root-dir must be an absolute path to a directory that exists "
"on the local file system", file=stderr)
sys.exit(1)
try:
if opts.profile is None:
conn = ec2.connect_to_region(opts.region)
else:
conn = ec2.connect_to_region(opts.region, profile_name=opts.profile)
except Exception as e:
print((e), file=stderr)
sys.exit(1)
# Select an AZ at random if it was not specified.
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if action == "launch":
if opts.slaves <= 0:
print("ERROR: You have to start at least 1 slave", file=sys.stderr)
sys.exit(1)
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
(master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
elif action == "destroy":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
if any(master_nodes + slave_nodes):
print("The following instances will be terminated:")
for inst in master_nodes + slave_nodes:
print("> %s" % get_dns_name(inst, opts.private_ips))
print("ALL DATA ON ALL NODES WILL BE LOST!!")
msg = "Are you sure you want to destroy the cluster {c}? (y/N) ".format(c=cluster_name)
response = raw_input(msg)
if response == "y":
print("Terminating master...")
for inst in master_nodes:
inst.terminate()
print("Terminating slaves...")
for inst in slave_nodes:
inst.terminate()
# Delete security groups as well
if opts.delete_groups:
group_names = [cluster_name + "-master", cluster_name + "-slaves"]
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='terminated'
)
print("Deleting security groups (this will take some time)...")
attempt = 1
while attempt <= 3:
print("Attempt %d" % attempt)
groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
success = True
# Delete individual rules in all groups before deleting groups to
# remove dependencies between them
for group in groups:
print("Deleting rules in security group " + group.name)
for rule in group.rules:
for grant in rule.grants:
success &= group.revoke(ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group=grant)
# Sleep for AWS eventual-consistency to catch up, and for instances
# to terminate
time.sleep(30) # Yes, it does have to be this long :-(
for group in groups:
try:
                    # Use group_id here so that deletion also works with VPC security groups
conn.delete_security_group(group_id=group.id)
print("Deleted security group %s" % group.name)
except boto.exception.EC2ResponseError:
success = False
print("Failed to delete security group %s" % group.name)
# Unfortunately, group.revoke() returns True even if a rule was not
# deleted, so this needs to be rerun if something fails
if success:
break
attempt += 1
if not success:
print("Failed to delete all security groups after 3 tries.")
print("Try re-running in a few minutes.")
elif action == "login":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
if not master_nodes[0].public_dns_name and not opts.private_ips:
print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
else:
master = get_dns_name(master_nodes[0], opts.private_ips)
print("Logging into master " + master + "...")
proxy_opt = []
if opts.proxy_port is not None:
proxy_opt = ['-D', opts.proxy_port]
subprocess.check_call(
ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
elif action == "reboot-slaves":
response = raw_input(
"Are you sure you want to reboot the cluster " +
cluster_name + " slaves?\n" +
"Reboot cluster slaves " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Rebooting slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
print("Rebooting " + inst.id)
inst.reboot()
elif action == "get-master":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
if not master_nodes[0].public_dns_name and not opts.private_ips:
print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
else:
print(get_dns_name(master_nodes[0], opts.private_ips))
elif action == "stop":
response = raw_input(
"Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Stopping master...")
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print("Stopping slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
elif action == "start":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print("Starting slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print("Starting master...")
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
# Determine types of running instances
existing_master_type = master_nodes[0].instance_type
existing_slave_type = slave_nodes[0].instance_type
# Setting opts.master_instance_type to the empty string indicates we
# have the same instance type for the master and the slaves
if existing_master_type == existing_slave_type:
existing_master_type = ""
opts.master_instance_type = existing_master_type
opts.instance_type = existing_slave_type
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
else:
print("Invalid action: %s" % action, file=stderr)
sys.exit(1)
def main():
try:
real_main()
except UsageError as e:
print("\nError:\n", e, file=stderr)
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
| apache-2.0 | 808,082,597,537,682,300 | 38.779733 | 173 | 0.574553 | false |
grembo/ice | python/test/Ice/objects/TestI.py | 1 | 5256 | # **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import Ice, Test
class BI(Test.B):
def __init__(self):
self.preMarshalInvoked = False
self.postUnmarshalInvoked = False
def ice_preMarshal(self):
self.preMarshalInvoked = True
def ice_postUnmarshal(self):
self.postUnmarshalInvoked = True
class CI(Test.C):
def __init__(self):
self.preMarshalInvoked = False
self.postUnmarshalInvoked = False
def ice_preMarshal(self):
self.preMarshalInvoked = True
def ice_postUnmarshal(self):
self.postUnmarshalInvoked = True
class DI(Test.D):
def __init__(self):
self.preMarshalInvoked = False
self.postUnmarshalInvoked = False
def ice_preMarshal(self):
self.preMarshalInvoked = True
def ice_postUnmarshal(self):
self.postUnmarshalInvoked = True
class EI(Test.E):
def __init__(self):
Test.E.__init__(self, 1, "hello")
def checkValues(self, current=None):
return self._i == 1 and self._s == "hello"
class FI(Test.F):
def __init__(self, e=None):
Test.F.__init__(self, e, e)
def checkValues(self, current=None):
return self._e1 != None and self._e1 == self.e2
class II(Ice.InterfaceByValue):
def __init__(self):
Ice.InterfaceByValue.__init__(self, "::Test::I")
class JI(Ice.InterfaceByValue):
def __init__(self):
Ice.InterfaceByValue.__init__(self, "::Test::J")
class HI(Test.H):
pass
class InitialI(Test.Initial):
def __init__(self, adapter):
self._adapter = adapter
self._b1 = BI()
self._b2 = BI()
self._c = CI()
self._d = DI()
self._e = EI()
self._f = FI(self._e)
self._b1.theA = self._b2 # Cyclic reference to another B
self._b1.theB = self._b1 # Self reference.
self._b1.theC = None # Null reference.
self._b2.theA = self._b2 # Self reference, using base.
self._b2.theB = self._b1 # Cyclic reference to another B
self._b2.theC = self._c # Cyclic reference to a C.
self._c.theB = self._b2 # Cyclic reference to a B.
self._d.theA = self._b1 # Reference to a B.
self._d.theB = self._b2 # Reference to a B.
self._d.theC = None # Reference to a C.
def shutdown(self, current=None):
self._adapter.getCommunicator().shutdown()
def getB1(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
return self._b1
def getB2(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
return self._b2
def getC(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
return self._c
def getD(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
self._d.preMarshalInvoked = False
return self._d
def getE(self, current=None):
return self._e
def getF(self, current=None):
return self._f
def setRecursive(self, r, current):
pass
def supportsClassGraphDepthMax(self, current):
return True
def getMB(self, current):
return Test.Initial.GetMBMarshaledResult(self._b1, current)
def getAMDMB(self, current):
return Ice.Future.completed(Test.Initial.GetAMDMBMarshaledResult(self._b1, current))
def getAll(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
self._d.preMarshalInvoked = False
return (self._b1, self._b2, self._c, self._d)
def getI(self, current=None):
return II()
def getJ(self, current=None):
return JI()
def getH(self, current=None):
return HI()
def getD1(self, d1, current=None):
return d1
def throwEDerived(self, current=None):
raise Test.EDerived(Test.A1("a1"), Test.A1("a2"), Test.A1("a3"), Test.A1("a4"))
def setI(self, i, current=None):
pass
def opBaseSeq(self, inSeq, current=None):
return (inSeq, inSeq)
def getCompact(self, current=None):
return Test.CompactExt()
def getInnerA(self, current=None):
return Test.Inner.A(self._b1)
def getInnerSubA(self, current=None):
return Test.Inner.Sub.A(Test.Inner.A(self._b1))
def throwInnerEx(self, current=None):
raise Test.Inner.Ex("Inner::Ex")
def throwInnerSubEx(self, current=None):
raise Test.Inner.Sub.Ex("Inner::Sub::Ex")
class UnexpectedObjectExceptionTestI(Test.UnexpectedObjectExceptionTest):
def op(self, current=None):
return Test.AlsoEmpty()
| gpl-2.0 | 8,795,476,075,585,207,000 | 27.410811 | 92 | 0.599125 | false |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/kmpfit_compare_wei_unwei.py | 1 | 3993 | #!/usr/bin/env python
#------------------------------------------------------------
# Purpose: Demonstrate the quality improvement of a weighted vs.
#          an unweighted fit for the Wolberg data. Wolberg's
#          best-fit parameters for a weighted fit are not
#          accurate: (a,b) = (1.8926, 4.9982).
#          Improved values are derived from the analytical
#          solutions and kmpfit: (a,b) = (1.8705, 5.0290)
#
# Vog, 01 Jan 2012
#------------------------------------------------------------
import numpy
from numpy.random import normal
from kapteyn import kmpfit
def model(p, x):
a, b = p
return a + b*x
def residuals(p, my_arrays):
x, y, err = my_arrays
a, b = p
return (y-model(p,x))/err
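# Illustrative addition (not part of the original example): for a straight line
# y = a + b*x the weighted least-squares problem also has a closed-form solution.
# Called with (x, y, errw) after the arrays below are defined, it should
# reproduce, up to rounding, the improved values quoted in the header comment.
def analytic_solution(x, y, err):
    w = 1.0/err**2
    S = w.sum()
    Sx = (w*x).sum()
    Sy = (w*y).sum()
    Sxx = (w*x*x).sum()
    Sxy = (w*x*y).sum()
    D = S*Sxx - Sx*Sx
    a = (Sxx*Sy - Sx*Sxy)/D
    b = (S*Sxy - Sx*Sy)/D
    return a, b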
x = numpy.array([1.0, 2, 3, 4, 5, 6, 7])
y = numpy.array([6.9, 11.95, 16.8, 22.5, 26.2, 33.5, 41.0])
N = len(y)
err = numpy.ones(N)
errw = numpy.array([0.05, 0.1, 0.2, 0.5, 0.8, 1.5, 4.0])
print("Data x:", x)
print("Data y:", y)
print("Errors:", errw)
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, err))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit unit weighting wi=1.0:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Covariance matrix:")
print(fitobj.covar)
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, 10*err))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with (scaled) equal weights wi=10*1.0:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Covariance matrix:")
print(fitobj.covar)
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errw))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with weights:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Minimum reduced chi^2: ", fitobj.rchi2_min)
print("Covariance matrix:")
print(fitobj.covar)
rchi2 = fitobj.rchi2_min # Store for future scaling purposes
errw10 = errw * 10.0
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errw10))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with scaled individual errors (factor=10):")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Minimum reduced chi^2: ", fitobj.rchi2_min)
print("Covariance matrix:")
print(fitobj.covar)
scaled_errw = errw * numpy.sqrt(rchi2)
print("""\n\nNew array with measurement errors, scaled with factor %g to give
a reduced chi-squared of 1.0:"""%rchi2)
print(scaled_errw)
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, scaled_errw))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with scaled individual errors to force red_chi2=1:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Minimum reduced chi^2: ", fitobj.rchi2_min)
print("Covariance matrix:")
print(fitobj.covar)
 | bsd-3-clause | 6,116,116,169,382,425,000 | 41.489362 | 79 | 0.611821 | false |
ngageoint/scale | scale/job/test/tasks/test_manager.py | 1 | 5616 | from __future__ import unicode_literals
import datetime
import django
from django.test import TestCase
from django.utils.timezone import now
import job.test.utils as job_test_utils
from job.tasks.base_task import Task
from job.tasks.manager import TaskManager
from job.tasks.node_task import NodeTask
from job.tasks.update import TaskStatusUpdate
from node.resources.node_resources import NodeResources
# Non-abstract class to test implementation of base NodeTask class
class ImplementedNodeTask(NodeTask):
def __init__(self, task_id, task_name, agent_id):
"""Constructor
"""
super(ImplementedNodeTask, self).__init__(task_id, task_name, agent_id)
self.task_type = 'impl-node-task'
self.title = 'Implemented Node Task'
self.description = 'Test description'
def get_resources(self):
"""Returns the resources that are required/have been scheduled for this task
:returns: The scheduled resources for this task
:rtype: :class:`node.resources.node_resources.NodeResources`
"""
return NodeResources()
# Non-abstract class to test implementation of base SystemTask class
from scheduler.tasks.system_task import SystemTask
class ImplementedSystemTask(SystemTask):
def __init__(self, task_id, task_name, agent_id):
"""Constructor
"""
super(ImplementedSystemTask, self).__init__(task_id, task_name)
self.agent_id = agent_id
self.task_type = 'impl-system-task'
self.title = 'Implemented System Task'
self.description = 'Test description'
def get_resources(self):
"""Returns the resources that are required/have been scheduled for this task
:returns: The scheduled resources for this task
:rtype: :class:`node.resources.node_resources.NodeResources`
"""
return NodeResources()
# Non-abstract class to test implementation of base Task class
class ImplementedTask(Task):
def get_resources(self):
"""Returns the resources that are required/have been scheduled for this task
:returns: The scheduled resources for this task
:rtype: :class:`node.resources.node_resources.NodeResources`
"""
return NodeResources()
class TestTaskManager(TestCase):
"""Tests the TaskManager class"""
def setUp(self):
django.setup()
def test_generate_status_json(self):
"""Tests calling TaskManager.generate_status_json()"""
task_id = 'task_1'
task_name = 'My Task'
agent_id = 'agent_1'
task_1 = ImplementedNodeTask(task_id, task_name, agent_id)
task_id = 'task_2'
task_name = 'My Task'
agent_id = 'agent_1'
task_2 = ImplementedNodeTask(task_id, task_name, agent_id)
task_id = 'task_3'
task_name = 'My Task'
agent_id = 'agent_1'
task_3 = ImplementedSystemTask(task_id, task_name, agent_id)
task_id = 'task_4'
task_name = 'My Task'
agent_id = 'agent_1'
task_4 = ImplementedSystemTask(task_id, task_name, agent_id)
when = now()
manager = TaskManager()
manager.launch_tasks([task_1, task_2, task_3, task_4], when)
nodes_list = [{'agent_id': 'agent_1'}]
manager.generate_status_json(nodes_list)
self.assertEqual(nodes_list[0]['node_tasks'][0]['type'], 'impl-node-task')
self.assertEqual(nodes_list[0]['node_tasks'][0]['count'], 2)
self.assertEqual(nodes_list[0]['system_tasks'][0]['type'], 'impl-system-task')
self.assertEqual(nodes_list[0]['system_tasks'][0]['count'], 2)
def test_handle_task_update(self):
"""Tests calling TaskManager.handle_task_update()"""
task_id = 'task_1'
task_name = 'My Task'
agent_id = 'agent_1'
task_1 = ImplementedTask(task_id, task_name, agent_id)
when_launched = now()
manager = TaskManager()
manager.launch_tasks([task_1], when_launched)
when_finished = datetime.timedelta(seconds=1)
update_1 = job_test_utils.create_task_status_update(task_1.id, task_1.agent_id, TaskStatusUpdate.FINISHED,
when=when_finished)
manager.handle_task_update(update_1)
self.assertTrue(task_1.has_ended)
self.assertEqual(task_1._ended, when_finished)
update_2 = job_test_utils.create_task_status_update('task_2', 'New Agent', TaskStatusUpdate.RUNNING, when=now())
manager.handle_task_update(update_2) # Should ignore, no error
def test_launch_tasks(self):
"""Tests calling TaskManager.launch_tasks()"""
task_id = 'task_1'
task_name = 'My Task'
agent_id = 'agent_1'
task_1 = ImplementedTask(task_id, task_name, agent_id)
task_id = 'task_2'
task_name = 'My Task'
agent_id = 'agent_1'
task_2 = ImplementedTask(task_id, task_name, agent_id)
task_id = 'task_3'
task_name = 'My Task'
agent_id = 'agent_2'
task_3 = ImplementedTask(task_id, task_name, agent_id)
when = now()
manager = TaskManager()
# Duplicate task_3 to test re-launching duplicate tasks
manager.launch_tasks([task_1, task_2, task_3, task_3], when)
self.assertTrue(task_1.has_been_launched)
self.assertEqual(task_1._launched, when)
self.assertTrue(task_2.has_been_launched)
self.assertEqual(task_2._launched, when)
self.assertTrue(task_3.has_been_launched)
self.assertEqual(task_3._launched, when)
| apache-2.0 | 7,182,914,450,638,796,000 | 32.035294 | 120 | 0.631766 | false |
keen99/SickRage | sickbeard/versionChecker.py | 1 | 31984 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import platform
import subprocess
import re
import urllib
import tarfile
import stat
import traceback
import db
import time
import sickbeard
from sickbeard import notifiers
from sickbeard import ui
from sickbeard import logger, helpers
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
import requests
from requests.exceptions import RequestException
import shutil
import shutil_custom
shutil.copyfile = shutil_custom.copyfile_custom
class CheckVersion():
"""
Version check class meant to run as a thread object with the sr scheduler.
"""
def __init__(self):
self.updater = None
self.install_type = None
self.amActive = False
if sickbeard.gh:
self.install_type = self.find_install_type()
if self.install_type == 'git':
self.updater = GitUpdateManager()
elif self.install_type == 'source':
self.updater = SourceUpdateManager()
def run(self, force=False):
if self.updater:
# set current branch version
sickbeard.BRANCH = self.get_branch()
if self.check_for_new_version(force):
if sickbeard.AUTO_UPDATE:
logger.log(u"New update found for SickRage, starting auto-updater ...")
ui.notifications.message('New update found for SickRage, starting auto-updater')
if self.run_backup_if_safe() is True:
if sickbeard.versionCheckScheduler.action.update():
logger.log(u"Update was successful!")
ui.notifications.message('Update was successful')
sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
else:
logger.log(u"Update failed!")
ui.notifications.message('Update failed!')
def run_backup_if_safe(self):
return self.safe_to_update() is True and self._runbackup() is True
def _runbackup(self):
# Do a system backup before update
logger.log(u"Config backup in progress...")
ui.notifications.message('Backup', 'Config backup in progress...')
try:
backupDir = os.path.join(sickbeard.DATA_DIR, 'backup')
if not os.path.isdir(backupDir):
os.mkdir(backupDir)
if self._keeplatestbackup(backupDir) == True and self._backup(backupDir) == True:
logger.log(u"Config backup successful, updating...")
ui.notifications.message('Backup', 'Config backup successful, updating...')
return True
else:
logger.log(u"Config backup failed, aborting update",logger.ERROR)
ui.notifications.message('Backup', 'Config backup failed, aborting update')
return False
except Exception as e:
logger.log('Update: Config backup failed. Error: {0}'.format(ex(e)),logger.ERROR)
ui.notifications.message('Backup', 'Config backup failed, aborting update')
return False
def _keeplatestbackup(self,backupDir=None):
if backupDir:
import glob
files = glob.glob(os.path.join(backupDir,'*.zip'))
if not files:
return True
now = time.time()
newest = files[0], now - os.path.getctime(files[0])
for file in files[1:]:
age = now - os.path.getctime(file)
if age < newest[1]:
newest = file, age
files.remove(newest[0])
for file in files:
os.remove(file)
return True
else:
return False
# TODO: Merge with backup in helpers
def _backup(self,backupDir=None):
if backupDir:
source = [os.path.join(sickbeard.DATA_DIR, 'sickbeard.db'), sickbeard.CONFIG_FILE]
source.append(os.path.join(sickbeard.DATA_DIR, 'failed.db'))
source.append(os.path.join(sickbeard.DATA_DIR, 'cache.db'))
target = os.path.join(backupDir, 'sickrage-' + time.strftime('%Y%m%d%H%M%S') + '.zip')
for (path, dirs, files) in os.walk(sickbeard.CACHE_DIR, topdown=True):
for dirname in dirs:
if path == sickbeard.CACHE_DIR and dirname not in ['images']:
dirs.remove(dirname)
for filename in files:
source.append(os.path.join(path, filename))
if helpers.backupConfigZip(source, target, sickbeard.DATA_DIR):
return True
else:
return False
else:
return False
def safe_to_update(self):
def db_safe(self):
try:
result = self.getDBcompare(sickbeard.BRANCH)
if result == 'equal':
logger.log(u"We can proceed with the update. New update has same DB version", logger.DEBUG)
return True
elif result == 'upgrade':
logger.log(u"We can't proceed with the update. New update has a new DB version. Please manually update", logger.WARNING)
return False
elif result == 'downgrade':
                    logger.log(u"We can't proceed with the update. New update has an old DB version. It's not possible to downgrade", logger.ERROR)
return False
else:
logger.log(u"We can't proceed with the update. Unable to check remote DB version", logger.ERROR)
return False
except:
logger.log(u"We can't proceed with the update. Unable to compare DB version", logger.ERROR)
return False
def postprocessor_safe(self):
if not sickbeard.autoPostProcesserScheduler.action.amActive:
logger.log(u"We can proceed with the update. Post-Processor is not running", logger.DEBUG)
return True
else:
logger.log(u"We can't proceed with the update. Post-Processor is running", logger.DEBUG)
return False
def showupdate_safe(self):
if not sickbeard.showUpdateScheduler.action.amActive:
logger.log(u"We can proceed with the update. Shows are not being updated", logger.DEBUG)
return True
else:
logger.log(u"We can't proceed with the update. Shows are being updated", logger.DEBUG)
return False
db_safe = db_safe(self)
postprocessor_safe = postprocessor_safe(self)
showupdate_safe = showupdate_safe(self)
if db_safe == True and postprocessor_safe == True and showupdate_safe == True:
logger.log(u"Proceeding with auto update", logger.DEBUG)
return True
else:
logger.log(u"Auto update aborted", logger.DEBUG)
return False
def getDBcompare(self, branchDest):
try:
response = requests.get("https://raw.githubusercontent.com/SICKRAGETV/SickRage/" + str(branchDest) +"/sickbeard/databases/mainDB.py")
response.raise_for_status()
match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})",response.text)
branchDestDBversion = int(match.group('version'))
myDB = db.DBConnection()
branchCurrDBversion = myDB.checkDBVersion()
if branchDestDBversion > branchCurrDBversion:
return 'upgrade'
elif branchDestDBversion == branchCurrDBversion:
return 'equal'
else:
return 'downgrade'
except RequestException as e:
return 'error'
except Exception as e:
return 'error'
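    def _example_db_version_regex(self):
        # Illustrative only (not part of SickRage): getDBcompare() extracts the
        # remote schema version with a regex; for a line such as
        # "MAX_DB_VERSION = 42" it returns 42.
        match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})", "MAX_DB_VERSION = 42")
        return int(match.group('version'))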
def find_install_type(self):
"""
Determines how this copy of sr was installed.
returns: type of installation. Possible values are:
'win': any compiled windows build
'git': running from source using git
'source': running from source without git
"""
# check if we're a windows build
if sickbeard.BRANCH.startswith('build '):
install_type = 'win'
elif os.path.isdir(ek.ek(os.path.join, sickbeard.PROG_DIR, u'.git')):
install_type = 'git'
else:
install_type = 'source'
return install_type
def check_for_new_version(self, force=False):
"""
Checks the internet for a newer version.
returns: bool, True for new version or False for no new version.
force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
"""
if not self.updater or not sickbeard.VERSION_NOTIFY and not sickbeard.AUTO_UPDATE and not force:
logger.log(u"Version checking is disabled, not checking for the newest version")
return False
# checking for updates
if not sickbeard.AUTO_UPDATE:
logger.log(u"Checking for updates using " + self.install_type.upper())
if not self.updater.need_update():
sickbeard.NEWEST_VERSION_STRING = None
if force:
ui.notifications.message('No update needed')
logger.log(u"No update needed")
# no updates needed
return False
# found updates
self.updater.set_newest_text()
return True
def update(self):
if self.updater:
# update branch with current config branch value
self.updater.branch = sickbeard.BRANCH
# check for updates
if self.updater.need_update():
return self.updater.update()
def list_remote_branches(self):
if self.updater:
return self.updater.list_remote_branches()
def get_branch(self):
if self.updater:
return self.updater.branch
class UpdateManager():
def get_github_org(self):
return sickbeard.GIT_ORG
def get_github_repo(self):
return sickbeard.GIT_REPO
def get_update_url(self):
return sickbeard.WEB_ROOT + "/home/update/?pid=" + str(sickbeard.PID)
class GitUpdateManager(UpdateManager):
def __init__(self):
self._git_path = self._find_working_git()
self.github_org = self.get_github_org()
self.github_repo = self.get_github_repo()
sickbeard.BRANCH = self.branch = self._find_installed_branch()
self._cur_commit_hash = None
self._newest_commit_hash = None
self._num_commits_behind = 0
self._num_commits_ahead = 0
def get_cur_commit_hash(self):
return self._cur_commit_hash
def get_newest_commit_hash(self):
return self._newest_commit_hash
def get_cur_version(self):
return self._run_git(self._git_path, "describe --abbrev=0 " + self._cur_commit_hash)[0]
def get_newest_version(self):
return self._run_git(self._git_path, "describe --abbrev=0 " + self._newest_commit_hash)[0]
def get_num_commits_behind(self):
return self._num_commits_behind
def _git_error(self):
        error_message = 'Unable to find your git executable - Shut down SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
sickbeard.NEWEST_VERSION_STRING = error_message
def _find_working_git(self):
test_cmd = 'version'
if sickbeard.GIT_PATH:
main_git = '"' + sickbeard.GIT_PATH + '"'
else:
main_git = 'git'
logger.log(u"Checking if we can use git commands: " + main_git + ' ' + test_cmd, logger.DEBUG)
output, err, exit_status = self._run_git(main_git, test_cmd)
if exit_status == 0:
logger.log(u"Using: " + main_git, logger.DEBUG)
return main_git
else:
logger.log(u"Not using: " + main_git, logger.DEBUG)
# trying alternatives
alternative_git = []
# osx people who start sr from launchd have a broken path, so try a hail-mary attempt for them
if platform.system().lower() == 'darwin':
alternative_git.append('/usr/local/git/bin/git')
if platform.system().lower() == 'windows':
if main_git != main_git.lower():
alternative_git.append(main_git.lower())
if alternative_git:
logger.log(u"Trying known alternative git locations", logger.DEBUG)
for cur_git in alternative_git:
logger.log(u"Checking if we can use git commands: " + cur_git + ' ' + test_cmd, logger.DEBUG)
output, err, exit_status = self._run_git(cur_git, test_cmd)
if exit_status == 0:
logger.log(u"Using: " + cur_git, logger.DEBUG)
return cur_git
else:
logger.log(u"Not using: " + cur_git, logger.DEBUG)
# Still haven't found a working git
        error_message = 'Unable to find your git executable - Shut down SickRage and EITHER set git_path in your config.ini OR delete your .git folder and run from source to enable updates.'
sickbeard.NEWEST_VERSION_STRING = error_message
return None
def _run_git(self, git_path, args):
output = err = exit_status = None
if not git_path:
logger.log(u"No git specified, can't use git commands", logger.WARNING)
exit_status = 1
return (output, err, exit_status)
cmd = git_path + ' ' + args
try:
logger.log(u"Executing " + cmd + " with your shell in " + sickbeard.PROG_DIR, logger.DEBUG)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=sickbeard.PROG_DIR)
output, err = p.communicate()
exit_status = p.returncode
if output:
output = output.strip()
except OSError:
logger.log(u"Command " + cmd + " didn't work")
exit_status = 1
if exit_status == 0:
logger.log(cmd + u" : returned successful", logger.DEBUG)
exit_status = 0
elif exit_status == 1:
if 'stash' in output:
logger.log(u"Please enable 'git reset' in settings or stash your changes in local files",logger.WARNING)
else:
logger.log(cmd + u" returned : " + str(output), logger.ERROR)
exit_status = 1
elif exit_status == 128 or 'fatal:' in output or err:
logger.log(cmd + u" returned : " + str(output), logger.WARNING)
exit_status = 128
else:
logger.log(cmd + u" returned : " + str(output) + u", treat as error for now", logger.ERROR)
exit_status = 1
return (output, err, exit_status)
def _find_installed_version(self):
"""
Attempts to find the currently installed version of SickRage.
Uses git show to get commit version.
Returns: True for success or False for failure
"""
output, err, exit_status = self._run_git(self._git_path, 'rev-parse HEAD') # @UnusedVariable
if exit_status == 0 and output:
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.log(u"Output doesn't look like a hash, not using it", logger.ERROR)
return False
self._cur_commit_hash = cur_commit_hash
sickbeard.CUR_COMMIT_HASH = str(cur_commit_hash)
return True
else:
return False
def _find_installed_branch(self):
branch_info, err, exit_status = self._run_git(self._git_path, 'symbolic-ref -q HEAD') # @UnusedVariable
if exit_status == 0 and branch_info:
branch = branch_info.strip().replace('refs/heads/', '', 1)
if branch:
sickbeard.BRANCH = branch
return branch
return ""
def _check_github_for_update(self):
"""
        Uses git commands to check if there is a newer version than the provided
commit hash. If there is a newer version it sets _num_commits_behind.
"""
self._num_commits_behind = 0
self._num_commits_ahead = 0
# update remote origin url
self.update_remote_origin()
# get all new info from github
output, err, exit_status = self._run_git(self._git_path, 'fetch %s' % sickbeard.GIT_REMOTE)
if not exit_status == 0:
logger.log(u"Unable to contact github, can't check for update", logger.ERROR)
return
# get latest commit_hash from remote
output, err, exit_status = self._run_git(self._git_path, 'rev-parse --verify --quiet "@{upstream}"')
if exit_status == 0 and output:
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.log(u"Output doesn't look like a hash, not using it", logger.DEBUG)
return
else:
self._newest_commit_hash = cur_commit_hash
else:
logger.log(u"git didn't return newest commit hash", logger.DEBUG)
return
# get number of commits behind and ahead (option --count not supported git < 1.7.2)
output, err, exit_status = self._run_git(self._git_path, 'rev-list --left-right "@{upstream}"...HEAD')
if exit_status == 0 and output:
try:
self._num_commits_behind = int(output.count("<"))
self._num_commits_ahead = int(output.count(">"))
except:
logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG)
return
logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash)
+ u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + str(
self._num_commits_ahead), logger.DEBUG)
def set_newest_text(self):
# if we're up to date then don't set this
sickbeard.NEWEST_VERSION_STRING = None
if self._num_commits_ahead:
logger.log(u"Local branch is ahead of " + self.branch + ". Automatic update not possible.", logger.WARNING)
newest_text = "Local branch is ahead of " + self.branch + ". Automatic update not possible."
elif self._num_commits_behind > 0:
base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
if self._newest_commit_hash:
url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
else:
url = base_url + '/commits/'
newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a> '
newest_text += " (you're " + str(self._num_commits_behind) + " commit"
if self._num_commits_behind > 1:
newest_text += 's'
newest_text += ' behind)' + "— <a href=\"" + self.get_update_url() + "\">Update Now</a>"
else:
return
sickbeard.NEWEST_VERSION_STRING = newest_text
def need_update(self):
if self.branch != self._find_installed_branch():
logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
return True
self._find_installed_version()
if not self._cur_commit_hash:
return True
else:
try:
self._check_github_for_update()
except Exception, e:
logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.WARNING)
return False
if self._num_commits_behind > 0:
return True
return False
def update(self):
"""
Calls git pull origin <branch> in order to update SickRage. Returns a bool depending
on the call's success.
"""
# update remote origin url
self.update_remote_origin()
# remove untracked files and performs a hard reset on git branch to avoid update issues
if sickbeard.GIT_RESET:
self.clean()
self.reset()
if self.branch == self._find_installed_branch():
output, err, exit_status = self._run_git(self._git_path, 'pull -f %s %s' % (sickbeard.GIT_REMOTE, self.branch)) # @UnusedVariable
else:
output, err, exit_status = self._run_git(self._git_path, 'checkout -f ' + self.branch) # @UnusedVariable
if exit_status == 0:
self._find_installed_version()
# Notify update successful
if sickbeard.NOTIFY_ON_UPDATE:
notifiers.notify_git_update(sickbeard.CUR_COMMIT_HASH if sickbeard.CUR_COMMIT_HASH else "")
return True
else:
return False
def clean(self):
"""
Calls git clean to remove all untracked files. Returns a bool depending
on the call's success.
"""
output, err, exit_status = self._run_git(self._git_path, 'clean -df ""') # @UnusedVariable
if exit_status == 0:
return True
def reset(self):
"""
Calls git reset --hard to perform a hard reset. Returns a bool depending
on the call's success.
"""
output, err, exit_status = self._run_git(self._git_path, 'reset --hard') # @UnusedVariable
if exit_status == 0:
return True
def list_remote_branches(self):
# update remote origin url
self.update_remote_origin()
sickbeard.BRANCH = self._find_installed_branch()
branches, err, exit_status = self._run_git(self._git_path, 'ls-remote --heads %s' % sickbeard.GIT_REMOTE) # @UnusedVariable
if exit_status == 0 and branches:
if branches:
return re.findall('\S+\Wrefs/heads/(.*)', branches)
return []
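    def _example_ls_remote_parsing(self):
        # Illustrative only (not part of SickRage): the regex above turns
        # "git ls-remote --heads" output into branch names. The hashes are made up.
        sample = "d6a2b1c\trefs/heads/master\n9f8e7d6\trefs/heads/develop"
        return re.findall('\S+\Wrefs/heads/(.*)', sample)  # ['master', 'develop']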
def update_remote_origin(self):
self._run_git(self._git_path, 'config remote.%s.url %s' % (sickbeard.GIT_REMOTE, sickbeard.GIT_REMOTE_URL))
class SourceUpdateManager(UpdateManager):
def __init__(self):
self.github_org = self.get_github_org()
self.github_repo = self.get_github_repo()
self.branch = sickbeard.BRANCH
if sickbeard.BRANCH == '':
self.branch = self._find_installed_branch()
self._cur_commit_hash = sickbeard.CUR_COMMIT_HASH
self._newest_commit_hash = None
self._num_commits_behind = 0
def _find_installed_branch(self):
if sickbeard.CUR_COMMIT_BRANCH == "":
return "master"
else:
return sickbeard.CUR_COMMIT_BRANCH
def get_cur_commit_hash(self):
return self._cur_commit_hash
def get_newest_commit_hash(self):
return self._newest_commit_hash
def get_cur_version(self):
return ""
def get_newest_version(self):
return ""
def get_num_commits_behind(self):
return self._num_commits_behind
def need_update(self):
# need this to run first to set self._newest_commit_hash
try:
self._check_github_for_update()
except Exception, e:
logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.WARNING)
return False
if self.branch != self._find_installed_branch():
logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
return True
if not self._cur_commit_hash or self._num_commits_behind > 0:
return True
return False
def _check_github_for_update(self):
"""
        Uses pygithub to ask github if there is a newer version than the provided
commit hash. If there is a newer version it sets SickRage's version text.
commit_hash: hash that we're checking against
"""
self._num_commits_behind = 0
self._newest_commit_hash = None
# try to get newest commit hash and commits behind directly by comparing branch and current commit
if self._cur_commit_hash:
branch_compared = sickbeard.gh.compare(base=self.branch, head=self._cur_commit_hash)
self._newest_commit_hash = branch_compared.base_commit.sha
self._num_commits_behind = branch_compared.behind_by
# fall back and iterate over last 100 (items per page in gh_api) commits
if not self._newest_commit_hash:
for curCommit in sickbeard.gh.get_commits():
if not self._newest_commit_hash:
self._newest_commit_hash = curCommit.sha
if not self._cur_commit_hash:
break
if curCommit.sha == self._cur_commit_hash:
break
# when _cur_commit_hash doesn't match anything _num_commits_behind == 100
self._num_commits_behind += 1
logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash)
+ u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG)
def set_newest_text(self):
# if we're up to date then don't set this
sickbeard.NEWEST_VERSION_STRING = None
if not self._cur_commit_hash:
logger.log(u"Unknown current version number, don't know if we should update or not", logger.DEBUG)
newest_text = "Unknown current version number: If you've never used the SickRage upgrade system before then current version is not set."
newest_text += "— <a href=\"" + self.get_update_url() + "\">Update Now</a>"
elif self._num_commits_behind > 0:
base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
if self._newest_commit_hash:
url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash
else:
url = base_url + '/commits/'
newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a>'
newest_text += " (you're " + str(self._num_commits_behind) + " commit"
if self._num_commits_behind > 1:
newest_text += "s"
newest_text += " behind)" + "— <a href=\"" + self.get_update_url() + "\">Update Now</a>"
else:
return
sickbeard.NEWEST_VERSION_STRING = newest_text
def update(self):
"""
Downloads the latest source tarball from github and installs it over the existing version.
"""
base_url = 'http://github.com/' + self.github_org + '/' + self.github_repo
tar_download_url = base_url + '/tarball/' + self.branch
try:
# prepare the update dir
sr_update_dir = ek.ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')
if os.path.isdir(sr_update_dir):
logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
shutil.rmtree(sr_update_dir)
logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
os.makedirs(sr_update_dir)
# retrieve file
logger.log(u"Downloading update from " + repr(tar_download_url))
tar_download_path = os.path.join(sr_update_dir, u'sr-update.tar')
urllib.urlretrieve(tar_download_url, tar_download_path)
if not ek.ek(os.path.isfile, tar_download_path):
logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.WARNING)
return False
if not ek.ek(tarfile.is_tarfile, tar_download_path):
logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR)
return False
# extract to sr-update dir
logger.log(u"Extracting file " + tar_download_path)
tar = tarfile.open(tar_download_path)
tar.extractall(sr_update_dir)
tar.close()
# delete .tar.gz
logger.log(u"Deleting file " + tar_download_path)
os.remove(tar_download_path)
# find update dir name
update_dir_contents = [x for x in os.listdir(sr_update_dir) if
os.path.isdir(os.path.join(sr_update_dir, x))]
if len(update_dir_contents) != 1:
logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR)
return False
content_dir = os.path.join(sr_update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR)
for dirname, dirnames, filenames in os.walk(content_dir): # @UnusedVariable
dirname = dirname[len(content_dir) + 1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile)
# Avoid DLL access problem on WIN32/64
                    # These files need to be updated manually,
                    # or we need to find a way to kill the access from memory
if curfile in ('unrar.dll', 'unrar64.dll'):
try:
os.chmod(new_path, stat.S_IWRITE)
os.remove(new_path)
os.renames(old_path, new_path)
except Exception, e:
logger.log(u"Unable to update " + new_path + ': ' + ex(e), logger.DEBUG)
os.remove(old_path) # Trash the updated file without moving in new path
continue
if os.path.isfile(new_path):
os.remove(new_path)
os.renames(old_path, new_path)
sickbeard.CUR_COMMIT_HASH = self._newest_commit_hash
sickbeard.CUR_COMMIT_BRANCH = self.branch
except Exception, e:
logger.log(u"Error while trying to update: " + ex(e), logger.ERROR)
logger.log(u"Traceback: " + traceback.format_exc(), logger.DEBUG)
return False
# Notify update successful
notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING)
return True
def list_remote_branches(self):
return [x.name for x in sickbeard.gh.get_branches() if x]
| gpl-3.0 | 2,027,327,292,162,090,200 | 38.052503 | 189 | 0.576632 | false |
dleecefft/pcapstats | pbin/pcapsessionslicer.py | 1 | 4562 | #!/usr/bin/env python
from scapy.all import *
import re, sys, getopt, shutil
def pcapsessions(pfile):
pssn = rdpcap(pfile)
return pssn
def fullstrsplit(ipportstr):
retlist=[]
sssnlist = ipportstr.split()
# stack up the list and split out the port values
retlist.append(sssnlist[0])
tmpip = sssnlist[1].split(':')
retlist.append(tmpip[0])
retlist.append(tmpip[1])
tmpip = sssnlist[3].split(':')
retlist.append(tmpip[0])
retlist.append(tmpip[1])
return retlist
def partstrsplit(ipportstr):
retlist=[]
sssnlist = ipportstr.split()
# stack up the list and split out the port values
retlist.append(sssnlist[0])
retlist.append(sssnlist[1])
retlist.append('')
retlist.append(sssnlist[3])
retlist.append('')
return retlist
def writesessioncsv(fileline,wfile):
try:
with open(wfile,'a') as wfh:
wfh.write(fileline + "\n")
except Exception as e:
print(e)
pass
return
def sessionparse(ssnobj,include,pktgrep):
sessions = ssnobj.sessions()
for k, v in sessions.iteritems():
rxparse = re.match(r'^\w+\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}).*\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})',k)
if include and rxparse is not None:
# looking for a match
if rxparse.group(1) == pktgrep or rxparse.group(2) == pktgrep :
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
print kline
#print k,str(len(v))
elif rxparse is not None:
if rxparse.group(1) != pktgrep and rxparse.group(2) != pktgrep :
#print k,str(len(v))
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
print kline
elif not include and rxparse is None:
ksplit = partstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
print kline
#print k,str(len(v))
return
def sessionparsewrite(ssnobj,include,pktgrep,csvoutfile):
sessions = ssnobj.sessions()
for k, v in sessions.iteritems():
rxparse = re.match(r'^\w+\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}).*\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})',k)
if include and rxparse is not None:
# looking for a match
if rxparse.group(1) == pktgrep or rxparse.group(2) == pktgrep :
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
writesessioncsv(kline,csvoutfile)
elif rxparse is not None:
if rxparse.group(1) != pktgrep and rxparse.group(2) != pktgrep :
#print k,str(len(v))
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
writesessioncsv(kline,csvoutfile)
elif not include and rxparse is None:
ksplit = partstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
writesessioncsv(kline,csvoutfile)
#print k,str(len(v))
return
if __name__ == "__main__":
if len(sys.argv) > 3 :
action=''
outcsv=False
# Use getopt to avoid param order errors
opts, args = getopt.getopt(sys.argv[1:],"f:m:o:t:h:")
for o, a in opts:
if o == '-f':
capfile=a
elif o == '-m':
strmatch=a
elif o == '-o':
outfile=a
outcsv=True
elif o == '-t':
action=a
else:
print("Usage: %s -f file.pcap -m ip:port_string -o [outputfile] -t [exclude] <- ignore these sessions " % sys.argv[0])
exit()
else:
print("Usage: %s -f file.pcap -m ip:port_string -o [outputfile] -t [exclude] <- ignore these sessions " % sys.argv[0])
exit()
# default action is search for string provided vs exclude
if action == "exclude":
action=False
else:
action=True
# grab sessions from pcap
thisssnobj = pcapsessions(capfile)
if outcsv:
sessionparsewrite(thisssnobj,action,strmatch,outfile)
else:
sessionparse(thisssnobj,action,strmatch)
| apache-2.0 | 6,310,344,041,747,523,000 | 31.126761 | 134 | 0.531565 | false |
NicoLugil/Yafa | python/TimedActions.py | 1 | 2411 | # Copyright 2014 Nico Lugil <nico at lugil dot be>
#
# This file is part of Yafa!
#
# Yafa! is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yafa! is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Yafa. If not, see <http://www.gnu.org/licenses/>.
import sys
import string
import time
"""
Class used to space actions in time by at least interval seconds.
The first time enough_time_passed is checked it will return True;
from then on, only if interval seconds have passed since the last True.
"""
class IntervalTimer:
def __init__(self,interval):
self.iv=interval;
self.last_time=time.time() # seconds since epoch
self.did_run=False
def set_interval(self,interval):
self.iv=interval;
def reset_timer(self):
self.last_time=time.time()
def get_remaining_time(self):
time_passed = (time.time()-self.last_time)
if time_passed >= self.iv:
return 0
else:
return (self.iv-time_passed)
def enough_time_passed(self):
if not self.did_run:
self.did_run=True
self.reset_timer()
return True
else:
if (time.time()-self.last_time)>self.iv:
self.reset_timer()
return True
else:
return False
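# Illustrative usage sketch (added for clarity, not part of the original
# module); the timer name and the polled action are hypothetical:
#
#   poll_timer = IntervalTimer(5)         # allow the action every 5 seconds
#   if poll_timer.enough_time_passed():   # True on the first call, then only
#       do_periodic_work()                # after 5 more seconds have passed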
"""
Single countdown:
start (re)starts it
"""
class CountDownTimer:
def __init__(self,interval):
self.iv=interval
self.start_time=time.time()
def set_interval(self,interval):
self.iv=interval;
def start(self):
self.start_time=time.time()
def get_remaining_time(self):
time_passed = (time.time()-self.start_time)
if time_passed >= self.iv:
return 0
else:
return (self.iv-time_passed)
def end(self):
self.iv=0
def is_time_passed(self):
if(self.get_remaining_time()==0):
return True
else:
return False
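# Illustrative usage sketch (added for clarity, not part of the original
# module); the names are hypothetical:
#
#   cooldown = CountDownTimer(10)         # one-shot 10 second countdown
#   cooldown.start()                      # (re)starts the countdown
#   if cooldown.is_time_passed():         # True once the 10 seconds are up
#       cooldown.start()                  # restart it for the next cycle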
| gpl-3.0 | 1,210,667,763,575,287,800 | 29.1375 | 70 | 0.625052 | false |
haxsaw/actuator | src/tests/infra_tests.py | 1 | 29187 | #
# Copyright (c) 2014 Tom Carroll
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Created on 4 Jun 2014
'''
from actuator import (InfraModel, MultiResourceGroup, ComponentGroup, ctxt)
from actuator.modeling import (ModelReference, ModelInstanceReference, AbstractModelReference,
AbstractModelingEntity, MultiComponent)
from actuator.infra import (with_resources, InfraException, ResourceGroup,
MultiResource, MultiResourceGroup)
from actuator.provisioners.example_resources import Server, Database
MyInfra = None
def setup():
global MyInfra
class MyInfraLocal(InfraModel):
other = "some data"
server = Server("wibble", mem="16GB")
grid = MultiResource(Server("grid-comp", mem="8GB"))
database = Database("db")
workers = MultiResourceGroup("workers",
handler=Server("handler", mem="4GB"),
query=Server("query", mem="8GB"),
ncube=Server("ncube", mem="16GB"))
composite = MultiResourceGroup("grid",
grid=MultiResource(Server("grid-comp", mem="8GB")),
workers=MultiResourceGroup("inner_workers",
handler=Server("handler", mem="4GB"),
query=Server("query", mem="8GB"),
ncube=Server("ncube", mem="16GB")))
MyInfra = MyInfraLocal
def test01():
assert type({}) == type(MyInfra.__components), "the __components attr is missing or the wrong type"
def test02():
assert type(MyInfra.server) != Server, "the type of a resource attribute isn't a ref"
def test03():
assert MyInfra.server is MyInfra.server, "references aren't being reused"
def test04():
assert type(MyInfra.server.name) is ModelReference, \
"data member on a resource isn't being wrapped with a reference"
def test05():
assert MyInfra.other == "some data", "plain class attrs aren't being passed thru"
def test06():
try:
_ = MyInfra.server.wibble
assert False, "failed to raise AttributeError when accessing a bad attr"
except AttributeError, _:
pass
def test07():
assert MyInfra.server.name is MyInfra.server.name, \
"reference reuse not occurring on resource attribute"
def test08():
assert MyInfra.server.mem, "failed to create ref for kw-created attr"
def test09():
assert type(MyInfra.server.provisionedName) == ModelReference, \
"data member on a resource isn't being wrapped with a reference"
def test10():
assert MyInfra.server.value().__class__ is Server, \
"value underlying a resource ref is the wrong class"
def test11():
assert MyInfra.server.provisionedName.value() is None, \
"expected empty value for this attr"
def test12():
try:
_ = MyInfra.server['wow']
assert False, "Should not have been allowed to perform keyed access on the server"
except TypeError, _:
pass
def test13():
assert MyInfra.grid[1].__class__ == ModelReference, \
"did not get a ref for a keyed MultiResource"
def test14():
assert MyInfra.grid.__class__ == ModelReference, \
"did not get a ref for a MultiResource"
def test15():
assert MyInfra.grid[1] is MyInfra.grid[1], \
"refs not being reused for keyed MultiCcmponent"
def test16():
assert MyInfra.grid[2].name.__class__ == ModelReference
def test17():
assert (MyInfra.grid[3].name is MyInfra.grid[3].name)
def test18():
assert MyInfra.grid[4].name is not MyInfra.grid[5].name
def test19():
assert MyInfra.grid[6].mem
def test20():
assert MyInfra.server.get_path() == ["server"]
def test21():
assert MyInfra.server.mem.get_path() == ["server", "mem"]
def test22():
assert MyInfra.server.provisionedName.get_path() == ["server", "provisionedName"]
def test23():
assert MyInfra.grid.get_path() == ["grid"]
def test24():
assert MyInfra.grid[1].get_path() == ["grid", "1"]
def test25():
assert MyInfra.grid[2].mem.value() == "8GB"
def test26():
assert MyInfra.grid[22].mem.get_path() == ["grid", "22", "mem"]
def test27():
assert MyInfra.grid[5].name.get_path() == ["grid", "5", "name"]
def test28():
inst = MyInfra("test28")
assert inst
def test29():
inst = MyInfra("test29")
assert inst.other == "some data"
def test30():
inst = MyInfra("test30")
assert inst.server.__class__ is ModelInstanceReference
def test31():
inst = MyInfra("test31")
assert inst.grid.__class__ is ModelInstanceReference
def test32():
inst = MyInfra("test32")
assert inst.grid[1].__class__ is ModelInstanceReference
def test33():
inst = MyInfra("test33")
assert inst.server is inst.server
def test34():
inst = MyInfra("test34")
assert inst.grid is inst.grid
def test35():
inst = MyInfra("test35")
assert inst.grid[1] is inst.grid[1]
def test36():
inst = MyInfra("test36")
assert inst.server.provisionedName is inst.server.provisionedName
def test37():
inst = MyInfra("test37")
assert inst.grid[1].provisionedName is inst.grid[1].provisionedName
def test38():
inst = MyInfra("test38")
assert inst.grid[3] is not inst.grid[4]
def test39():
inst1 = MyInfra("test39a")
inst2 = MyInfra("test39b")
assert inst1.server is not inst2.server
def test40():
inst1 = MyInfra("test40a")
inst2 = MyInfra("test40b")
assert inst1.grid[5] is not inst2.grid[6]
def test41():
inst = MyInfra("test41")
assert inst.grid[1].name.value() == "grid-comp_1"
def test42():
inst = MyInfra("test42")
assert inst.grid[7].provisionedName.get_path() == ["grid", "7", "provisionedName"]
def test43():
inst = MyInfra("test43")
_ = inst.grid[8]
_ = inst.grid[9]
_ = inst.grid[10]
_ = inst.grid[9]
assert len(inst.grid.instances()) == 3
def test44():
inst = MyInfra("test44")
modref = MyInfra.grid[11].provisionedName
assert (modref.get_path() == inst.get_inst_ref(modref).get_path())
def test45():
inst = MyInfra("test45")
for ref in (MyInfra.grid[12], MyInfra.grid[13], MyInfra.grid[14]):
_ = inst.get_inst_ref(ref)
assert len(inst.grid.instances()) == 3
def test46():
assert MyInfra.workers
def test47():
assert MyInfra.workers[1]
def test48():
assert MyInfra.workers[1] is MyInfra.workers[1]
def test49():
assert MyInfra.workers[2].query.__class__ is not Server
def test50():
assert MyInfra.workers[3].query is not MyInfra.workers[4].query
def test51():
assert MyInfra.workers[5].query.value() is MyInfra.workers[6].query.value()
def test52():
assert MyInfra.workers[7].query.name.value() == "query"
def test53():
assert (MyInfra.workers[8].query.name.get_path() ==
["workers", "8", "query", "name"])
def test54():
inst = MyInfra("test54")
assert inst.workers[1]
def test55():
inst = MyInfra("test55")
assert inst.workers[2] is inst.workers[2]
def test56():
inst = MyInfra("test56")
assert inst.workers[3] is not inst.workers[4]
def test57():
inst = MyInfra("test57")
_ = inst.workers[5]
_ = inst.workers[6]
_ = inst.workers[7]
_ = inst.workers[6]
assert len(inst.workers.instances()) == 3
def test58():
inst = MyInfra("test58")
    assert inst.workers[8].query.value() is not inst.workers[9].query.value()
def test59():
inst = MyInfra("test59")
assert inst.workers[10].query is not MyInfra.workers[10].query
def test60():
inst = MyInfra("test60")
assert (inst.workers[11].handler.name.get_path() ==
["workers", "11", "handler", "name"])
def test61():
inst = MyInfra("test61")
ref = MyInfra.workers[12].ncube.provisionedName
assert inst.get_inst_ref(ref).get_path() == ["workers", "12", "ncube", "provisionedName"]
def test62():
inst = MyInfra("test62")
for ref in (MyInfra.workers[13].query,
MyInfra.workers[14].handler,
MyInfra.workers[15].ncube):
_ = inst.get_inst_ref(ref)
assert len(inst.workers.instances()) == 3
def test63():
inst1 = MyInfra("test63-1")
inst2 = MyInfra("test63-2")
assert inst1.workers[1] is not inst2.workers[1]
def test64():
inst1 = MyInfra("test64-1")
inst2 = MyInfra("test64-2")
assert inst1.workers[2].query.value() is not inst2.workers[2].query.value()
def test65():
inst1 = MyInfra("test65-1")
inst2 = MyInfra("test65-2")
modref = MyInfra.workers[16].query
assert inst1.get_inst_ref(modref) is not inst2.get_inst_ref(modref)
def test66():
inst1 = MyInfra("test66-1")
inst2 = MyInfra("test66-2")
assert inst1.workers[2].query.value().__class__ is inst2.workers[2].query.value().__class__
def test67():
assert MyInfra.composite[1].grid[2].name
def test68():
assert MyInfra.composite[2].workers[1].handler.name
def test69():
assert (MyInfra.composite[3].workers[2].query.mem.get_path() ==
["composite", "3", "workers", "2", "query", "mem"])
def test70():
inst = MyInfra("test70")
ref = MyInfra.composite[4].workers[3].ncube.mem
assert (inst.get_inst_ref(ref).get_path() ==
["composite", "4", "workers", "3", "ncube", "mem"])
def test71():
inst = MyInfra("test71")
_ = MyInfra.composite[5].workers[4].ncube.provisionedName
assert len(inst.composite[5].workers.instances()) == 0
def test72():
assert MyInfra.composite[1].value().__class__.__name__ == "ResourceGroup"
def test73():
assert MyInfra.grid[1].value().__class__ is Server
def test74():
inst = MyInfra("test74")
assert inst.composite[1].value().__class__.__name__ == "ResourceGroup"
def test75():
inst = MyInfra("test75")
assert inst.grid[1].value().__class__ is Server
def test76():
assert MyInfra.composite.value().__class__ in (MultiResourceGroup, MultiResource)
def test77():
inst = MyInfra("test77")
assert inst.composite.value().__class__ in (MultiResourceGroup, MultiResource)
def test78():
modrefs = [MyInfra.composite,
MyInfra.composite[1],
MyInfra.composite[2].grid,
MyInfra.composite[3].grid[1],
MyInfra.composite[3].grid[1].name]
s = set([ref.get_containing_component()
for ref in modrefs
if ref.get_containing_component() is not None])
assert len( s ) == 1, "There was more than one resource"
def test79():
assert MyInfra.grid[1].get_containing_component() == MyInfra.grid[1].name.get_containing_component()
def test80():
assert (MyInfra.composite[1].grid[1].get_containing_component() ==
MyInfra.composite[1].grid[1].name.get_containing_component())
def test81():
inst = MyInfra("test81")
modrefs = [inst.grid[1].name,
inst.grid[2].name,
inst.grid[3].name,
inst.grid[3],
inst.grid]
assert len(set([p for p in [r.get_containing_component() for r in modrefs] if p is not None])) == 3
def test82():
inst = MyInfra("test82")
assert len(inst.components()) == 2
def test83():
class ProvTest(InfraModel):
grid = MultiResource(Server("prov1", mem="8GB"))
inst = ProvTest("prov1")
_ = inst.grid[1]
assert len(inst.components()) == 1
def test84():
inst = MyInfra("test84")
_ = inst.grid[1]
assert len(inst.components()) == 3
def test85():
inst = MyInfra("test85")
for i in range(5):
_ = inst.grid[i]
assert len(inst.components()) == 7
def test86():
inst = MyInfra("test86")
_ = inst.workers[1]
assert len(inst.components()) == 5
def test87():
inst = MyInfra("test87")
_ = inst.workers[1].handler
assert len(inst.components()) == 5
def test88():
inst = MyInfra("test88")
for i in range(2):
_ = inst.workers[i]
assert len(inst.components()) == 8
def test89():
inst = MyInfra("test89")
_ = inst.composite[1]
assert len(inst.components()) == 2
def test90():
inst = MyInfra("test90")
_ = inst.composite[1].grid[1]
assert len(inst.components()) == 3
def test91():
inst = MyInfra("test91")
_ = inst.composite[1].workers
assert len(inst.components()) == 2
def test92():
inst = MyInfra("test92")
_ = inst.composite[1].workers[1]
assert len(inst.components()) == 5
def test93():
inst = MyInfra("test93")
_ = inst.composite[1].workers[1]
for i in range(2):
_ = inst.composite[i+2].grid[1]
assert len(inst.components()) == 7
def test94():
inst = MyInfra("test94")
for i in range(5):
_ = inst.grid[i]
assert len(inst.grid) == 5
def test95():
inst = MyInfra("test95")
assert not inst.grid
def test96():
inst = MyInfra("test96")
assert inst.server
def test97():
inst = MyInfra("test97")
assert inst.server.name
def test98():
inst = MyInfra("test98")
assert not inst.server.provisionedName
def test99():
inst = MyInfra("test99")
inst.grid[1]
assert inst.grid
def test100():
inst = MyInfra("test100")
inst.grid[1]
assert inst.grid[1].name
def test101():
inst = MyInfra("test101")
inst.grid[1]
assert not inst.grid[1].provisionedName
def test102():
inst = MyInfra("test102")
assert not inst.workers
def test103():
inst = MyInfra("test103")
inst.workers[1]
assert inst.workers
def test104():
inst = MyInfra("test104")
inst.workers[1]
assert inst.workers[1].handler
def test105():
inst = MyInfra("test105")
inst.workers[1]
assert inst.workers[1].handler.name
def test106():
inst = MyInfra("test106")
inst.workers[1]
assert not inst.workers[1].handler.provisionedName
def test107():
inst = MyInfra("test107")
assert not inst.composite
def test108():
inst = MyInfra("test108")
inst.composite[1]
assert inst.composite
def test109():
inst = MyInfra("test109")
inst.composite[1]
assert not inst.composite[1].grid
def test110():
inst = MyInfra("test110")
inst.composite[1]
inst.composite[1].grid[1]
assert inst.composite[1].grid
def test111():
inst = MyInfra("test111")
inst.composite[1]
inst.composite[1].grid[1]
assert inst.composite[1].grid[1].name
def test112():
inst = MyInfra("test112")
inst.composite[1]
inst.composite[1].grid[1]
assert not inst.composite[1].grid[1].provisionedName
def test113():
inst = MyInfra("test113")
inst.composite[1]
assert not inst.composite[1].workers
def test114():
inst = MyInfra("test114")
inst.composite[1].workers[1]
assert inst.composite[1].workers
def test115():
inst = MyInfra("test115")
inst.composite[1].workers[1]
assert inst.composite[1].workers[1].handler
def test116():
inst = MyInfra("test116")
inst.composite[1].workers[1]
assert inst.composite[1].workers[1].handler.name
def test117():
inst = MyInfra("test117")
inst.composite[1].workers[1]
assert not inst.composite[1].workers[1].handler.provisionedName
def test118():
#this is just ensuring we throw if a component derived class fails to
#implement fix_arguments()
class MissedMethod1(AbstractModelingEntity):
def __init__(self, name, arg1, arg2):
super(MissedMethod1, self).__init__(name)
self.arg1 = arg1
self.arg2 = arg2
comp = MissedMethod1("oops!", 1, 2)
try:
comp.fix_arguments()
assert False, "fix_arguments should have thrown an exception"
except TypeError, _:
assert True
except Exception, e:
assert False, "got an unexpected exception: '%s'" % e.message
def test119():
class CGTest1(InfraModel):
group = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
db=Database("db"))
inst = CGTest1("cgtest1")
assert inst.group.reqhandler
def test120():
class CGTest2(InfraModel):
group = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
grid=MultiResource(Server("grid", mem="8GB")))
inst = CGTest2("cgt2")
_ = inst.group.grid[1]
assert inst.group.grid[1] is inst.group.grid[1]
def test121():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
grid=MultiResource(Server("grid", mem="8GB")))
class CGTest3(InfraModel):
overlord = Server("overlord", mem="8GB")
groups = MultiResource(group_thing)
inst = CGTest3("cgt3")
_ = inst.groups[1].grid[2]
assert inst.groups[1].grid[2].value() is not inst.groups[2].grid[1].value()
def test122():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
grid=MultiResource(Server("grid", mem="8GB")))
class CGTest4(InfraModel):
group = group_thing
inst1 = CGTest4("cgt4-1")
inst2 = CGTest4("cgt4-2")
assert inst1.group.reqhandler.value() is not inst2.group.reqhandler.value()
def test123():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
grid=MultiResource(Server("grid", mem="8GB")))
class CGTest5a(InfraModel):
group = group_thing
class CGTest5b(InfraModel):
group = group_thing
inst1 = CGTest5a("cgt5a-1")
inst2 = CGTest5b("cgt5b-2")
assert inst1.group.value() is not inst2.group.value()
def test124():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
grid=MultiResource(Server("grid", mem="8GB",
rhm=ctxt.comp.container.container.reqhandler.mem)))
class CGTest6(InfraModel):
group = group_thing
inst = CGTest6("ctg6")
inst.group.grid[0]
inst.group.grid[1]
inst.refs_for_components()
try:
inst.group.fix_arguments()
except Exception, e:
assert False, "Fixing the arguments failed; %s" % e.message
def test125():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
grid=MultiResource(Server("grid", mem="8GB",
rhm=ctxt.comp.container.container.reqhandler.mem)))
class CGTest7(InfraModel):
group = group_thing
inst = CGTest7("ctg7")
inst.group.grid[0]
inst.group.grid[1]
inst.group.fix_arguments()
assert inst.group.grid[0].rhm.value() is inst.group.grid[1].rhm.value()
def test126():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
slave=Server("grid", mem=ctxt.comp.container.reqhandler.mem))
class CGTest8(InfraModel):
group = group_thing
inst = CGTest8("ctg8")
inst.refs_for_components()
inst.group.fix_arguments()
assert inst.group.slave.mem.value() == "8GB"
def test127():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
slave=Server("grid", mem=ctxt.comp.container.reqhandler.mem))
class CGTest9(InfraModel):
top = Server("topper", mem=ctxt.model.group.reqhandler.mem)
group = group_thing
inst = CGTest9("ctg9")
inst.refs_for_components()
_ = inst.components()
inst.top.fix_arguments()
inst.group.fix_arguments()
assert inst.top.mem.value() == "8GB"
def test128():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
slave=Server("grid", mem=ctxt.comp.container.reqhandler.mem,
path=ctxt.comp.container.reqhandler._path))
class CGTest10(InfraModel):
group = group_thing
inst = CGTest10("cgt10")
inst.components()
inst.refs_for_components()
inst.group.fix_arguments()
assert inst.group.slave.path.value() == ("reqhandler", "container", "comp")
def test130():
class BadRefClass(AbstractModelReference):
pass
class Test130(InfraModel):
ref_class = BadRefClass
grid = MultiResource(Server("grid", mem="8GB"))
inst = Test130("t130")
try:
_ = inst.grid[1]
assert False, "Should have raised a TypeError about _get_item_ref_obj()"
except TypeError, e:
assert "get_item_ref_obj" in e.message
def test131():
class Test131(InfraModel):
server = Server("dummy", mem="8GB", adict={'a':1, 'b':2, 'c':3})
inst = Test131("t131")
assert inst.server.adict['a'] == 1
def test132():
class Test132(InfraModel):
server = Server("dummy", mem="8GB", no_key=5)
inst = Test132("t132")
try:
inst.server.no_key[2]
assert False, "We were allowed to use a key on a non-collection attribute"
except TypeError, e:
assert "keyed" in e.message
def test133():
def tfunc(context):
return context.model.server.mem
class Test133(InfraModel):
reqhandler = Server("dummy1", mem=tfunc)
server = Server("dummy2", mem="16GB")
inst = Test133("t133")
_ = inst.components()
inst.reqhandler.fix_arguments()
assert inst.reqhandler.mem.value() == "16GB"
def test134():
components = {"server":Server("dummy", mem="16GB"),
"db":Database("db", wibble=9)}
class Test134(InfraModel):
with_resources(**components)
inst = Test134("t134")
assert inst.server.mem.value() == "16GB" and inst.db.wibble.value() == 9
def test135():
group_thing = ResourceGroup("group",
reqhandler=Server("reqhandler", mem="8GB"),
slaves=MultiResource(Server("grid", mem=ctxt.comp.container.container.reqhandler.mem)))
components = {"group":group_thing}
class Test135(InfraModel):
with_resources(**components)
inst = Test135("t135")
_ = inst.group.slaves[1]
_ = inst.group.slaves[2]
inst.components()
inst.refs_for_components()
inst.group.fix_arguments()
assert inst.group.slaves[2].mem.value() == "8GB"
def test136():
class Test136(InfraModel):
hive = MultiResource(Server("worker", mem="8GB"))
inst = Test136("t136")
for i in range(5):
_ = inst.hive[i]
assert len(inst.hive.keys()) == 5
def test137():
class Test137(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test137("t137")
for i in range(5):
_ = inst.hive[i]
assert len(inst.hive.values()) == 5 and inst.hive[2] in inst.hive.values()
def test138():
class Test138(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test138("t138")
for i in range(5):
_ = inst.hive[i]
d = {k:v for k, v in inst.hive.items()}
assert len(d) == 5 and inst.hive[1] in d.values()
def test139():
class Test139(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test139("t139")
for i in range(5):
_ = inst.hive[i]
assert inst.hive.has_key(3)
def test140():
class Test140(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test140("t140")
for i in range(5):
_ = inst.hive[i]
assert inst.hive.get(3) is inst.hive[3]
def test141():
class Test141(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test141("t141")
for i in range(5):
_ = inst.hive[i]
assert len([k for k in inst.hive.iterkeys()]) == 5
def test142():
class Test142(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test142("t142")
for i in range(5):
_ = inst.hive[i]
l = [v for v in inst.hive.itervalues()]
assert len(l) == 5 and inst.hive[4] in l
def test143():
class Test143(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test143("t143")
for i in range(5):
_ = inst.hive[i]
d = {k:v for k, v in inst.hive.iteritems()}
assert len(d) == 5 and inst.hive[0] in d.values()
def test144():
class Test144(InfraModel):
hive = MultiResource(Server("drone", mem="8GB"))
inst = Test144("t144")
for i in range(5):
_ = inst.hive[i]
assert 3 in inst.hive
def test145():
class Test145(InfraModel):
hive = MultiResource(ResourceGroup("crowd", drones=Server("drone", mem="8GB"),
dregs=MultiResource(Server("dreg", mem="2GB"))))
inst = Test145("t145")
for i in range(5):
_ = inst.hive[0].dregs[i]
d = {k:v for k, v in inst.hive[0].dregs.items()}
assert (len(inst.hive[0].dregs) == 5 and
len(inst.hive[0].dregs.keys()) == 5 and
3 in inst.hive[0].dregs and
len(inst.hive[0].dregs.values()) == 5 and
len(d) == 5 and
inst.hive[0].dregs[2] in d.values())
def test146():
class Test(InfraModel):
grid = MultiResource(Server("grid-node", mem="8GB"))
inst = Test("key")
for i in range(5):
_ = inst.grid[i]
assert inst.grid[3]._name == "3"
def test147():
try:
class Test(InfraModel):
app_server = Server("app_server", mem="8GB")
with_resources(grid="not a resource")
assert False, "The class def should have raised an exception"
except InfraException, e:
assert "grid is not derived" in e.message
def test148():
class Test(InfraModel):
app_server = Server("app_server", mem="8GB")
inst = Test("inst")
assert not inst.provisioning_been_computed()
def test149():
class Test(InfraModel):
app_server = Server("app_server", mem="8GB")
inst = Test("inst")
inst.compute_provisioning_from_refs([Test.app_server])
try:
inst.compute_provisioning_from_refs([Test.app_server])
assert False, "was allowed to compute provisioning twice"
except InfraException, e:
assert "has already been" in e.message
def test150():
from actuator.infra import IPAddressable
class NoAdminIP(IPAddressable):
pass
s = NoAdminIP()
try:
_ = s.get_ip()
        assert False, "Should not have been able to call get_ip()"
except TypeError, e:
assert "Not implemented" in e.message
def test151():
s = Server("someserver", mem="8GB")
class Test(InfraModel):
s1 = s
s2 = s
s3 = s
inst = Test("test")
assert len(inst.components()) == 3
def do_all():
setup()
for k, v in globals().items():
if k.startswith("test") and callable(v):
v()
if __name__ == "__main__":
do_all()
| mit | 5,746,547,234,374,223,000 | 29.278008 | 120 | 0.588173 | false |
bugsnag/bugsnag-python | tests/integrations/test_bottle.py | 1 | 2970 | from webtest import TestApp
import bottle
from bottle import route, template
import bugsnag
from bugsnag.wsgi.middleware import BugsnagMiddleware
from tests.utils import IntegrationTest
class TestBottle(IntegrationTest):
def setUp(self):
super(TestBottle, self).setUp()
bugsnag.configure(endpoint=self.server.url,
session_endpoint=self.server.url,
auto_capture_sessions=False,
api_key='3874876376238728937',
asynchronous=False)
def test_routing_error(self):
@route('/beans')
def index():
raise Exception('oh no!')
app = bottle.app()
app.catchall = False
app = TestApp(BugsnagMiddleware(app))
self.assertRaises(Exception, lambda: app.get('/beans'))
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertTrue(event['unhandled'])
self.assertEqual(event['context'], 'GET /beans')
self.assertEqual(event['exceptions'][0]['errorClass'], 'Exception')
self.assertEqual(event['exceptions'][0]['message'], 'oh no!')
runtime_versions = event['device']['runtimeVersions']
self.assertEqual(runtime_versions['bottle'], '0.12.18')
assert 'environment' not in event['metaData']
def test_enable_environment(self):
bugsnag.configure(send_environment=True)
@route('/beans')
def index():
raise Exception('oh no!')
app = bottle.app()
app.catchall = False
app = TestApp(BugsnagMiddleware(app))
self.assertRaises(Exception, lambda: app.get('/beans'))
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
metadata = payload['events'][0]['metaData']
self.assertEqual(metadata['environment']['PATH_INFO'], '/beans')
def test_template_error(self):
@route('/berries/<variety>')
def index(variety):
return template('{{type1}} {{type2}}', type1=variety)
app = bottle.app()
app.catchall = False
app = TestApp(BugsnagMiddleware(app))
self.assertRaises(Exception, lambda: app.get('/berries/red'))
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertTrue(event['unhandled'])
self.assertEqual(event['context'], 'GET /berries/red')
self.assertEqual(event['exceptions'][0]['errorClass'], 'NameError')
self.assertEqual(event['exceptions'][0]['message'],
"name 'type2' is not defined")
assert 'environment' not in event['metaData']
runtime_versions = event['device']['runtimeVersions']
self.assertEqual(runtime_versions['bottle'], bottle.__version__)
| mit | -3,055,690,052,233,505,300 | 37.076923 | 75 | 0.611785 | false |
nsynapse/edison_cat | catserver/control/control_system.py | 1 | 1326 | #-*- coding:utf-8 -*-
from models import DBSystemInfo as DB
import psutil
class Control_System(object):
def __init__(self, request):
self.request = request
def __del__(self):
pass
def update(self):
if self.request.method == 'POST':
try:
_net_if = self.request.POST.get('net_if','')
if DB.objects.exists():
_db = DB.objects.latest('id')
_db.net_if = _net_if
_nets = psutil.net_if_addrs()
_db.net_address = _nets[_net_if][0].address
_db.websocket_port = self.request.POST.get('websocket_port',9002)
_db.save()
else:
_new_db = DB()
_new_db.net_if = self.request.POST.get('net_if','')
_new_db.websocket_port = self.request.POST.get('websocket_port',9002)
_nets = psutil.net_if_addrs()
_new_db.net_address = _nets[_net_if][0].address
_new_db.save()
return True
except Exception, e:
print "Exception(Control_System update) : ", e
return False
| mit | -5,306,238,310,272,985,000 | 32.175 | 89 | 0.435143 | false |
helixyte/TheLMA | thelma/tools/libcreation/base.py | 1 | 10770 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Base classes and constants for library creation ticket.
AAB
"""
from thelma.tools.iso.poolcreation.base import StockSampleCreationLayout
from thelma.tools.utils.base import round_up
from thelma.tools.utils.converters import BaseLayoutConverter
from thelma.tools.utils.layouts import ParameterSet
from thelma.tools.utils.layouts import WorkingLayout
from thelma.tools.utils.layouts import WorkingPosition
from thelma.entities.moleculetype import MOLECULE_TYPE_IDS
__docformat__ = 'reStructuredText en'
__all__ = ['NUMBER_SECTORS',
'NUMBER_MOLECULE_DESIGNS',
'MOLECULE_DESIGN_TRANSFER_VOLUME',
'POOL_STOCK_RACK_CONCENTRATION',
'PREPARATION_PLATE_CONCENTRATION',
'ALIQUOT_PLATE_CONCENTRATION',
'ALIQUOT_PLATE_VOLUME',
'STARTING_NUMBER_ALIQUOTS',
'get_stock_pool_buffer_volume',
'get_source_plate_transfer_volume',
'LibraryBaseLayoutParameters',
'LibraryBaseLayoutPosition',
'LibraryBaseLayout',
'LibraryBaseLayoutConverter',
'LibraryLayout']
#: The number of rack sectors (96-to-384 plate transition).
NUMBER_SECTORS = 4
#: The molecule type ID for the library.
MOLECULE_TYPE = MOLECULE_TYPE_IDS.SIRNA
#: The number of molecule designs per pool.
NUMBER_MOLECULE_DESIGNS = 3
#: The transfer volume of each molecule design in the pool (from single
#: molecule design stock to pool) in ul.
MOLECULE_DESIGN_TRANSFER_VOLUME = 3
#: The volume of the pool stock racks in ul.
POOL_STOCK_RACK_VOLUME = 45
#: The concentration of the pool stock racks in nM.
POOL_STOCK_RACK_CONCENTRATION = 10000 # 10 uM
#: The concentration of the prepartion plate in nM.
PREPARATION_PLATE_CONCENTRATION = 1270 # 1270 nM
#: The sample volume (after dilution, before aliquot plate creation) in the
#: preparation plate in ul.
PREPARATION_PLATE_VOLUME = 43.3 # 43.3 ul
#: The concentration of the library plate in nM.
ALIQUOT_PLATE_CONCENTRATION = 1270 # 1270 nM
#: The final sample volume in the library aliquot plate in ul.
ALIQUOT_PLATE_VOLUME = 4
#: The number of aliquot plates generated for each layout.
STARTING_NUMBER_ALIQUOTS = 8
OPTIMEM_DILUTION_FACTOR = 3
def get_stock_pool_buffer_volume():
"""
Returns the buffer volume required to generate the pool stock samples.
"""
total_transfer_volume = NUMBER_MOLECULE_DESIGNS \
* MOLECULE_DESIGN_TRANSFER_VOLUME
return POOL_STOCK_RACK_VOLUME - total_transfer_volume
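# Worked example (added for clarity, using the constants defined above):
# 3 molecule designs * 3 ul transfer volume = 9 ul, so the buffer volume is
# 45 ul - 9 ul = 36 ul per pool stock sample.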
def get_source_plate_transfer_volume():
"""
Returns the volume that is transferred from a pool stock rack to a
library source (preparation) plate in ul.
"""
dilution_factor = float(POOL_STOCK_RACK_CONCENTRATION) \
/ PREPARATION_PLATE_CONCENTRATION
vol = PREPARATION_PLATE_VOLUME / dilution_factor
return round_up(vol)
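# Worked example (added for clarity, using the constants defined above):
# the dilution factor is 10000 nM / 1270 nM ~ 7.87, so roughly
# 43.3 ul / 7.87 ~ 5.5 ul are transferred per well; round_up is assumed to
# round this value upwards to the next supported volume step.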
class LibraryBaseLayoutParameters(ParameterSet):
"""
This layout defines which positions in a library will contain samples.
"""
DOMAIN = 'library_base_layout'
#: If *True* the position in a library plate will contain a library sample.
IS_SAMPLE_POS = 'is_sample_position'
REQUIRED = [IS_SAMPLE_POS]
ALL = [IS_SAMPLE_POS]
ALIAS_MAP = {IS_SAMPLE_POS : []}
DOMAIN_MAP = {IS_SAMPLE_POS : DOMAIN}
class LibraryBaseLayoutPosition(WorkingPosition):
"""
There is actually only one value for a position in a library base layout
and this is the availability for library samples.
**Equality condition**: equal :attr:`rack_position` and
:attr:`is_sample_pos`
"""
PARAMETER_SET = LibraryBaseLayoutParameters
def __init__(self, rack_position, is_sample_position=True):
"""
Constructor:
:param rack_position: The rack position.
:type rack_position: :class:`thelma.entities.rack.RackPosition`.
:param is_sample_position: Is this position available for samples?
:type is_sample_position: :class:`bool`
"""
WorkingPosition.__init__(self, rack_position)
if not isinstance(is_sample_position, bool):
msg = 'The "sample position" flag must be a bool (obtained: %s).' \
% (is_sample_position.__class__.__name__)
raise TypeError(msg)
#: Is this position available for samples?
self.is_sample_position = is_sample_position
def _get_parameter_values_map(self):
"""
Returns a map with key = parameter name, value = associated attribute.
"""
return {self.PARAMETER_SET.IS_SAMPLE_POS : self.is_sample_position}
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.rack_position == self.rack_position and \
other.is_sample_position == self.is_sample_position
def __repr__(self):
str_format = '<%s rack position: %s, is sample position: %s>'
params = (self.__class__.__name__, self.rack_position,
self.is_sample_position)
return str_format % params
class LibraryBaseLayout(WorkingLayout):
"""
    Defines which positions in a library may contain library samples.
"""
WORKING_POSITION_CLASS = LibraryBaseLayoutPosition
def __init__(self, shape):
"""
Constructor:
:param shape: The rack shape.
:type shape: :class:`thelma.entities.rack.RackShape`
"""
WorkingLayout.__init__(self, shape)
#: You cannot add new positions to a closed layout.
self.is_closed = False
def add_position(self, working_position):
"""
Adds a :class:`Working_position` to the layout.
:param working_position: The working position to be added.
:type working_position: :class:`LibraryBaseLayoutPosition`
:raises ValueError: If the added position is not a
:attr:`WORKING_POSITION_CLASS` object.
:raises AttributeError: If the layout is closed.
:raises TypeError: if the position has the wrong type
"""
if not self.is_closed:
WorkingLayout.add_position(self, working_position)
else:
raise AttributeError('The layout is closed!')
def close(self):
"""
Removes all positions that may not contain samples.
"""
if not self.is_closed:
del_positions = []
for rack_pos, libbase_pos in self._position_map.iteritems():
if not libbase_pos.is_sample_position:
del_positions.append(rack_pos)
for rack_pos in del_positions: del self._position_map[rack_pos]
self.is_closed = True
def create_rack_layout(self):
"""
The layout is closed before rack layout creation.
"""
self.close()
return WorkingLayout.create_rack_layout(self)
class LibraryBaseLayoutConverter(BaseLayoutConverter):
"""
Converts a :class:`thelma.entities.racklayout.RackLayout` into a
:class:`LibraryBaseLayout`.
"""
NAME = 'Library Base Layout Converter'
PARAMETER_SET = LibraryBaseLayoutParameters
LAYOUT_CLS = LibraryBaseLayout
POSITION_CLS = LibraryBaseLayoutPosition
def __init__(self, rack_layout, parent=None):
BaseLayoutConverter.__init__(self, rack_layout, parent=parent)
# intermediate storage of invalid rack positions
self.__invalid_flag = None
def reset(self):
BaseLayoutConverter.reset(self)
self.__invalid_flag = []
def _get_position_init_values(self, parameter_map, rack_pos):
"""
Derives a working position from a parameter map (including validity
checks).
"""
is_sample_pos_str = parameter_map[self.PARAMETER_SET.IS_SAMPLE_POS]
pos_label = rack_pos.label
if is_sample_pos_str is None: return None
values = {str(True) : True, str(False) : False}
if not values.has_key(is_sample_pos_str):
info = '%s (%s)' % (pos_label, is_sample_pos_str)
self.__invalid_flag.append(info)
else:
return dict(is_sample_position=values[is_sample_pos_str])
def _record_errors(self):
BaseLayoutConverter._record_errors(self)
if len(self.__invalid_flag) > 0:
msg = 'The "sample position" flag must be a boolean. The values ' \
'for some positions are invalid. Details: %s.' \
% (', '.join(sorted(self.__invalid_flag)))
self.add_error(msg)
def _perform_layout_validity_checks(self, working_layout):
"""
We do not check anything but we close the layout.
"""
working_layout.close()
class LibraryLayout(StockSampleCreationLayout):
"""
    A special :class:`StockSampleCreationLayout` for a plate involved
in library generation (either :class:`IsoAliquotPlate` (rack shape 16x24)
or :class:`IsoSectorPreparationPlate` (rack shape 8x12)).
"""
def __init__(self, shape):
"""
Constructor:
:param shape: The rack shape.
:type shape: :class:`thelma.entities.rack.RackShape`
"""
StockSampleCreationLayout.__init__(self, shape)
        #: Allows validation of new positions (only set if the layout is
        #: initialised via :func:`from_base_layout`).
self.base_layout_positions = None
@classmethod
def from_base_layout(cls, base_layout):
"""
Creates a new library layout which will only accept positions that
are part of the base layout.
"""
base_layout.close()
layout = LibraryLayout(shape=base_layout.shape)
layout.base_layout_positions = base_layout.get_positions()
return layout
def add_position(self, working_position):
"""
Adds a :class:`Working_position` to the layout.
:param working_position: The transfer position to be added.
:type working_position: :class:`LibraryPosition`
:raise ValueError: If the rack position is not allowed by the
base layout.
:raises TypeError: If the added position is not a
:class:`TransferPosition` object.
"""
rack_pos = working_position.rack_position
if not self.base_layout_positions is None and \
not rack_pos in self.base_layout_positions:
msg = 'Position %s is not part of the base layout. It must not ' \
'take up samples.' % (rack_pos)
raise ValueError(msg)
WorkingLayout.add_position(self, working_position)
| mit | 6,090,208,923,717,987,000 | 33.408946 | 80 | 0.644475 | false |
ReddyLab/1000Genomes | make-intron-retention-slurms.py | 1 | 1769 | #!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2017 William H. Majoros ([email protected]).
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
from SlurmWriter import SlurmWriter
THOUSAND="/home/bmajoros/1000G/assembly"
GEUVADIS=THOUSAND+"/geuvadis.txt"
SLURM_DIR=THOUSAND+"/intron-slurms"
JOB_NAME="INTRON"
MAX_PARALLEL=1000
NICE=500
MEMORY=0
THREADS=0
#=========================================================================
# main()
#=========================================================================
dirs=[]
with open(GEUVADIS,"rt") as IN:
for line in IN:
id=line.rstrip()
dir=THOUSAND+"/combined/"+id
dirs.append(dir)
writer=SlurmWriter()
for dir in dirs:
writer.addCommand("cd "+dir+"/RNA3\n"+
THOUSAND+"/src/get-intron-retentions.py "+
"../1.gff ../2.gff ../1.lengths ../2.lengths "+
"depth.txt.gz > IR.txt\n"
)
writer.setQueue("new,all")
writer.nice(NICE)
if(MEMORY): writer.mem(MEMORY)
if(THREADS): writer.threads(THREADS)
writer.writeArrayScript(SLURM_DIR,JOB_NAME,MAX_PARALLEL)
| gpl-2.0 | 681,284,877,154,889,300 | 35.854167 | 74 | 0.560204 | false |
luisfg30/Webserver | tests/table (1).py | 1 | 1978 | from tkinter import *
class ExampleApp(Tk):
def __init__(self):
Tk.__init__(self)
        list2=["Data","Página","Tipo de Requisição"]
t = SimpleTable(self,list2)
t.pack(side="top", fill="x")
list=["abc","def","ghi"]
t.insert_row(list)
t.insert_row(list)
t.set_cell(1,0,"hello world")
#print(t._widgets)
t.set_row(1,list2)
class SimpleTable(Canvas):
def __init__(self, parent,value_names):
self.rows=0
self.columns=len(value_names)
# use black background so it "peeks through" to
# form grid lines
Canvas.__init__(self, parent, background="black")
self._widgets = []
self.current_row=0
for j in range(self.columns):
self.grid_columnconfigure(j, weight=1)
#add first line
new_row = []
for j in range(self.columns):
label = Label(self, text=value_names[j],font=("Verdana 9 bold"),borderwidth=0, width=len(value_names[j]))
label.grid(row=self.current_row, column=j, sticky="nsew", padx=1, pady=1)
new_row.append(label)
self._widgets.append(new_row)
self.current_row+=1
self.rows+=1
def set_cell(self, i, j, value):
widget = self._widgets[i][j]
widget.configure(text=value)
def set_row(self,i,values):
for j in range(len(values)):
widget = self._widgets[i][j]
widget.configure(text=values[j])
def insert_row(self,values):
self.current_row+=1
self.rows+=1
new_row = []
for j in range(len(values)):
label = Label(self, text=values[j],borderwidth=0, width=10)
label.grid(row=self.current_row, column=j, sticky="nsew", padx=1, pady=1)
new_row.append(label)
self._widgets.append(new_row)
if __name__ == "__main__":
app = ExampleApp()
app.mainloop() | gpl-2.0 | -3,464,466,802,696,428,000 | 29.875 | 117 | 0.547342 | false |
eubr-bigsea/tahiti | migrations/versions/185a7d89aa72_remove_field_from_form_85.py | 1 | 3078 | """Remove Field from Form 85
Revision ID: 185a7d89aa72
Revises: 2eaeb4b0c43f
Create Date: 2018-04-17 14:28:35.098385
"""
import json
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '185a7d89aa72'
down_revision = '2eaeb4b0c43f'
branch_labels = None
depends_on = None
X_FORMAT_ID = 307
LEGEND_ID = 310
FORM_ID = 85
def upgrade():
op.execute('DELETE FROM operation_form_field WHERE id={}'.format(X_FORMAT_ID))
op.execute('DELETE FROM operation_form_field_translation WHERE id={}'.format(X_FORMAT_ID))
op.execute('DELETE FROM operation_form_field WHERE id={}'.format(LEGEND_ID))
op.execute('DELETE FROM operation_form_field_translation WHERE id={}'.format(LEGEND_ID))
def downgrade():
tb = table(
'operation_form_field',
column('id', Integer),
column('name', String),
column('type', String),
column('required', Integer),
column('order', Integer),
column('default', Text),
column('suggested_widget', String),
column('values_url', String),
column('values', String),
column('scope', String),
column('form_id', Integer),
)
columns = [c.name for c in tb.columns]
supported_formats = [
{"key": "%Y-%m-%dT%H:%M:%S.%LZ",
"value": "%Y-%m-%dT%H:%M:%S.%LZ"},
{"key": "%m-%d", "value": "%m-%d"},
{"key": "%d-%", "value": "%d-%m"},
{"key": "%Y-%m-%d", "value": "%Y-%m-%d"},
{"key": "%m-%Y-%d", "value": "%m-%Y-%d"},
{"key": "%m-%Y-%d", "value": "%m-%Y-%d"},
{"key": "%m-%Y-%d %H:%M",
"value": "%m-%Y-%d %H:%M"},
{"key": "%m-%Y-%d %H:%M",
"value": "%m-%Y-%d %H:%M"},
{"key": "%m-%Y-%d %H:%M:%S", "value": "%m-%Y-%d %H:%M:%S"},
{"key": "%m-%Y-%d %H:%M:%S",
"value": "%m-%Y-%d %H:%M:%S"},
{"key": "%H:%M", "value": "%H:%M"},
{"key": "%H:%M:%S", "value": "%H:%M:%S"},
{"key": ".2", "value": ".2"},
{"key": ".4", "value": ".4"},
{"key": "%", "value": "%"},
{"key": "p", "value": "p"},
{"key": "d", "value": "d"}
]
data = [
[X_FORMAT_ID, 'x_format', 'TEXT', 0, 8, None, 'select2', None, json.dumps(supported_formats), 'EXECUTION', FORM_ID],
[LEGEND_ID, 'legend', 'INTEGER', 0, 5, 1, 'checkbox', None, None, 'EXECUTION', FORM_ID],
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
tb = table(
'operation_form_field_translation',
column('id', Integer),
column('locale', String),
column('label', String),
column('help', String), )
columns = [c.name for c in tb.columns]
data = [
[X_FORMAT_ID, 'en', 'X-axis format', 'X-axis format'],
[X_FORMAT_ID, 'pt', 'Formato para eixo X', 'Formato para eixo X'],
[LEGEND_ID, 'en', 'Display Legend', 'Display Legend'],
[LEGEND_ID, 'pt', 'Exibir Legenda', 'Exibir Legenda'],
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
| apache-2.0 | 9,139,908,008,826,830,000 | 30.408163 | 120 | 0.536387 | false |
clach04/Stache | __init__.py | 1 | 17723 | from __future__ import generators
import sys
from cgi import escape
try:
raise ImportError
import itertools
itertools_takewhile = itertools.takewhile
except ImportError:
# fake it
def takewhile(predicate, iterable):
# takewhile(lambda x: x<5, [1,4,6,4,1]) --> 1 4
for x in iterable:
if predicate(x):
yield x
else:
break
itertools_takewhile = takewhile
try:
from sys import intern
except ImportError:
pass
string_func = unicode
TOKEN_RAW = intern('raw')
TOKEN_TAGOPEN = intern('tagopen')
TOKEN_TAGINVERT = intern('taginvert')
TOKEN_TAGCLOSE = intern('tagclose')
TOKEN_TAGCOMMENT = intern('tagcomment')
TOKEN_TAGDELIM = intern('tagdelim')
TOKEN_TAG = intern('tag')
TOKEN_PARTIAL = intern('partial')
TOKEN_PUSH = intern('push')
TOKEN_BOOL = intern('bool')
BOOTSRAP_PRE = """
(function(data){
var isArray = Array.isArray || function(obj) {
return toString.call(obj) == '[object Array]';
},
each = function(obj, iterator, context) {
if (obj == null) return;
if (Array.prototype.forEach && obj.forEach === Array.prototype.forEach) {
obj.forEach(iterator, context);
} else if (obj.length === +obj.length) {
for (var i = 0, l = obj.length; i < l; i++) {
if (i in obj && iterator.call(context, obj[i], i, obj) === breaker) return;
}
} else {
for (var key in obj) {
if (obj.hasOwnProperty(key)) {
if (iterator.call(context, obj[key], key, obj) === breaker) return;
}
}
}
},
map = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (Array.prototype.map && obj.map === Array.prototype.map) return obj.map(iterator, context);
each(obj, function(value, index, list) {
results[results.length] = iterator.call(context, value, index, list);
});
if (obj.length === +obj.length) results.length = obj.length;
return results;
},
htmlEncode = function(str) {
return String(str)
.replace(/&/g, '&')
.replace(/"/g, '"')
.replace(/'/g, ''')
.replace(/</g, '<')
.replace(/>/g, '>');
},
lookup = function (data, datum) {
var i = 0,
l = data ? data.length : 0;
for (; i < l; i += 1) {
if (datum === '.') {
return data[i]
} else if (data[i] !== void 0 && data[i][datum] !== void 0 && data[i][datum] !== false) {
if (toString.call(data[i][datum]) == '[object Function]') {
return data[i][datum](data)
} else {
return data[i][datum]
}
}
}
return '';
},
section = function(data, tagvalue, callback, invert){
invert = invert || false;
if (isArray(tagvalue)) {
if (!invert && tagvalue.length > 0) {
return map(tagvalue, function(v) { return callback([v].concat(data))}).join('')
} else if (invert && tagvalue.length == 0) {
return callback(data);
}
} else {
if((!invert && tagvalue) || (invert && !tagvalue)) {
if (tagvalue !== void 0 || tagvalue !== true) {
return callback([tagvalue].concat(data));
} else {
return callback(data);
}
}
}
};
"""
BOOTSRAP_POST = """
})
"""
def _checkprefix(tag, prefix):
if tag and tag[0] == prefix:
return tag[1:].strip()
else:
return None
def _lookup(data, datum):
for scope in data:
if datum == '.':
return string_func(scope)
elif datum in scope:
return scope[datum]
elif hasattr(scope, datum):
return getattr(scope, datum)
return None
def _renderjsfunction(parts, prefix = "", postfix = "", params="data, tag"):
return "function({params}) {{{prefix} return {content} {postfix} }}".format(
content=_renderjsjoin(*parts),
prefix=prefix,
postfix=postfix,
params=params)
def _renderjsjoin(*args):
return "[{0}].join('');".format(','.join(args))
def render(template, data):
return Stache().render(template, data)
def render_js(template):
return Stache().render_js(template)
class Stache(object):
def __init__(self):
self.otag = '{{'
self.ctag = '}}'
self.templates = {}
self.hoist = {}
self.hoist_data = {}
self.section_counter = 0
def copy(self):
copy = Stache()
copy.templates = self.templates
return copy
def add_template(self, name, template):
self.templates[name] = list(self._tokenize(template))
def render(self, template, data={}):
self.otag = '{{'
self.ctag = '}}'
return ''.join(self._parse(self._tokenize(template), data))
def render_iter(self, template, data={}):
copy = self.copy()
return copy._parse(copy._tokenize(template), data)
def render_template(self, template_name, data={}):
self.otag = '{{'
self.ctag = '}}'
return ''.join(self._parse(iter(list(self.templates[template_name])), data))
def render_template_iter(self, template_name, data={}):
copy = self.copy()
return copy._parse(iter(list(copy.templates[template_name])), data)
def _js_hoisted(self, bare=True):
hoist = ''
if self.templates:
hoist += "\n var templates = {};\n"
for name in self.templates:
render_function = list(self._jsparse(iter(list(self.templates[name]))))
newparams = "data"
prefix = ""
if not bare and self.hoist_data:
hoisted = map(lambda x: '"{0}": {1}, '.format(x, self.hoist_data[x], "baseData"), self.hoist_data.keys())
prefix = ' var data = [dat2, {{{0}}}];'.format(', '.join(hoisted))
self.hoist_data = {}
newparams = 'dat2';
hoist += ' templates["{0}"] = {1};\n'.format(name, _renderjsfunction(render_function, prefix=prefix, params=newparams))
if self.hoist:
for name in self.hoist:
hoist += ' var {0} = {1};\n'.format(name, self.hoist[name])
if bare:
if self.hoist_data:
for name in self.hoist_data:
hoist += ' {2}["{0}"] = {1};\n'.format(name, self.hoist_data[name], "data")
return hoist
def render_js(self, template):
copy = self.copy()
renderedjs = _renderjsjoin(*list(copy._jsparse(copy._tokenize(template))))
hoist = copy._js_hoisted()
jstemplate = "{0}\n {1}\n data = [data];\n return {2};\n{3}"
return jstemplate.format(BOOTSRAP_PRE, hoist, renderedjs, BOOTSRAP_POST)
def render_js_template(self, template_name):
copy = self.copy()
hoist = copy._js_hoisted(bare=False)
jstemplate = "{0}\n {1}\n return templates['{2}']([data]);\n{3}"
return jstemplate.format(BOOTSRAP_PRE, hoist, template_name, BOOTSRAP_POST)
def render_all_js(self):
copy = self.copy()
hoist = copy._js_hoisted(bare=False)
jstemplate = "{0}\n var baseData={{}};\n {1}\n return templates;\n{2}"
return jstemplate.format(BOOTSRAP_PRE, hoist, BOOTSRAP_POST)
def _tokenize(self, template):
rest = template
scope = []
while rest and len(rest) > 0:
pre_section = rest.split(self.otag, 1)
if len(pre_section) == 2:
pre, rest = pre_section
else:
pre, rest = (pre_section[0], None)
if rest:
taglabel, rest = rest.split(self.ctag, 1)
else:
taglabel, rest = (None, None)
if taglabel:
taglabel = taglabel.strip()
else:
taglabel = ''
open_tag = _checkprefix(taglabel, '#')
if not open_tag:
invert_tag = _checkprefix(taglabel, '^')
else:
invert_tag = None
if not invert_tag:
close_tag = _checkprefix(taglabel, '/')
else:
close_tag = None
comment_tag = None
partial_tag = None
push_tag = None
bool_tag = None
booltern_tag = None
unescape_tag = None
if not close_tag:
comment_tag = _checkprefix(taglabel, '!')
if not comment_tag:
partial_tag = _checkprefix(taglabel, '>')
if not partial_tag:
push_tag = _checkprefix(taglabel, '<')
if not push_tag:
bool_tag = _checkprefix(taglabel, '?')
if not bool_tag:
booltern_tag = _checkprefix(taglabel, ':')
if not booltern_tag:
unescape_tag = _checkprefix(taglabel, '{')
if unescape_tag:
rest = rest[1:]
else:
rest = rest # FIXME seems like a NOOP
if not booltern_tag:
unescape_tag = (unescape_tag or _checkprefix(taglabel, '&'))
else:
unescape_tag = None
if not unescape_tag and len(taglabel) >= 2 and taglabel[0] == '=' and taglabel[-1] == '=':
delim_tag = taglabel[1:-1]
else:
delim_tag = None
if delim_tag:
delim_tag = delim_tag.split(' ', 1)
else:
delim_tag = None
if delim_tag and len(delim_tag) == 2:
delim_tag = delim_tag
else:
delim_tag = None
if push_tag:
pre = pre.rstrip()
rest = rest.lstrip()
if pre:
yield TOKEN_RAW, pre, len(scope)
if open_tag:
scope.append(open_tag)
yield TOKEN_TAGOPEN, open_tag, len(scope)
elif bool_tag:
scope.append(bool_tag)
yield TOKEN_BOOL, bool_tag, len(scope)
elif invert_tag:
scope.append(invert_tag)
yield TOKEN_TAGINVERT, invert_tag, len(scope)
elif close_tag is not None:
current_scope = scope.pop()
if close_tag:
assert (current_scope == close_tag), 'Mismatch open/close blocks'
yield TOKEN_TAGCLOSE, current_scope, len(scope)+1
elif booltern_tag:
scope.append(booltern_tag)
yield TOKEN_TAG, booltern_tag, 0
yield TOKEN_TAGINVERT, booltern_tag, len(scope)
elif comment_tag:
yield TOKEN_TAGCOMMENT, comment_tag, 0
elif partial_tag:
yield TOKEN_PARTIAL, partial_tag, 0
elif push_tag:
scope.append(push_tag)
yield TOKEN_PUSH, push_tag, len(scope)
elif delim_tag:
yield TOKEN_TAGDELIM, delim_tag, 0
elif unescape_tag:
yield TOKEN_TAG, unescape_tag, True
else:
yield TOKEN_TAG, taglabel, False
def _parse(self, tokens, *data):
for token in tokens:
#print ' token:' + string_func(token)
tag, content, scope = token
if tag == TOKEN_RAW:
yield string_func(content)
elif tag == TOKEN_TAG:
tagvalue = _lookup(data, content)
                #can't use "if tagvalue" because we still need to render tagvalue when it's 0
                #testing "tagvalue == 0" doesn't work either, since False == 0
if tagvalue is not None and tagvalue is not False:
try:
if len(tagvalue) > 0:
if scope:
yield string_func(tagvalue)
else:
yield escape(string_func(tagvalue))
except TypeError:
if scope:
yield string_func(tagvalue)
else:
yield escape(string_func(tagvalue))
elif tag == TOKEN_TAGOPEN or tag == TOKEN_TAGINVERT:
tagvalue = _lookup(data, content)
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
if (tag == TOKEN_TAGOPEN and tagvalue) or (tag == TOKEN_TAGINVERT and not tagvalue):
if hasattr(tagvalue, 'items'):
#print ' its a dict!', tagvalue, untilclose
for part in self._parse(untilclose, tagvalue, *data):
yield part
else:
try:
iterlist = list(iter(tagvalue))
if len(iterlist) == 0:
raise TypeError
#print ' its a list!', list(rest)
#from http://docs.python.org/library/itertools.html#itertools.tee
#In general, if one iterator uses most or all of the data before
#another iterator starts, it is faster to use list() instead of tee().
rest = list(untilclose)
for listitem in iterlist:
for part in self._parse(iter(rest), listitem, *data):
yield part
except TypeError:
#print ' its a bool!'
for part in self._parse(untilclose, *data):
yield part
else:
for ignore in untilclose:
pass
elif tag == TOKEN_BOOL:
tagvalue = _lookup(data, content)
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
if tagvalue:
for part in self._parse(untilclose, *data):
yield part
else:
for part in untilclose:
pass
elif tag == TOKEN_PARTIAL:
if content in self.templates:
for part in self._parse(iter(list(self.templates[content])), *data):
yield part
elif tag == TOKEN_PUSH:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
data[-1][content] = ''.join(self._parse(untilclose, *data))
elif tag == TOKEN_TAGDELIM:
self.otag, self.ctag = content
def _jsparse(self, tokens):
self.otag = '{{'
self.ctag = '}}'
for token in tokens:
tag, content, scope = token
if tag == TOKEN_RAW:
yield "'{0}'".format(string_func(content))
elif tag == TOKEN_TAG:
if content != '':
if scope:
yield "lookup(data, '{0}')".format(content)
else:
yield "htmlEncode(lookup(data, '{0}'))".format(content)
elif tag == TOKEN_TAGOPEN or tag == TOKEN_TAGINVERT or tag == TOKEN_BOOL:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
inside = self._jsparse(untilclose)
if tag == TOKEN_TAGOPEN:
pre = "return section(data, lookup(data, tag), function (data) {"
post = "});"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_TAGINVERT:
pre = "return section(data, lookup(data, tag), function (data) {"
post = "}, true);"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_BOOL:
pre = "var tagvalue = lookup(data, tag); if ((!isArray(tagvalue) && tagvalue) || (isArray(tagvalue)) && tagvalue.length > 0){"
post = "}"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_PARTIAL:
yield "templates['{0}'](data)".format(content)
elif tag == TOKEN_PUSH:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
self.hoist_data[content] = _renderjsfunction(self._jsparse(untilclose), params="data")
elif tag == TOKEN_TAGDELIM:
self.otag, self.ctag = content
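

# --- Example usage (editor's sketch, not part of the original module) ---
# Assumes the TOKEN_* constants and the string_func/escape helpers defined
# earlier in this file are in scope; template strings and data are made up.
if __name__ == '__main__':
    _demo = Stache()
    _demo.add_template('greeting', 'Hello {{name}}!')
    # Render a named template against a context dict.
    print(_demo.render_template('greeting', {'name': 'world'}))             # Hello world!
    # Sections iterate over lists; '.' refers to the current item.
    print(_demo.render('{{#items}}{{.}} {{/items}}', {'items': [1, 2, 3]}))  # 1 2 3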
| mit | 7,108,883,387,215,044,000 | 37.444685 | 146 | 0.486092 | false |
acrosby/get-git-hash | git.py | 1 | 1455 | #
# Copyright 2013 A.Crosby
# See LICENSE for license information
#
import subprocess, os
def subs(cmd):
p = subprocess.Popen(cmd, shell=True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
out = p.communicate()
if len(out[1])>0:
raise ValueError("Error using git through subprocess: %s" % (out[1],))
else:
return out[0]
def check(repo):
cmd = "cd %s && git status | grep 'modified:'" % (repo,)
modified = subs(cmd)
cmd = "cd %s && git status | grep 'new file:'" % (repo,)
new = subs(cmd)
if len(modified) > 0 or len(new) > 0:
raise ValueError("Please commit the changes to the repository '%s'" % (repo,))
def current_hash(repo):
check(repo)
cmd = "cd %s && git log | head -n 1" % (repo,)
out = subs(cmd)
out = out.strip("commit").strip(" ").strip("\n")
return out
def current_branch(repo):
check(repo)
cmd = "cd %s && git status | grep 'On branch'" % (repo,)
out = subs(cmd)
out = out.strip("# On branch ").strip(" ").strip("\n")
return out
def unique(repo):
check(repo)
branch = current_branch(repo)
hash = current_hash(repo)
return branch + "-" + hash
def prepend_unique(repo, filename):
path = os.path.abspath(filename)
fnames = os.path.split(path)
this = unique(repo)
return os.path.join(fnames[0], this+"_"+fnames[1])
unique_path = prepend_unique
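
# Example usage (editor's sketch, not part of the original file). The repo path
# and output filename below are hypothetical; unique() raises ValueError if the
# checkout has uncommitted changes.
#
#   import git
#   tag = git.unique('/path/to/repo')                    # e.g. 'master-1a2b3c...'
#   out = git.prepend_unique('/path/to/repo', 'results/run.nc')
#   # -> 'results/master-1a2b3c..._run.nc'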
| mit | -6,748,326,175,159,556,000 | 27.529412 | 86 | 0.580069 | false |
schwertfjo/PowerEnergyIC_CS5461_python2Driver | CS5461.py | 1 | 4234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import spidev
import time
class cs5461:
# define command bytes
sync0 = 254
sync1 = 255
reset = 128
compu = 232
# default settings
default_mode = 2
default_speed = 100000
default_inverted = True # due to optocouplers
def __init__(self, mode = default_mode, speed = default_speed, inverted = default_inverted):
self.spi = spidev.SpiDev()
self.spi_mode = mode
self.spi_speed = speed
self.inverted = inverted
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)
self.Init()
def rw(self, bytes):
send_bytes = []
ret = -1
if type(bytes) is int:
send_bytes = [bytes] + [self.sync0] * 3
elif type(bytes) is list:
send_bytes = bytes + [self.sync0] * (4 - len(bytes))
self.spi.open(0,0)
self.spi.mode = self.spi_mode
self.spi.max_speed_hz = self.spi_speed
if self.inverted:
r = self.spi.xfer2( map(lambda x: x ^ 0xFF, send_bytes) )
ret = map(lambda x: x ^ 0xFF, r)
else:
ret = self.spi.xfer2( send_bytes )
self.spi.close()
return ret
def Reset(self):
self.rw(self.reset)
def Sync(self):
self.rw([self.sync1]*3 + [self.sync0])
def Init(self):
# chip reset cycle via gpio25
GPIO.output(25, True)
time.sleep(1)
GPIO.output(25, False)
time.sleep(1)
self.Sync()
self.Reset()
self.Sync()
wrReg00 = 0x40 # Config
wrReg01 = 0x42 # Current Offset
wrReg02 = 0x44 # Current Gain
wrReg03 = 0x46 # Voltage Offset
wrReg04 = 0x48 # Voltage Gain
wrReg13 = 0x5A # Timebase Calibration
wrReg14 = 0x5C # Power Offset Calibration
wrReg16 = 0x60 # Current Channel AC Offset
wrReg17 = 0x62 # Voltage Channel AC Offset
# good working calibration data for energenie power meter lan (determined by trial)
self.rw([wrReg00, 0b1, 0b0, 0b1])
self.rw([wrReg01, 0xFF, 0xB5, 0x62])
self.rw([wrReg02, 0x54, 0xFE, 0xFF])
self.rw([wrReg03, 0x15, 0x8C, 0x71])
self.rw([wrReg04, 0x3D, 0xE0, 0xEF])
self.rw([wrReg13, 0x83, 0x12, 0x6E])
self.rw([wrReg14, 0xFF, 0xCF, 0xC3])
self.rw([wrReg16, 0x00, 0x01, 0x4A])
self.rw([wrReg17, 0x00, 0x44, 0xCA])
# Perform continuous computation cycles
self.rw(self.compu)
        time.sleep(2) # wait until values become good
def readregister(self, register):
if register > 31 or register < 0: #just check range
return -1
self.Sync()
received = self.rw(register << 1)
return received[1]*256*256 + received[2]*256 + received[3]
def getregister(self, register):
        Exponential = [ 0, -23, -22, -23, -22, -0, -5, -23, -23, -23, # 0:9 decimal point position
                       -23, -24, -24, -23, -23, 0, -24, -24, -5, -16, # 10:19
                         0, 0, -22, -23, 0, 0, 0, 0, 0, 0, 0, 0 ] # 20:31
Binary = [0, 15, 26, 28] # binary registers
twosComplement =[1, 7, 8, 9, 10, 14, 19, 23] # two's complement registers
if register > 31 or register < 0: # just check range
return -1
value = self.readregister(register)
if register in Binary:
return bin(value)
elif register in twosComplement:
if value > 2**23:
value = ((value ^ 0xFFFFFF) + 1) * -1 # convert to host two's complement system
        return value * 2**Exponential[register]
def main():
Ugain = 400
Igain = 10
Egain = 4000
device = cs5461()
# for i in range(32):
# print i, device.getregister(i)
while True:
Irms = device.getregister(11)
Urms = device.getregister(12)
Erms = device.getregister(10)
I = round(Irms*Igain, 3)
U = round(Urms*Ugain, 1)
E = round(Erms*Egain, 1)
print( "voltage = %.1fV current = %.3fA power = %.1fW" % (U, I, E) )
time.sleep(1)
if __name__ == '__main__':
main()
| gpl-2.0 | 1,260,076,423,460,905,200 | 31.821705 | 101 | 0.549835 | false |
pitluga/elephunk | elephunk/database.py | 1 | 2435 | from urlparse import urlparse
from momoko.clients import AsyncClient
class Row:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Database():
def __init__(self, client):
self.client = client
def select_all(self, operation, parameters=(), record=Row, callback=None):
self.client.execute(operation, parameters, callback = lambda cursor: callback(self._map_cursor(cursor, record)))
def select_one(self, operation, parameters=(), record=Row, callback=None):
self.client.execute(operation, parameters, callback = lambda cursor: callback(self._first_entry(cursor, record)))
def select_scalar(self, operation, parameters=(), callback=None):
self.client.execute(operation, parameters, callback = lambda cursor: callback(self._single_entry(cursor)))
def _first_entry(self, cursor, record):
if cursor.rowcount == 0:
return None
return self._map_cursor(cursor, record)[0]
def _map_cursor(self, cursor, record):
names = [x[0] for x in cursor.description]
return [record(**dict(zip(names, row))) for row in cursor.fetchall()]
def _single_entry(self, cursor):
if cursor.rowcount == 0:
return None
return cursor.fetchall()[0][0]
class DatabaseClients:
def __init__(self, servers, client_factory=AsyncClient):
self._servers = servers
self._client_factory = client_factory
self._clients = {}
def client(self, server_name, database_name):
identifier = (server_name, database_name)
if identifier not in self._clients:
self._clients[identifier] = self._build_client(server_name, database_name)
return self._clients[identifier]
def close(self):
for client in self._clients.itervalues():
client.close()
def server_names(self):
return sorted(self._servers.keys())
def _build_client(self, server_name, database_name):
config = {}
parsed_url = urlparse(self._servers[server_name])
config['host'] = parsed_url.hostname
config['database'] = database_name
if parsed_url.port:
config['port'] = parsed_url.port
if parsed_url.username:
config['user'] = parsed_url.username
if parsed_url.password:
config['password'] = parsed_url.password
return Database(self._client_factory(config))
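
# Example usage (editor's sketch, not part of the original module). Server names,
# the connection URL and the callbacks are hypothetical; queries run through
# momoko's AsyncClient, so results arrive via the callback inside an IOLoop.
#
#   clients = DatabaseClients({'main': 'postgresql://user:secret@localhost:5432'})
#   db = clients.client('main', 'blog_production')
#   db.select_scalar("SELECT count(*) FROM pg_stat_activity", callback=on_count)
#   db.select_all("SELECT * FROM pg_stat_activity WHERE datname = %s",
#                 ('blog_production',), callback=on_rows)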
| mit | -3,482,961,589,982,033,000 | 35.343284 | 121 | 0.638193 | false |
tentangdata/ig | helpers.py | 1 | 1557 | import os
import yaml
class AppConfig(object):
DB_URL_TEMPLATE = "{}://{}:{}@{}:{}/{}"
def __init__(self, db_type,
db_host, db_port, db_name,
db_username, db_password,
file_in_dir, file_out_dir,
posts_dir):
self.db_type = db_type
self.db_host = db_host
self.db_port = db_port
self.db_name = db_name
self.db_username = db_username
self.db_password = db_password
self.file_in_dir = file_in_dir
self.file_out_dir = file_out_dir
self.posts_dir = posts_dir
def get_db_url(self):
return AppConfig.DB_URL_TEMPLATE.format(
self.db_type,
self.db_username,
self.db_password,
self.db_host,
self.db_port,
self.db_name
)
class AppConfigParser(object):
""" IG App Config Parser
only accept yml format
"""
def __init__(self):
self._config_file_path = os.getenv(
'IG_CONF_PATH',
'config.yml'
)
def parse(self):
_config = yaml.load(
open(self._config_file_path, 'r')
)
return AppConfig(**_config)
if __name__ == '__main__':
""" for running simple tests """
app_conf_parser = AppConfigParser()
app_conf = app_conf_parser.parse()
assert app_conf.db_host == 'localhost'
assert app_conf.db_type == 'postgresql'
assert app_conf.get_db_url() \
== 'postgresql://postgres:postgres@localhost:5432/ig' | mit | 8,782,905,886,537,452,000 | 24.966667 | 64 | 0.526012 | false |
NationalSecurityAgency/ghidra | Ghidra/Extensions/SleighDevTools/pcodetest/build.py | 1 | 10519 | ## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import shutil
import subprocess
import sys
import pwd
import grp
import re
class BuildUtil(object):
def __init__(self):
self.log = False
self.name = False
self.num_errors = 0
self.num_warnings = 0
def run(self, cmd, stdout=False, stderr=False, verbose=True):
if isinstance(cmd, basestring):
if stdout and stderr:
cmd += ' 1>%s 2>%s' % (stdout, stderr)
elif stdout and not stderr:
cmd += ' 1>%s 2>&1' % (stdout)
elif not stdout and stderr:
cmd += ' 2>%s' % (stderr)
if verbose: self.log_info(cmd)
os.system(cmd)
else:
str = ' '.join(cmd);
if stdout:
f = file(stdout, 'w+')
str += ' 1>%s 2>&1' % (stdout)
else:
f = subprocess.PIPE
if verbose: self.log_info(str)
try:
sp = subprocess.Popen(cmd, stdout=f, stderr=subprocess.PIPE)
except OSError as e:
self.log_err("Command: " + str)
self.log_err(e.message)
                return 0, e.message  # raise
if stdout: f.close()
out, err = sp.communicate()
# print 'run returned %d bytes stdout and %d bytes stderr' % (len(out) if out else 0, len(err) if err else 0)
return out, err
def isdir(self, dname):
return os.path.isdir(dname)
def getcwd(self):
return os.getcwd()
def basename(self, fname):
return os.path.basename(fname)
def dirname(self, fname):
return os.path.dirname(fname)
def getmtime(self, fname):
return os.path.getmtime(fname)
def isfile(self, fname):
return os.path.isfile(fname)
def getenv(self, var, dflt):
return os.getenv(var, dflt)
def pw_name(self, fname):
return pwd.getpwuid(os.stat(fname).st_uid).pw_name
def gr_name(self, fname):
return grp.getgrgid(os.stat(fname).st_gid).gr_name
def isatty(self):
return os.isatty(sys.stdin.fileno())
def is_readable_file(self, fname):
if not self.isfile(fname):
self.log_warn('%s does not exist' % fname)
return False
if os.stat(fname).st_size == 0:
self.log_warn('%s is empty' % fname)
return False
if os.access(fname, os.R_OK) == 0:
self.log_warn('%s is not readable' % fname)
return False
return True
def is_executable_file(self, fname):
if not self.is_readable_file(fname): return False
if os.access(fname, os.X_OK) == 0:
self.log_warn('%s is not executable' % fname)
return False
return True
# export a file to a directory
def export_file(self, fname, dname,):
try:
if not os.path.isdir(dname):
self.makedirs(dname)
if os.path.isfile(fname):
self.copy(fname, dname, verbose=True)
elif os.path.isdir(fname):
self.copy(fname, dname, dir=True, verbose=True)
except IOError as e:
self.log_err('Error occurred exporting %s to %s' % (fname, dname))
self.log_err("Unexpected error: %s" % str(e))
def rmtree(self, dir, verbose=True):
if verbose: self.log_info('rm -r %s' % dir)
shutil.rmtree(dir)
def makedirs(self, dir, verbose=True):
if verbose: self.log_info('mkdir -p %s' % dir)
try: os.makedirs(dir)
except: pass
# copy a file to a directory
def copy(self, fname, dname, verbose=True, dir=False):
if not dir:
if verbose: self.log_info('cp -av %s %s' % (fname, dname))
shutil.copy(fname, dname)
else:
if verbose: self.log_info('cp -avr %s %s' % (fname, dname))
if os.path.exists(dname):
shutil.rmtree(dname)
shutil.copytree(fname, dname)
def chdir(self, dir, verbose=True):
if verbose: self.log_info('cd %s' % dir)
os.chdir(dir)
def remove(self, fname, verbose=True):
if verbose: self.log_info('rm -f %s' % fname)
try: os.remove(fname)
except: pass
def environment(self, var, val, verbose=True):
if verbose: self.log_info('%s=%s' % (var, val))
os.environ[var] = val
def unlink(self, targ, verbose=True):
if verbose: self.log_info('unlink %s' % targ)
os.unlink(targ)
def symlink(self, src, targ, verbose=True):
if verbose: self.log_info('ln -s %s %s' % (src, targ))
if os.path.islink(targ):
os.unlink(targ)
os.symlink(src, targ)
def build_dir(self, root, kind, what):
return root + "/" + re.sub(r'[^a-zA-Z0-9_-]+', '_', 'build-%s-%s' % (kind, what))
def log_prefix(self, kind, what):
return kind.upper() + ' ' + what
def open_log(self, root, kind, what, chdir=False):
build_dir = self.build_dir(root, kind, what)
# Get the name of the log file
logFile = '%s/log.txt' % build_dir
self.log_info('%s LOGFILE %s' % (self.log_prefix(kind, what), logFile))
try: self.rmtree(build_dir, verbose=False)
except: pass
self.makedirs(build_dir, verbose=False)
self.log_open(logFile)
if chdir: self.chdir(build_dir)
def log_open(self, name):
if self.log: self.log_close()
self.log = open(name, 'w')
self.name = name
def log_close(self):
if self.log:
if self.num_errors > 0:
print '# ERROR: There were errors, see %s' % self.name
elif self.num_warnings > 0:
print '# WARNING: There were warnings, see %s' % self.name
self.log.close()
self.log = False
self.name = False
self.num_errors = 0
self.num_warnings = 0
def log_pr(self, prefix, what):
if isinstance(what, basestring):
log_string = prefix + what
else:
log_string = prefix + repr(what)
if self.log:
self.log.write(log_string + '\n')
self.log.flush()
else:
print log_string
sys.stdout.flush()
def log_err(self, what):
self.log_pr('# ERROR: ', what)
self.num_errors += 1
def log_warn(self, what):
self.log_pr('# WARNING: ', what)
self.num_warnings += 1
def log_info(self, what):
self.log_pr('# INFO: ', what)
# create a file with size, type, and symbol info
# the function is here because it is useful and has no dependencies
def mkinfo(self, fname):
ifdefs = { 'i8':'HAS_LONGLONG', 'u8':'HAS_LONGLONG', 'f4':'HAS_FLOAT', 'f8':'HAS_DOUBLE' }
sizes = [
'char', 'signed char', 'unsigned char',
'short', 'signed short', 'unsigned short',
'int', 'signed int', 'unsigned int',
'long', 'signed long', 'unsigned long',
'long long', 'signed long long', 'unsigned long long',
'float', 'double', 'float', 'long double',
'i1', 'i2', 'i4', 'u1', 'u2', 'u4', 'i8', 'u8', 'f4', 'f8']
syms = [
'__AVR32__', '__AVR_ARCH__', 'dsPIC30', '__GNUC__', '__has_feature', 'INT4_IS_LONG',
'__INT64_TYPE__', '__INT8_TYPE__', '__llvm__', '_M_ARM_FP', '__MSP430__', '_MSV_VER',
'__SDCC', '__SIZEOF_DOUBLE__', '__SIZEOF_FLOAT__', '__SIZEOF_SIZE_T__', '__TI_COMPILER_VERSION__',
'__INT8_TYPE__', '__INT16_TYPE__', '__INT32_TYPE__', '__INT64_TYPE__', '__UINT8_TYPE__',
'__UINT16_TYPE__', '__UINT32_TYPE__', '__UINT64_TYPE__', 'HAS_FLOAT', 'HAS_DOUBLE',
'HAS_LONGLONG', 'HAS_FLOAT_OVERRIDE', 'HAS_DOUBLE_OVERRIDE', 'HAS_LONGLONG_OVERRIDE']
typedefs = { 'i1':1, 'i2':2, 'i4':4, 'u1':1, 'u2':2, 'u4':4, 'i8':8, 'u8':8, 'f4':4, 'f8':8 }
f = open(fname, 'w')
f.write('#include "types.h"\n\n')
i = 0
for s in sizes:
i += 1
d = 'INFO sizeof(%s) = ' % s
x = list(d)
x = "', '".join(x)
x = "'%s', '0'+sizeof(%s), '\\n'" % (x, s)
l = 'char size_info_%d[] = {%s};\n' % (i, x)
if s in ifdefs: f.write('#ifdef %s\n' % ifdefs[s])
f.write(l)
if s in ifdefs: f.write('#endif\n')
for s in typedefs:
if s in ifdefs: f.write('#ifdef %s\n' % ifdefs[s])
f.write('_Static_assert(sizeof(%s) == %d, "INFO %s should have size %d, is not correct\\n");\n' % (s, typedefs[s], s, typedefs[s]))
if s in ifdefs: f.write('#endif\n')
for s in syms:
i += 1
f.write('#ifdef %s\n' % s)
f.write('char sym_info_%d[] = "INFO %s is defined\\n\";\n' % (i, s))
f.write('#else\n')
f.write('char sym_info_%d[] = "INFO %s is not defined\\n\";\n' % (i, s))
f.write('#endif\n')
f.close()
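
# Example usage of BuildUtil (editor's sketch, not part of the original file).
# Paths, the kind/what labels and the make invocation are hypothetical.
#
#   util = BuildUtil()
#   util.open_log('/tmp/pcodetest', 'build', 'x86-gcc', chdir=True)
#   out, err = util.run(['make', 'all'])
#   util.log_close()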
class Config(object):
def __init__(self, *obj):
for o in obj:
if isinstance(o, dict): self.__dict__.update(o)
else: self.__dict__.update(o.__dict__)
def format(self, val):
if isinstance(val, basestring) and '%' in val:
return val % self.__dict__
elif isinstance(val, dict):
return dict(map(lambda (k,v): (k,self.format(v)), val.iteritems()))
else: return val
def __getattr__(self, attr):
return ''
def expand(self):
for k,v in self.__dict__.iteritems():
self.__dict__[k] = self.format(v)
def dump(self):
ret = ''
for k,v in sorted(self.__dict__.iteritems()):
if isinstance(v, basestring): vv = "'" + v + "'"
else: vv = str(v)
ret += ' '.ljust(10) + k.ljust(20) + vv + '\n'
return ret
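
# Example usage of Config (editor's sketch, not part of the original file).
# Keys and the '%(...)s' pattern below are hypothetical; expand() interpolates
# every string value against the config's own entries.
#
#   cfg = Config({'name': 'x86-gcc', 'build_root': '/tmp/build-%(name)s'})
#   cfg.expand()
#   print cfg.build_root        # /tmp/build-x86-gcc
#   print cfg.dump()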
| apache-2.0 | -260,404,642,648,897,020 | 32.823151 | 143 | 0.523624 | false |
DirectlineDev/django-hitcounter | django_hitcounter/models.py | 1 | 2032 | # -*- coding: utf-8 -*-
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models, transaction
from django.db.models import F
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from .managers import CounterManager
__all__ = ['Counter', ]
@python_2_unicode_compatible
class Counter(models.Model):
""" Hits counter per date
"""
# Content-object field
content_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")
date = models.DateField(default=timezone.now, verbose_name=_('date'))
hits = models.PositiveIntegerField(default=0, verbose_name=_('hits count'))
# Manager
objects = CounterManager()
class Meta:
verbose_name = _('counter')
verbose_name_plural = _('counters')
unique_together = (('content_type', 'object_pk', 'date'), )
def __str__(self):
return '{date}: {hits}'.format(
date=self.date.strftime('%d-%m-%Y'),
hits=self.hits
)
@classmethod
@transaction.atomic()
def hit(cls, obj, amount=1, date=None):
""" Increase hits counter for particular object on date (now() by default)
:param obj: model object
:param amount: increase amount (1 by default)
:return: None
"""
ct = ContentType.objects.get_for_model(obj)
date = date or timezone.now()
obj, _ = cls.objects.get_or_create(content_type=ct, object_pk=obj._get_pk_val(), date=date,
defaults={'hits': 0})
cls.objects.filter(pk=obj.pk).update(hits=F('hits')+amount)
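
# Example usage (editor's sketch, not part of the original module). `article`
# and `some_date` are hypothetical; manager/queryset calls are standard Django.
#
#   Counter.hit(article)                          # +1 for today's row
#   Counter.hit(article, amount=5)                # bulk increment
#   Counter.hit(article, date=some_date)          # count against another day
#   Counter.objects.filter(object_pk=article.pk)  # one row per (object, date)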
| apache-2.0 | -8,971,828,384,236,141,000 | 34.649123 | 99 | 0.622047 | false |
z/xonotic-map-manager | xmm/util.py | 1 | 7782 | import configparser
import os
import sys
import json
import time
import hashlib
import subprocess
import urllib.request
from datetime import datetime
from shutil import copyfile
def convert_size(number):
"""
Convert and integer to a human-readable B/KB/MB/GB/TB string.
:param number:
integer to be converted to readable string
:type number: ``int``
:returns: `str`
"""
for x in ['B', 'KB', 'MB', 'GB']:
if number < 1024.0:
string = "%3.1d%s" % (number, x)
return string.strip()
number /= 1024.0
string = "%3.1f%s" % (number, 'TB')
return string.strip()
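
# Example (editor's note, values computed from the function above):
#   convert_size(5)             -> '5B'
#   convert_size(2048)          -> '2KB'
#   convert_size(3 * 1024 ** 3) -> '3GB'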
def reporthook(count, block_size, total_size):
"""
Pretty progress for urllib downloads.
>>> import urllib.request
>>> urllib.request.urlretrieve(url, filename, reporthook)
https://github.com/yahoo/caffe/blob/master/scripts/download_model_binary.py
"""
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed. " %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def download_file(filename_with_path, url, use_curl=False, overwrite=False):
"""
downloads a file from any URL
:param filename_with_path:
filename with path to download file to
:type filename_with_path: ``str``
:param url:
URL to download map from
:type url: ``str``
:param use_curl:
Whether or not to use curl to download the file, default ``False``
:param overwrite:
Whether or not to overwrite the existing file, default ``False``
:type use_curl: ``bool``
"""
if not os.path.exists(filename_with_path) or overwrite:
if not use_curl:
urllib.request.urlretrieve(url, os.path.expanduser(filename_with_path), reporthook)
else:
subprocess.call(['curl', '-o', filename_with_path, url])
print("{}Done.{}".format(zcolors.INFO, zcolors.ENDC))
else:
print("{}file already exists, please remove first.{}".format(zcolors.FAIL, zcolors.ENDC))
return False
def parse_config(config_file):
"""
    parses an INI-style config file and returns its ``xmm`` section
:param config_file:
filename with path to config file
:type config_file: ``str``
:returns: ``dict``
"""
if not os.path.isfile(config_file):
print("{}{} not found, please create one.{}".format(zcolors.WARNING, config_file, zcolors.ENDC))
        raise Exception('Config not found.')
conf = configparser.ConfigParser()
conf.read(config_file)
return conf['xmm']
def check_if_not_create(file, template):
"""
Checks for a file, if it doesn't exist, it will be created from a template.
:param file:
filename with path to file
:type file: ``str``
:param template:
filename with path to template file
:type template: ``str``
"""
if not os.path.isfile(file):
os.makedirs(os.path.dirname(file), exist_ok=True)
copyfile(template, file)
def create_if_not_exists(file, contents):
"""
    Checks for a file; if it doesn't exist, it is created with the given contents.
:param file:
filename with path to file
:type file: ``str``
:param contents:
string contents of the file being created
:type contents: ``str``
"""
if not os.path.isfile(file):
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as f:
f.write(contents)
def file_is_empty(filename):
"""
Checks to see if a file is empty
:param filename:
string filename
:type filename: ``str``
:returns: ``bool``
"""
return os.stat(filename).st_size == 0
def replace_last(string, old, new):
"""
Replace the last occurrence of a pattern in a string
:param string:
string
:type string: ``str``
:param old:
string to find
:type old: ``str``
:param new:
string to replace
:type new: ``str``
:returns: ``str``
"""
return string[::-1].replace(old[::-1], new[::-1], 1)[::-1]
def hash_file(filename):
"""
Returns the SHA-1 hash of the file passed into it
:param filename:
string filename
:type filename: ``str``
:returns: ``str``
"""
# make a hash object
h = hashlib.sha1()
# open file for reading in binary mode
with open(filename, 'rb') as file:
# loop till the end of the file
chunk = 0
while chunk != b'':
# read only 1024 bytes at a time
chunk = file.read(1024)
h.update(chunk)
# return the hex representation of digest
return h.hexdigest()
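
# Example (editor's note; the path is hypothetical):
#   hash_file('maps/dance.pk3') returns the 40-character hex SHA-1 digest,
#   e.g. 'da39a3ee5e6b4b0d3255bfef95601890afd80709' for an empty file.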
# http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
def query_yes_no(question, default="yes"):
"""
    Ask a yes/no question via input() and return the answer.
:param question:
a string that is presented to the user.
:type question: ``str``
:param default:
is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
:type default: ``str``
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
# http://stackoverflow.com/a/24030569
class ObjectEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that leverages an object's `__json__()` method,
if available, to obtain its default JSON representation.
"""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
if hasattr(obj, '__json__'):
return obj.__json__()
return json.JSONEncoder.default(self, obj)
class zcolors:
"""
Terminal formatting.
Options:
* HEADER
* INFO
* SUCCESS
* WARNING
* FAIL
* ENDC (end color)
* BOLD
* UNDERLINE
>>> "{}eggs{}: {}spam{}".format(zcolors.INFO, zcolors.ENDC, zcolors.UNDERLINE, zcolors.ENDC)
"""
HEADER = '\033[95m'
INFO = '\033[94m'
SUCCESS = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def cprint(string, style='INFO'):
"""
Terminal formatting convenience function.
:param string:
A string to print.
:type string: ``str``
:param style:
A style to print.
Options:
* HEADER
* INFO
* SUCCESS
* WARNING
* FAIL
* ENDC (end color)
* BOLD
* UNDERLINE
:type style: ``str``
>>> cprint("Success", style='SUCCESS')
"""
color = getattr(zcolors, style)
print('{}{}{}'.format(color, string, zcolors.ENDC))
| mit | 841,026,930,794,236,800 | 23.626582 | 104 | 0.577101 | false |
Panda3D-google-code-repositories/naith | game/plugins/dirlight/dirlight.py | 1 | 2667 | # -*- coding: utf-8 -*-
# Copyright Tom SF Haines, Aaron Snoswell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from panda3d.core import NodePath, VBase4, BitMask32
from panda3d.core import DirectionalLight as PDirectionalLight
class DirLight:
"""Creates a simple directional light"""
def __init__(self,manager,xml):
self.light = PDirectionalLight('dlight')
self.lightNode = NodePath(self.light)
self.lightNode.setCompass()
if hasattr(self.lightNode.node(), "setCameraMask"):
self.lightNode.node().setCameraMask(BitMask32.bit(3))
self.reload(manager,xml)
def reload(self,manager,xml):
color = xml.find('color')
if color!=None:
self.light.setColor(VBase4(float(color.get('r')), float(color.get('g')), float(color.get('b')), 1.0))
pos = xml.find('pos')
if pos!=None:
self.lightNode.setPos(float(pos.get('x')), float(pos.get('y')), float(pos.get('z')))
else:
self.lightNode.setPos(0, 0, 0)
lookAt = xml.find('lookAt')
if lookAt!=None:
self.lightNode.lookAt(float(lookAt.get('x')), float(lookAt.get('y')), float(lookAt.get('z')))
lens = xml.find('lens')
if lens!=None and hasattr(self.lightNode.node(), 'getLens'):
if bool(int(lens.get('auto'))):
self.lightNode.reparentTo(base.camera)
else:
self.lightNode.reparentTo(render)
lobj = self.lightNode.node().getLens()
lobj.setNearFar(float(lens.get('near', 1.0)), float(lens.get('far', 100000.0)))
lobj.setFilmSize(float(lens.get('width', 1.0)), float(lens.get('height', 1.0)))
lobj.setFilmOffset(float(lens.get('x', 0.0)), float(lens.get('y', 0.0)))
if hasattr(self.lightNode.node(), 'setShadowCaster'):
shadows = xml.find('shadows')
if shadows!=None:
self.lightNode.node().setShadowCaster(True, int(shadows.get('width', 512)), int(shadows.get('height', 512)), int(shadows.get('sort', -10)))
#self.lightNode.node().setPushBias(float(shadows.get('bias', 0.5)))
else:
self.lightNode.node().setShadowCaster(False)
def start(self):
render.setLight(self.lightNode)
def stop(self):
render.clearLight(self.lightNode)
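
# Example plugin configuration (editor's sketch, not part of the original file).
# Element and attribute names follow the reload() parsing above; the values and
# the <dirlight> tag name itself are hypothetical.
#
#   <dirlight>
#     <color r="0.9" g="0.9" b="0.8"/>
#     <pos x="0" y="0" z="50"/>
#     <lookAt x="0" y="0" z="0"/>
#     <lens auto="0" near="10" far="400" width="100" height="100"/>
#     <shadows width="1024" height="1024" sort="-10"/>
#   </dirlight>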
| apache-2.0 | -1,823,117,428,801,273,600 | 37.1 | 147 | 0.673416 | false |
johjeff/Python-Wesleyen | Week2/ProblemSet2.py | 1 | 11302 | # -ProblemSet2.py *- coding: utf-8 -*-
"""
Each problem will be a function to write.
Remember that you can execute just the code between the #%% signs by clicking
somewhere in that space and the using Ctrl-Enter (Cmd-Enter on Mac). An
alternative is to use the second toolbar green triangle or Menu>Run>Run cell.
On loops especially, you can make an error that causes the program to run
forever. If you don't get immediate response, then this is probably happening.
In that case, try Ctrl-C. If that doesn't stop it, close your IPython console
and open a new one. Look over your code and see why the termination
condition can't be met and fix it. Then run again.
"""
"""
Problem 2_1:
Write a function 'problem2_1()' that sets a variable lis = list(range(20,30)) and
does all of the following, each on a separate line:
(a) print the element of lis with the index 3
(b) print lis itself
(c) write a 'for' loop that prints out every element of lis. Recall that
len() will give you the length of such a data collection if you need that.
Use end=" " to put one space between the elements of the list lis. Allow
the extra space at the end of the list to stand, don't make a special case
of it.
"""
#%%
def problem2_1():
lis = list(range(20,30))
print(lis[3])
print(lis)
for item in range(0,len(lis)):
print(lis[item],end=" ")
#%%
"""
Test run:
problem2_1()
23
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
20 21 22 23 24 25 26 27 28 29
"""
"""
Problem 2_2:
Write a function 'problem2_2()' that takes a list and does the following to it.
Actually, I've started the function for you below. Your function should do all
of the following, each on a separate line. Recall that lists start numbering
with 0.
0) print the whole list (this doesn't require a while or for loop)
1) print the item with index 0
2) print the last item in the list
3) print the items with indexes 3 through 5 but not including 5
4) print the items up to the one with index 3 but not including item 3
5) print the items starting at index 3 and going through the end.
6) print the length of the list ( use len() )
7) Use the append() method of a list to append the letter "z" onto a list.
Print the list with z appended.
Make sure that your function also works with blist below. For this to work,
you cannot use alist as a variable inside your function.
"""
#%%
alist = ["a","e","i","o","u","y"]
blist = ["alpha", "beta", "gamma", "delta", "epsilon", "eta", "theta"]
def problem2_2(my_list):
print(my_list)
print(my_list[0])
lislen = int(len(my_list))-1
print(my_list[lislen])
print(my_list[3:5:1])
print(my_list[:3])
print(my_list[3:])
print(len(my_list))
my_list.append("z")
print(my_list)
#%%
"""
Test run, two of them. The same function should work with either list. The
grader function will use different lists.
problem2_2(alist)
['a', 'e', 'i', 'o', 'u', 'y']
a
y
['o', 'u']
['a', 'e', 'i']
['o', 'u', 'y']
6
['a', 'e', 'i', 'o', 'u', 'y', 'z']
problem2_2(blist)
['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'eta', 'theta']
alpha
theta
['delta', 'epsilon']
['alpha', 'beta', 'gamma']
['delta', 'epsilon', 'eta', 'theta']
7
['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'eta', 'theta', 'z']
"""
"""
Problem 2_3:
Write a function problem2_3() that should have a 'for' loop that steps
through the list below and prints the name of the state and the number of
letters in the state's name. You may use the len() function.
Here is the output from mine:
In [70]: problem2_3(newEngland)
Maine has 5 letters.
New Hampshire has 13 letters.
Vermont has 7 letters.
Rhode Island has 12 letters.
Massachusetts has 13 letters.
Connecticut has 11 letters.
The function is started for you. The grader will not use the list newEngland
so don't use the variable newEngland inside your function.
"""
"""
newEngland = ["Maine","New Hampshire","Vermont", "Rhode Island",
"Massachusetts","Connecticut"]
"""
#%%
def problem2_3(st):
    for state in st:
        print(state, "has", len(state), "letters.")
"""
Problem 2_4:
random.random() generates pseudo-random real numbers between 0 and 1. But what
if you needed other random reals? Write a program to use only random.random()
to generate a list of random reals between 30 and 35. This is a simple matter
of multiplication and addition. By multiplying you can spread the random numbers
out to cover the range 0 to 5. By adding you can shift these numbers up to the
required range from 30 to 35. Set the seed in this function to 70 so that
everyone generates the same random numbers and will agree with the grader's
list of random numbers. Print out the list (in list form).
"""
#%%
import random
def problem2_4():
""" Make a list of 10 random reals between 30 and 35 """
random.seed(70)
num1 = []
#num1 = [random.random() * 5 + 30 for _ in range(10)] # same as loop below
for num in range(0,10):
num1.append(random.random() * 5 + 30)
print(num1)
#%%
"""
COMMENT: Note that this uses a pseudorandom number generator. That means
that the list will be different for each person. We issue the command
random.seed(70) inside the function problem2_4() to insure that we generate the
same numbers that the grader expects. If you do this problem correctly, you
should get the list of random numbers below.
Test run:
problem2_4()
[34.54884618961936, 31.470395203793395, 32.297169396656095, 30.681793552717807,
34.97530360173135, 30.773219981037737, 33.36969776732032, 32.990127772708405,
33.57311858494461, 32.052629620057274]
""""""
Problem 2_5:
Let's do a small simulation. Suppose that you rolled a die repeatedly. Each
time that you roll the die you get an integer from 1 to 6, the number of pips
on the die. Use random.randint(a,b) to simulate rolling a die 10 times and
printout the 10 outcomes. The function random.randint(a,b) will
generate an integer (whole number) between the integers a and b inclusive.
Remember each outcome is 1, 2, 3, 4, 5, or 6, so make sure that you can get
all of these outcomes and none other. Print the list, one item to a line so that
there are 10 lines as in the example run. Make sure that it has 10 items
and they are all in the range 1 through 6. Here is one of my runs. In
the problem below I ask you to set the seed to 171 for the benefit of the
auto-grader. In this example, that wasn't done and so your numbers will be
different. Note that the seed must be set BEFORE randint is used.
problem2_5()
4
5
3
1
4
3
5
1
6
3
"""
"""
Problem 2_5:
"""
import random
def problem2_5():
""" Simulates rolling a die 10 times."""
# Setting the seed makes the random numbers always the same
# This is to make the auto-grader's job easier.
random.seed(171) # don't remove when you submit for grading
#die = [random.randint(1,6) for _ in range(10)]
pip = []
for val in range(10):
#die = random.randint(1,6)
pip.append(random.randint(1,6))
print(pip[val])
#%%
"""
Problem 2_6:
Let's continue with our simulation of dice by rolling two of them. This time
each die can come up with a number from 1 to 6, but you have two of them. The
result or outcome is taken to be the sum of the pips on the two dice. Write a
program that will roll 2 dice and produce the outcome. This time let's roll
the two dice 100 times. Print the outcomes one outcome per line.
"""
#%%
import random
def problem2_6():
""" Simulates rolling 2 dice 100 times """
# Setting the seed makes the random numbers always the same
# This is to make the auto-grader's job easier.
random.seed(431) # don't remove when you submit for grading
for val in range(100):
tot = random.randint(1,6) + random.randint(1,6)
print(tot)
#%%
"""
Test run with seed 82, but make sure that you submit with the seed 431:
problem2_6()
6
8
4
9
3
8
6
5
7
5
7
6
5
6
3
9
4
8
11
'
'
'
9
6
7
10
4
"""
"""
Problem 2_7:
Heron's formula for computing the area of a triangle with sides a, b, and c is
as follows. Let s = .5(a + b + c) --- that is, 1/2 of the perimeter of the
triangle. Then the area is the square root of s(s-a)(s-b)(s-c). You can compute
the square root of x by x**.5 (raise x to the 1/2 power). Use an input
statement to get the length of the sides. Don't forget to convert this input
to a real number using float(). Adjust your output to be just like what you
see below. Here is a run of my program:
problem2_7()
Enter length of side one: 9
Enter length of side two: 12
Enter length of side three: 15
Area of a triangle with sides 9.0 12.0 15.0 is 54.0
"""
#%%
def problem2_7():
""" computes area of triangle using Heron's formula. """
side1 = float(input("Enter length of side one: "))
    if side1 <= 0.0:
        print("Side length must be greater than 0")
        return
side2 = float(input("Enter length of side two: "))
    if side2 <= 0.0:
        print("Side length must be greater than 0")
        return
side3 = float(input("Enter length of side three: "))
    if side3 <= 0.0:
        print("Side length must be greater than 0")
        return
s = float((side1 + side2 + side3)/2)
x = s * (s - side1) * (s - side2) * (s - side3)
area = x**.5
print("Area of a triangle with sides",side1,side2,side3,"is",area)
#%%
"""
Problem 2_8:
The following list gives the hourly temperature during a 24 hour day. Please
write a function, that will take such a list and compute 3 things: average
temperature, high (maximum temperature), and low (minimum temperature) for the
day. I will test with a different set of temperatures, so don't pick out
the low or the high and code it into your program. This should work for
other hourly_temp lists as well. This can be done by looping (iterating)
through the list. I suggest you not write it all at once. You might write
a function that computes just one of these, say average, then improve it
to handle another, say maximum, etc. Note that there are Python functions
called max() and min() that could also be used to do part of the jobs.
"""
#%%
hourly_temp = [40.0, 39.0, 37.0, 34.0, 33.0, 34.0, 36.0, 37.0, 38.0, 39.0, \
40.0, 41.0, 44.0, 45.0, 47.0, 48.0, 45.0, 42.0, 39.0, 37.0, \
36.0, 35.0, 33.0, 32.0]
#%%
def problem2_8(temp_list):
average = sum(temp_list)/len(temp_list)
print("Average:",average)
high = max(temp_list)
print("High:",high)
low = min(temp_list)
print("Low:",low)
#%%
"""
Sample run using the list hourly_temp. Note that the grader will use a
different hourly list. Be sure that you function works on this list and test
it on at least one other list of your own construction.
Note also, that the list the grader uses may not have the same number of items
as this one.
problem2_8(hourly_temp)
Average: 38.791666666666664
High: 48.0
Low: 32.0
"""
| bsd-2-clause | -8,545,179,733,091,308,000 | 28.711957 | 81 | 0.656432 | false |