repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
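Each row below is one record in this schema: a source file plus its license and a few surface statistics. As a minimal sketch of working with such a dump (the file name `code_dump.parquet` and the chosen filter values are illustrative assumptions, not part of this dataset), one could load and filter it like this:

```python
# Minimal sketch: load a dump with the schema above and filter by its columns.
# "code_dump.parquet" is a hypothetical export path; adjust to the real source.
import pandas as pd

df = pd.read_parquet("code_dump.parquet")

# Keep non-autogenerated files with a permissive license and typical text density.
subset = df[
    df["license"].isin(["mit", "apache-2.0"])
    & ~df["autogenerated"]
    & df["alpha_frac"].between(0.25, 0.97)
]
print(subset[["repo_name", "path", "size"]].head())
```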
pism/pism | examples/inverse/test_invssa_gn.py | 1 | 5837 |
#! /usr/bin/env python3
#
# Copyright (C) 2012, 2014, 2015, 2016, 2017, 2018, 2019 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
import os
import math
import PISM
def adjustTauc(mask, tauc):
"""Where ice is floating or land is ice-free, tauc should be adjusted to have some preset default values."""
grid = mask.grid()
high_tauc = grid.ctx().config().get_number("basal_yield_stress.ice_free_bedrock")
with PISM.vec.Access(comm=tauc, nocomm=mask):
for (i, j) in grid.points():
if mask.ocean(i, j):
tauc[i, j] = 0
elif mask.ice_free(i, j):
tauc[i, j] = high_tauc
# Main code starts here
if __name__ == "__main__":
context = PISM.Context()
config = context.config
com = context.com
PISM.set_abort_on_sigint(True)
append_mode = False
input_filename = config.get_string("input.file")
inv_data_filename = PISM.OptionString("-inv_data", "inverse data file", input_filename).value()
use_tauc_prior = PISM.OptionBool("-inv_use_tauc_prior",
"Use tauc_prior from inverse data file as initial guess.")
ssarun = PISM.invert.ssa.SSAForwardRunFromInputFile(input_filename, inv_data_filename, 'tauc')
ssarun.setup()
vecs = ssarun.modeldata.vecs
grid = ssarun.grid
# Determine the prior guess for tauc. This can be one of
# a) tauc from the input file (default)
# b) tauc_prior from the inv_data file if -inv_use_tauc_prior is set
tauc_prior = PISM.model.createYieldStressVec(grid, 'tauc_prior')
tauc_prior.set_attrs("diagnostic",
"initial guess for (pseudo-plastic) basal yield stress in an inversion",
"Pa", "Pa", "", 0)
tauc = PISM.model.createYieldStressVec(grid)
if use_tauc_prior:
tauc_prior.regrid(inv_data_filename, critical=True)
else:
if not PISM.util.fileHasVariable(input_filename, "tauc"):
PISM.verbPrintf(
1, com, "Initial guess for tauc is not available as 'tauc' in %s.\nYou can provide an initial guess as 'tauc_prior' using the command line option -use_tauc_prior." % input_filename)
exit(1)
tauc.regrid(input_filename, True)
tauc_prior.copy_from(tauc)
adjustTauc(vecs.ice_mask, tauc_prior)
# Convert tauc_prior -> zeta_prior
zeta = PISM.IceModelVec2S()
WIDE_STENCIL = int(grid.ctx().config().get_number("grid.max_stencil_width"))
zeta.create(grid, "", PISM.WITH_GHOSTS, WIDE_STENCIL)
ssarun.tauc_param.convertFromDesignVariable(tauc_prior, zeta)
ssarun.ssa.linearize_at(zeta)
vel_ssa_observed = None
vel_ssa_observed = PISM.model.create2dVelocityVec(grid, '_ssa_observed', stencil_width=2)
if PISM.util.fileHasVariable(inv_data_filename, "u_ssa_observed"):
vel_ssa_observed.regrid(inv_data_filename, True)
else:
if not PISM.util.fileHasVariable(inv_data_filename, "u_surface_observed"):
PISM.verbPrintf(
1, context.com, "Neither u/v_ssa_observed nor u/v_surface_observed is available in %s.\nAt least one must be specified.\n" % inv_data_filename)
exit(1)
vel_surface_observed = PISM.model.create2dVelocityVec(grid, '_surface_observed', stencil_width=2)
vel_surface_observed.regrid(inv_data_filename, True)
# 'is_regional' is not defined in this excerpt; assume the default (non-regional) setup here.
is_regional = False
sia_solver = PISM.SIAFD
if is_regional:
sia_solver = PISM.SIAFD_Regional
vel_sia_observed = PISM.sia.computeSIASurfaceVelocities(ssarun.modeldata, sia_solver)
vel_sia_observed.metadata(0).set_name('u_sia_observed')
vel_sia_observed.metadata(0).set_string('long_name', "x-component of the 'observed' SIA velocities")
vel_sia_observed.metadata(1).set_name('v_sia_observed')
vel_sia_observed.metadata(1).set_string('long_name', "y-component of the 'observed' SIA velocities")
vel_ssa_observed.copy_from(vel_surface_observed)
vel_ssa_observed.add(-1, vel_sia_observed)
(designFunctional, stateFunctional) = PISM.invert.ssa.createTikhonovFunctionals(ssarun)
eta = config.get_number("inverse.tikhonov.penalty_weight")
solver_gn = PISM.InvSSATikhonovGN(ssarun.ssa, zeta, vel_ssa_observed, eta, designFunctional, stateFunctional)
seed = PISM.OptionInteger("-inv_seed", "random generator seed")
if seed.is_set():
np.random.seed(seed.value() + PISM.Context().rank)
d1 = PISM.vec.randVectorS(grid, 1)
d2 = PISM.vec.randVectorS(grid, 1)
GNd1 = PISM.IceModelVec2S()
GNd1.create(grid, "", PISM.WITHOUT_GHOSTS)
GNd2 = PISM.IceModelVec2S()
GNd2.create(grid, "", PISM.WITHOUT_GHOSTS)
solver_gn.apply_GN(d1, GNd1)
solver_gn.apply_GN(d2, GNd2)
ip1 = d1.get_vec().dot(GNd2.get_vec())
ip2 = d2.get_vec().dot(GNd1.get_vec())
PISM.verbPrintf(1, grid.com, "Test of Gauss-Newton symmetry (x^t GN y) vs (y^t GN x)\n")
PISM.verbPrintf(1, grid.com, "ip1 %.10g ip2 %.10g\n" % (ip1, ip2))
PISM.verbPrintf(1, grid.com, "relative error %.10g\n" % abs((ip1 - ip2) / ip1))
| gpl-3.0 | -8,403,609,705,291,183,000 | 2,980,346,882,502,649,300 | 39.818182 | 197 | 0.667124 | false |
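The last lines of the record above check that the Gauss-Newton operator GN is symmetric by comparing d1·(GN d2) against d2·(GN d1) for two random vectors. A NumPy-only sketch of the same idea (illustrative; `apply_gn` is a stand-in, not the PISM API):

```python
# Symmetry check for a linear operator, mirroring the test above.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
GN = A.T @ A                      # symmetric, Gauss-Newton-like operator

def apply_gn(d):
    # stand-in for solver_gn.apply_GN(d, GNd)
    return GN @ d

d1, d2 = rng.standard_normal(5), rng.standard_normal(5)
ip1 = d1 @ apply_gn(d2)
ip2 = d2 @ apply_gn(d1)
print("ip1 %.10g ip2 %.10g" % (ip1, ip2))
print("relative error %.10g" % abs((ip1 - ip2) / ip1))   # ~1e-16 when GN is symmetric
```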
paramecio/pastafari | scripts/monit/debian_wheezy/alive.py | 1 | 4720 |
#!/usr/bin/python3 -u
# A script to install the alive script and its cron job
import subprocess
import argparse
import re
import os
import shutil
import pwd
from subprocess import call
parser = argparse.ArgumentParser(description='A script to install the alive script and cron job')
parser.add_argument('--url', help='The url where notify that this server is alive', required=True)
parser.add_argument('--user', help='The user for pastafari', required=True)
parser.add_argument('--pub_key', help='The pub key used in pastafari user', required=True)
args = parser.parse_args()
url=args.url
check_url = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if check_url.match(args.url):
# Create users
if call("sudo useradd -m -s /bin/sh %s" % args.user, shell=True) > 0:
print('Error, cannot add a new user')
exit(1)
else:
print('Added user')
if call("sudo mkdir -p /home/"+args.user+"/.ssh && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh && sudo chmod 700 /home/"+args.user+"/.ssh", shell=True) > 0:
print('Error, cannot add ssh directory')
exit(1)
else:
print('Added ssh directory')
if call("sudo cp "+args.pub_key+" /home/"+args.user+"/.ssh/authorized_keys && sudo chown "+args.user+":"+args.user+" /home/"+args.user+"/.ssh/authorized_keys && sudo chmod 600 /home/"+args.user+"/.ssh/authorized_keys", shell=True) > 0:
print('Error, cannot copy pub key to user')
exit(1)
else:
print('Added pub key to user')
# Edit alive cron
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive') as f:
alive_cron=f.read()
with open('modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive', 'w') as f:
alive_cron=alive_cron.replace('/home/spanel/modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py', '/usr/local/bin/get_info.py')
f.write(alive_cron)
# Edit get_info.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_info.py') as f:
get_info=f.read()
with open('/usr/local/bin/get_info.py', 'w') as f:
get_info=get_info.replace("http://url/to/server/token/ip", args.url)
f.write(get_info)
os.chmod('/usr/local/bin/get_info.py', 0o700)
user_passwd=pwd.getpwnam(args.user)
os.chown('/usr/local/bin/get_info.py', user_passwd[2], user_passwd[3])
#shutil.chown('/usr/local/bin/get_info.py', args.user, args.user)
# Edit get_updates.py
with open('modules/pastafari/scripts/monit/debian_wheezy/files/get_updates.py') as f:
get_updates=f.read()
with open('/etc/cron.daily/get_updates.py', 'w') as f:
url_updates=args.url.replace('/getinfo/', '/getupdates/')
get_updates=get_updates.replace("http://url/to/server/token/ip", url_updates)
f.write(get_updates)
os.chmod('/etc/cron.daily/get_updates.py', 0o700)
# Edit sudo file
with open('modules/pastafari/scripts/monit/debian_wheezy/files/sudoers.d/spanel') as f:
sudoers=f.read()
with open('/etc/sudoers.d/spanel', 'w') as f:
sudoers=sudoers.replace("spanel", args.user)
f.write(sudoers)
# Copy cron alive to /etc/cron.d/
if call("sudo cp modules/pastafari/scripts/monit/debian_wheezy/files/crontab/alive /etc/cron.d/alive", shell=True) > 0:
print('Error, cannot install crontab alive file in cron.d')
exit(1)
else:
print('Added crontab alive file in cron.d')
print('Script installed successfully')
# Copy script for upgrades in /usr/local/bin
if call("mkdir /home/"+args.user+"/bin/ && cp modules/pastafari/scripts/standard/debian_wheezy/upgrade.sh /home/"+args.user+"/bin/ && chown -R "+args.user+":"+args.user+" /home/"+args.user+"/bin/", shell=True) > 0:
print('Error, cannot install upgrade.sh in /home/'+args.user+'/bin/')
exit(1)
else:
print('Added /home/'+args.user+'/bin/upgrade.sh')
print('Script installed successfully')
# Making first call to site
if subprocess.call('/usr/local/bin/get_info.py', shell=True) > 0:
print('Error')
exit(1)
else:
print('Your server should be up in your panel...')
exit(0)
else:
print('Error installing the module, not valid url')
exit(1)
| gpl-2.0 | -6,237,779,221,605,797,000 | -4,319,076,900,619,740,700 | 33.962963 | 239 | 0.60911 | false |
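The installer above refuses to run unless the --url argument matches its compiled URL pattern. A small standalone sketch of that check (the sample URLs are made up):

```python
# Sketch: the URL validation used by the installer, applied to sample inputs.
import re

check_url = re.compile(
    r'^(?:http|ftp)s?://'
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
    r'localhost|'
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
    r'(?::\d+)?'
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)

for url in ("https://panel.example.com/getinfo/token/10.0.0.5", "ftp://10.0.0.5/x", "not a url"):
    print(url, "->", bool(check_url.match(url)))
```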
broferek/ansible | test/units/module_utils/test_database.py | 75 | 4393 |
import pytest
from ansible.module_utils.database import (
pg_quote_identifier,
SQLParseError,
)
# These are all valid strings
# The results are based on interpreting the identifier as a table name
VALID = {
# User quoted
'"public.table"': '"public.table"',
'"public"."table"': '"public"."table"',
'"schema test"."table test"': '"schema test"."table test"',
# We quote part
'public.table': '"public"."table"',
'"public".table': '"public"."table"',
'public."table"': '"public"."table"',
'schema test.table test': '"schema test"."table test"',
'"schema test".table test': '"schema test"."table test"',
'schema test."table test"': '"schema test"."table test"',
# Embedded double quotes
'table "test"': '"table ""test"""',
'public."table ""test"""': '"public"."table ""test"""',
'public.table "test"': '"public"."table ""test"""',
'schema "test".table': '"schema ""test"""."table"',
'"schema ""test""".table': '"schema ""test"""."table"',
'"""wat"""."""test"""': '"""wat"""."""test"""',
# Sigh, handle these as well:
'"no end quote': '"""no end quote"',
'schema."table': '"schema"."""table"',
'"schema.table': '"""schema"."table"',
'schema."table.something': '"schema"."""table"."something"',
# Embedded dots
'"schema.test"."table.test"': '"schema.test"."table.test"',
'"schema.".table': '"schema."."table"',
'"schema."."table"': '"schema."."table"',
'schema.".table"': '"schema".".table"',
'"schema".".table"': '"schema".".table"',
'"schema.".".table"': '"schema.".".table"',
# These are valid but maybe not what the user intended
'."table"': '".""table"""',
'table.': '"table."',
}
INVALID = {
('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema."table"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
}
HOW_MANY_DOTS = (
('role', 'role', '"role"',
'PostgreSQL does not support role with more than 1 dots'),
('db', 'database', '"db"',
'PostgreSQL does not support database with more than 1 dots'),
('db.schema', 'schema', '"db"."schema"',
'PostgreSQL does not support schema with more than 2 dots'),
('db.schema.table', 'table', '"db"."schema"."table"',
'PostgreSQL does not support table with more than 3 dots'),
('db.schema.table.column', 'column', '"db"."schema"."table"."column"',
'PostgreSQL does not support column with more than 4 dots'),
)
VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID))
INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID))
@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES)
def test_valid_quotes(identifier, quoted_identifier):
assert pg_quote_identifier(identifier, 'table') == quoted_identifier
@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES)
def test_invalid_quotes(identifier, id_type, msg):
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier(identifier, id_type)
ex.match(msg)
@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS)
def test_how_many_dots(identifier, id_type, quoted_identifier, msg):
assert pg_quote_identifier(identifier, id_type) == quoted_identifier
with pytest.raises(SQLParseError) as ex:
pg_quote_identifier('%s.more' % identifier, id_type)
ex.match(msg)
| gpl-3.0 | 1,361,952,758,955,280,400 | 3,466,672,452,233,442,300 | 42.93 | 120 | 0.631004 | false |
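The VALID and INVALID tables above encode the quoting rules that `pg_quote_identifier` enforces. A short usage sketch (assumes Ansible is importable so that `ansible.module_utils.database` is on the path):

```python
# Sketch: direct calls matching a few entries of the VALID/INVALID tables above.
from ansible.module_utils.database import pg_quote_identifier, SQLParseError

print(pg_quote_identifier('public.table', 'table'))            # "public"."table"
print(pg_quote_identifier('schema test.table test', 'table'))  # "schema test"."table test"

try:
    pg_quote_identifier('test.too.many.dots', 'table')
except SQLParseError as exc:
    print('rejected:', exc)
```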
macborowy/dajsiepoznac-feed | DajSiePoznacFeed-Server/tests/workers_tests/test_participant_worker.py | 1 | 2277 |
import unittest
import mock
from crawler.src.controllers.worker import ParticipantsWorker
import webapp2
import webtest
from google.appengine.ext import testbed
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class ParticipantsWorkerTests(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_taskqueue_stub()
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
def tearDown(self):
self.taskqueue_stub.Shutdown()
self.testbed.deactivate()
def test_participants_should_enqueue_GetAllParticipantsTask(self):
worker = ParticipantsWorker()
worker._enqueue()
tasks = self.taskqueue_stub.get_filtered_tasks()
self.assertTrue(len(tasks) == 1)
@mock.patch("crawler.src.scrapper.blogs.getAllParticipants")
def test_GetAllParticipantsTask_should_spawn_multiple_child_jobs(self, mock_method):
mock_method.return_value = [
{"blog_url": "http://mborowy.com", "author": "Maciej Borowy"},
{"blog_url": "http://google.com", "author": "Google"}
]
parent_task = ParticipantsWorker.GetAllParticipantsTask()
parent_task._start(None)
tasks = self.taskqueue_stub.get_filtered_tasks()
self.assertTrue(len(tasks) == 2)
class GetAllParticipantsTaskTest(unittest.TestCase):
@mock.patch("crawler.src.scrapper.blogs.getAllParticipants")
def test_task_runs_getAllParticipants_method_from_scrapper_module(self, mock_method):
task = ParticipantsWorker.GetAllParticipantsTask()
task._task(None)
self.assertTrue(mock_method.called == 1)
class GetFeedUrlTaskTests(unittest.TestCase):
def setUp(self):
app = webapp2.WSGIApplication([("/", ParticipantsWorker.GetFeedUrlTask)])
self.test_app = webtest.TestApp(app)
@mock.patch("crawler.src.scrapper.blogs.getFeedUrl")
def test_blogs_getFeedUrl_is_called_with_params_from_request(self, mock_getFeedUrl):
params = {"blog_url": "http://mborowy.com", "author": "Maciej Borowy"}
self.test_app.post("/", params=params)
mock_getFeedUrl.assert_called_with(participant=params)
| mit | 157,470,610,483,488,320 | -29,424,715,613,123,948 | 32.985075 | 89 | 0.690821 | false |
NGSchool2016/ngschool2016-materials | src/snpEff/scripts/gsa/checkGeneNames.py | 2 | 4985 |
#!/usr/bin/env python
import sys
# Debug mode?
debug = False
#------------------------------------------------------------------------------
# Read genes file
#------------------------------------------------------------------------------
def readGenes(genesFile):
print >> sys.stderr, "Reading file " + genesFile
genes2new = {}
genes2old = {}
id2nameNew = {}
id2nameOld = {}
for line in open(genesFile) :
fields = line.rstrip().split("\t")
if debug: print fields
geneId, nameOld = fields[0], fields[1]
nameNew = ''
if len(fields) > 2: nameNew = fields[2]
if nameNew:
genes2new[nameOld] = nameNew
id2nameNew[geneId] = nameNew
if nameOld:
genes2old[nameNew] = nameOld
id2nameOld[geneId] = nameOld
return genes2new, genes2old, id2nameNew, id2nameOld
#------------------------------------------------------------------------------
# Read HGNC file: gene names, previous names and synonyms.
#------------------------------------------------------------------------------
def readHgcn(hgncFile):
print >> sys.stderr, "Reading file " + hgncFile
genesHgcn = {}
for line in open(hgncFile) :
fields = line.rstrip().split("\t")
if len(fields) < 8: continue
geneName, prevName, synonyms = fields[1], fields[6], fields[8]
if debug: print "{}\t|{}|\t|{}|".format(geneName, prevName, synonyms)
# Add all 'previous names'
for g in prevName.split(",") :
alias = g.strip()
if alias:
if alias in genesHgcn:
print >> sys.stderr, "Error: Alias '{}' already exists ( {} vs {} )!".format( alias, genesHgcn[alias], geneName )
else :
genesHgcn[alias] = geneName
if debug: print "\tPrev: |{}|".format( alias )
# Add all 'synonyms'
for g in synonyms.split(",") :
alias = g.strip()
if alias:
if alias in genesHgcn:
print >> sys.stderr, "Error: Alias '{}' already exists ( {} vs {} )!".format( alias, genesHgcn[alias], geneName )
else :
genesHgcn[alias] = geneName
if debug: print "\tSyn: |{}|".format( alias )
return genesHgcn
#------------------------------------------------------------------------------
# Find gene
#------------------------------------------------------------------------------
#def findGeneName(g, genes2new, genes2old, genesHgcn):
def findGeneName(g):
# Gene name found, no need to find a new name
if isValid(g, genes2new): return g
# Try translating the name using 'genes2old' dictionary
geneOld = genes2old.get(g, "")
if isValid(geneOld, genes2new): return geneOld
# Try an alias
geneHgcn = genesHgcn.get(g, "")
if isValid(geneHgcn, genes2new): return geneHgcn
# We have an alias, but it was not valid.
if geneHgcn:
# Try to find an 'old' name for the alias
geneNew = genes2old.get(geneHgcn, "")
if isValid(geneNew, genes2new): return geneNew
# Desperate attempt: Find a gene that matches
for gn in genes2new:
if gn.startswith(g): return gn
for gn in genes2old:
if gn.startswith(g): return genes2old[gn]
return ""
# Valid gene name (not empty and is in 'genes' dictionary)
def isValid(gname, genes):
if gname and (gname in genes): return True
return False
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
#---
# Parse command line
#---
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: " + sys.argv[0] + " hgnc_complete_set.txt genes.list"
sys.exit(1)
hgncFile = sys.argv[1] # This argument is the HGNC file. Note: You can download the latest version from ftp://ftp.ebi.ac.uk/pub/databases/genenames/hgnc_complete_set.txt.gz
genesFile = sys.argv[2] # This is a "geneId \t geneName" list created from a GTF file
# Read files
genes2new, genes2old, id2nameNew, id2nameOld = readGenes(genesFile)
genesHgcn = readHgcn(hgncFile)
#---
# Read all lines from STDIN
# Note: This is counterintuitive because we are trying to
# replace 'new' names with 'old' names (and not the
# other way around, which is what you'd expect)
#---
for line in sys.stdin:
f = line.rstrip().split('\t')
geneSet = f[0]
genesNames = f[2:]
# Check that each gene has a valid geneID
missing = ""
missingCount = 0
foundAlias = 0
out = "{}\t{}".format(geneSet, f[1]);
for g in genesNames :
geneOld = findGeneName(g)
if not geneOld:
# No valid replacement found
missing += "\t\t'{}'\n".format(g)
missingCount += 1
elif g != geneOld:
# Replacement found
missingCount += 1
foundAlias += 1
missing += "\t\t'{}'\t->\t'{}'\n".format(g, geneOld)
# Add only if there is a gene name (skip if no replacement has been found)
if geneOld : out += "\t" + geneOld
# Show line (names have been replaced)
print out
if missingCount > 0 :
total = (len(f) - 2)
missingPerc = 100.0 * missingCount / total
print >> sys.stderr, "{}\n\tMissing : {} ( {:.1f}% )\n\tTotal : {}\n\tReplaced: {}\n\tGenes ( -> Replacement ) :\n{}".format(geneSet, missingCount, missingPerc, total, foundAlias, missing)
| gpl-3.0 | -3,778,722,402,569,602,600 | -4,440,873,318,411,550,000 | 29.396341 | 192 | 0.57653 | false |
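findGeneName() in the record above resolves a gene symbol through a chain of fallbacks: exact match in the GTF list, new-to-old translation, HGNC alias, the alias's old name, and finally a prefix match. A simplified Python 3 sketch of that chain with toy dictionaries (the gene symbols are invented):

```python
# Simplified sketch of the findGeneName() fallback chain, with toy data.
genes2new = {"ABC1": "ABC2"}        # names present in the GTF -> newer symbol
genes2old = {"ABC2": "ABC1"}        # newer symbol -> GTF name
genes_hgcn = {"ABC-ALIAS": "ABC2"}  # HGNC alias -> current symbol

def find_gene_name(g):
    if g in genes2new:                      # 1) already a GTF name
        return g
    old = genes2old.get(g)
    if old in genes2new:                    # 2) translate new -> old
        return old
    alias = genes_hgcn.get(g)
    if alias in genes2new:                  # 3) HGNC alias that is a GTF name
        return alias
    if alias:
        old_alias = genes2old.get(alias)
        if old_alias in genes2new:          # 4) old name of the alias
            return old_alias
    for known in genes2new:                 # 5) last resort: prefix match
        if known.startswith(g):
            return known
    return ""

print(find_gene_name("ABC2"))       # ABC1, via new -> old translation
print(find_gene_name("ABC-ALIAS"))  # ABC1, via HGNC alias then new -> old
```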
slohse/ansible | lib/ansible/modules/cloud/azure/azure_rm_appgateway.py | 4 | 35537 |
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_appgateway
version_added: "2.7"
short_description: Manage Application Gateway instance.
description:
- Create, update and delete instance of Application Gateway.
options:
resource_group:
description:
- The name of the resource group.
required: True
name:
description:
- The name of the application gateway.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
sku:
description:
- SKU of the application gateway resource.
suboptions:
name:
description:
- Name of an application gateway SKU.
choices:
- 'standard_small'
- 'standard_medium'
- 'standard_large'
- 'waf_medium'
- 'waf_large'
tier:
description:
- Tier of an application gateway.
choices:
- 'standard'
- 'waf'
capacity:
description:
- Capacity (instance count) of an application gateway.
ssl_policy:
description:
- SSL policy of the application gateway resource.
suboptions:
disabled_ssl_protocols:
description:
- List of SSL protocols to be disabled on application gateway.
choices:
- 'tls_v1_0'
- 'tls_v1_1'
- 'tls_v1_2'
policy_type:
description:
- Type of SSL Policy.
choices:
- 'predefined'
- 'custom'
policy_name:
description:
- Name of Ssl C(predefined) policy.
choices:
- 'ssl_policy20150501'
- 'ssl_policy20170401'
- 'ssl_policy20170401_s'
cipher_suites:
description:
- List of SSL cipher suites to be enabled in the specified order to application gateway.
choices:
- tls_ecdhe_rsa_with_aes_256_gcm_sha384
- tls_ecdhe_rsa_with_aes_128_gcm_sha256
- tls_ecdhe_rsa_with_aes_256_cbc_sha384
- tls_ecdhe_rsa_with_aes_128_cbc_sha256
- tls_ecdhe_rsa_with_aes_256_cbc_sha
- tls_ecdhe_rsa_with_aes_128_cbc_sha
- tls_dhe_rsa_with_aes_256_gcm_sha384
- tls_dhe_rsa_with_aes_128_gcm_sha256
- tls_dhe_rsa_with_aes_256_cbc_sha
- tls_dhe_rsa_with_aes_128_cbc_sha
- tls_rsa_with_aes_256_gcm_sha384
- tls_rsa_with_aes_128_gcm_sha256
- tls_rsa_with_aes_256_cbc_sha256
- tls_rsa_with_aes_128_cbc_sha256
- tls_rsa_with_aes_256_cbc_sha
- tls_rsa_with_aes_128_cbc_sha
- tls_ecdhe_ecdsa_with_aes_256_gcm_sha384
- tls_ecdhe_ecdsa_with_aes_128_gcm_sha256
- tls_ecdhe_ecdsa_with_aes_256_cbc_sha384
- tls_ecdhe_ecdsa_with_aes_128_cbc_sha256
- tls_ecdhe_ecdsa_with_aes_256_cbc_sha
- tls_ecdhe_ecdsa_with_aes_128_cbc_sha
- tls_dhe_dss_with_aes_256_cbc_sha256
- tls_dhe_dss_with_aes_128_cbc_sha256
- tls_dhe_dss_with_aes_256_cbc_sha
- tls_dhe_dss_with_aes_128_cbc_sha
- tls_rsa_with_3des_ede_cbc_sha
- tls_dhe_dss_with_3des_ede_cbc_sha
min_protocol_version:
description:
- Minimum version of Ssl protocol to be supported on application gateway.
choices:
- 'tls_v1_0'
- 'tls_v1_1'
- 'tls_v1_2'
gateway_ip_configurations:
description:
- List of subnets used by the application gateway.
suboptions:
subnet:
description:
- Reference of the subnet resource. A subnet from where application gateway gets its private address.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
authentication_certificates:
description:
- Authentication certificates of the application gateway resource.
suboptions:
data:
description:
- Certificate public data - base64 encoded pfx
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
ssl_certificates:
description:
- SSL certificates of the application gateway resource.
suboptions:
data:
description:
- Base-64 encoded pfx certificate.
password:
description:
- Password for the pfx file specified in I(data).
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
frontend_ip_configurations:
description:
- Frontend IP addresses of the application gateway resource.
suboptions:
private_ip_address:
description:
- PrivateIPAddress of the network interface IP Configuration.
private_ip_allocation_method:
description:
- PrivateIP allocation method.
choices:
- 'static'
- 'dynamic'
subnet:
description:
- Reference of the subnet resource.
public_ip_address:
description:
- Reference of the PublicIP resource.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
frontend_ports:
description:
- List of frontend ports of the application gateway resource.
suboptions:
port:
description:
- Frontend port
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
backend_address_pools:
description:
- List of backend address pool of the application gateway resource.
suboptions:
backend_addresses:
description:
- List of backend addresses
suboptions:
fqdn:
description:
- Fully qualified domain name (FQDN).
ip_address:
description:
- IP address
name:
description:
- Resource that is unique within a resource group. This name can be used to access the resource.
backend_http_settings_collection:
description:
- Backend http settings of the application gateway resource.
suboptions:
port:
description:
- Port
protocol:
description:
- Protocol.
choices:
- 'http'
- 'https'
cookie_based_affinity:
description:
- Cookie based affinity.
choices:
- 'enabled'
- 'disabled'
request_timeout:
description:
- "Request timeout in seconds. Application Gateway will fail the request if response is not received within RequestTimeout. Acceptable va
lues are from 1 second to 86400 seconds."
authentication_certificates:
description:
- List of references to application gateway authentication certificates.
suboptions:
id:
description:
- Resource ID.
host_name:
description:
- Host header to be sent to the backend servers.
pick_host_name_from_backend_address:
description:
- Whether the host header should be picked from the host name of the backend server. Default value is false.
affinity_cookie_name:
description:
- Cookie name to use for the affinity cookie.
path:
description:
- Path which should be used as a prefix for all C(http) requests. Null means no path will be prefixed. Default value is null.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
http_listeners:
description:
- List of HTTP listeners of the application gateway resource.
suboptions:
frontend_ip_configuration:
description:
- Frontend IP configuration resource of an application gateway.
frontend_port:
description:
- Frontend port resource of an application gateway.
protocol:
description:
- Protocol.
choices:
- 'http'
- 'https'
host_name:
description:
- Host name of C(http) listener.
ssl_certificate:
description:
- SSL certificate resource of an application gateway.
require_server_name_indication:
description:
- Applicable only if I(protocol) is C(https). Enables SNI for multi-hosting.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
request_routing_rules:
description:
- List of request routing rules of the application gateway resource.
suboptions:
rule_type:
description:
- Rule I(type).
choices:
- 'basic'
- 'path_based_routing'
backend_address_pool:
description:
- Backend address pool resource of the application gateway.
backend_http_settings:
description:
- Frontend port resource of the application gateway.
http_listener:
description:
- Http listener resource of the application gateway.
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
state:
description:
- Assert the state of the application gateway. Use 'present' to create or update an application gateway and
'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create instance of Application Gateway
azure_rm_appgateway:
resource_group: myresourcegroup
name: myappgateway
sku:
name: standard_small
tier: standard
capacity: 2
gateway_ip_configurations:
- subnet:
id: "{{ subnet_id }}"
name: app_gateway_ip_config
frontend_ip_configurations:
- subnet:
id: "{{ subnet_id }}"
name: sample_gateway_frontend_ip_config
frontend_ports:
- port: 90
name: ag_frontend_port
backend_address_pools:
- backend_addresses:
- ip_address: 10.0.0.4
name: test_backend_address_pool
backend_http_settings_collection:
- port: 80
protocol: http
cookie_based_affinity: enabled
name: sample_appgateway_http_settings
http_listeners:
- frontend_ip_configuration: sample_gateway_frontend_ip_config
frontend_port: ag_frontend_port
name: sample_http_listener
request_routing_rules:
- rule_type: Basic
backend_address_pool: test_backend_address_pool
backend_http_settings: sample_appgateway_http_settings
http_listener: sample_http_listener
name: rule1
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: id
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from copy import deepcopy
from ansible.module_utils.network.common.utils import dict_merge
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.network import NetworkManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
ssl_policy_spec = dict(
disabled_ssl_protocols=dict(type='list'),
policy_type=dict(type='str', choices=['predefined', 'custom']),
policy_name=dict(type='str', choices=['ssl_policy20150501', 'ssl_policy20170401', 'ssl_policy20170401_s']),
cipher_suites=dict(type='list'),
min_protocol_version=dict(type='str', choices=['tls_v1_0', 'tls_v1_1', 'tls_v1_2'])
)
class AzureRMApplicationGateways(AzureRMModuleBase):
"""Configuration class for an Azure RM Application Gateway resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
sku=dict(
type='dict'
),
ssl_policy=dict(
type='dict',
options=ssl_policy_spec
),
gateway_ip_configurations=dict(
type='list'
),
authentication_certificates=dict(
type='list'
),
ssl_certificates=dict(
type='list'
),
frontend_ip_configurations=dict(
type='list'
),
frontend_ports=dict(
type='list'
),
backend_address_pools=dict(
type='list'
),
backend_http_settings_collection=dict(
type='list'
),
http_listeners=dict(
type='list'
),
request_routing_rules=dict(
type='list'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.name = None
self.parameters = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMApplicationGateways, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "id":
self.parameters["id"] = kwargs[key]
elif key == "location":
self.parameters["location"] = kwargs[key]
elif key == "sku":
ev = kwargs[key]
if 'name' in ev:
if ev['name'] == 'standard_small':
ev['name'] = 'Standard_Small'
elif ev['name'] == 'standard_medium':
ev['name'] = 'Standard_Medium'
elif ev['name'] == 'standard_large':
ev['name'] = 'Standard_Large'
elif ev['name'] == 'waf_medium':
ev['name'] = 'WAF_Medium'
elif ev['name'] == 'waf_large':
ev['name'] = 'WAF_Large'
if 'tier' in ev:
if ev['tier'] == 'standard':
ev['tier'] = 'Standard'
elif ev['tier'] == 'waf':
ev['tier'] = 'WAF'
self.parameters["sku"] = ev
elif key == "ssl_policy":
ev = kwargs[key]
if 'policy_type' in ev:
ev['policy_type'] = _snake_to_camel(ev['policy_type'], True)
if 'policy_name' in ev:
if ev['policy_name'] == 'ssl_policy20150501':
ev['policy_name'] = 'AppGwSslPolicy20150501'
elif ev['policy_name'] == 'ssl_policy20170401':
ev['policy_name'] = 'AppGwSslPolicy20170401'
elif ev['policy_name'] == 'ssl_policy20170401_s':
ev['policy_name'] = 'AppGwSslPolicy20170401S'
if 'min_protocol_version' in ev:
if ev['min_protocol_version'] == 'tls_v1_0':
ev['min_protocol_version'] = 'TLSv1_0'
elif ev['min_protocol_version'] == 'tls_v1_1':
ev['min_protocol_version'] = 'TLSv1_1'
elif ev['min_protocol_version'] == 'tls_v1_2':
ev['min_protocol_version'] = 'TLSv1_2'
if 'disabled_ssl_protocols' in ev:
protocols = ev['disabled_ssl_protocols']
if protocols is not None:
for i in range(len(protocols)):
if protocols[i] == 'tls_v1_0':
protocols[i] = 'TLSv1_0'
elif protocols[i] == 'tls_v1_1':
protocols[i] = 'TLSv1_1'
elif protocols[i] == 'tls_v1_2':
protocols[i] = 'TLSv1_2'
if 'cipher_suites' in ev:
suites = ev['cipher_suites']
if suites is not None:
for i in range(len(suites)):
suites[i] = suites[i].upper()
elif key == "gateway_ip_configurations":
self.parameters["gateway_ip_configurations"] = kwargs[key]
elif key == "authentication_certificates":
self.parameters["authentication_certificates"] = kwargs[key]
elif key == "ssl_certificates":
self.parameters["ssl_certificates"] = kwargs[key]
elif key == "frontend_ip_configurations":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'private_ip_allocation_method' in item:
item['private_ip_allocation_method'] = _snake_to_camel(item['private_ip_allocation_method'], True)
if 'public_ip_address' in item:
id = public_ip_id(self.subscription_id,
kwargs['resource_group'],
item['public_ip_address'])
item['public_ip_address'] = {'id': id}
self.parameters["frontend_ip_configurations"] = ev
elif key == "frontend_ports":
self.parameters["frontend_ports"] = kwargs[key]
elif key == "backend_address_pools":
self.parameters["backend_address_pools"] = kwargs[key]
elif key == "backend_http_settings_collection":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'protocol' in item:
item['protocol'] = _snake_to_camel(item['protocol'], True)
if 'cookie_based_affinity' in item:
item['cookie_based_affinity'] = _snake_to_camel(item['cookie_based_affinity'], True)
self.parameters["backend_http_settings_collection"] = ev
elif key == "http_listeners":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'frontend_ip_configuration' in item:
id = frontend_ip_configuration_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['frontend_ip_configuration'])
item['frontend_ip_configuration'] = {'id': id}
if 'frontend_port' in item:
id = frontend_port_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['frontend_port'])
item['frontend_port'] = {'id': id}
if 'ssl_certificate' in item:
id = ssl_certificate_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['ssl_certificate'])
item['ssl_certificate'] = {'id': id}
if 'protocol' in item:
item['protocol'] = _snake_to_camel(item['protocol'], True)
ev[i] = item
self.parameters["http_listeners"] = ev
elif key == "request_routing_rules":
ev = kwargs[key]
for i in range(len(ev)):
item = ev[i]
if 'backend_address_pool' in item:
id = backend_address_pool_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['backend_address_pool'])
item['backend_address_pool'] = {'id': id}
if 'backend_http_settings' in item:
id = backend_http_settings_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['backend_http_settings'])
item['backend_http_settings'] = {'id': id}
if 'http_listener' in item:
id = http_listener_id(self.subscription_id,
kwargs['resource_group'],
kwargs['name'],
item['http_listener'])
item['http_listener'] = {'id': id}
if 'protocol' in item:
item['protocol'] = _snake_to_camel(item['protocol'], True)
if 'rule_type' in item:
item['rule_type'] = _snake_to_camel(item['rule_type'], True)
ev[i] = item
self.parameters["request_routing_rules"] = ev
elif key == "etag":
self.parameters["etag"] = kwargs[key]
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(NetworkManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
if "location" not in self.parameters:
self.parameters["location"] = resource_group.location
old_response = self.get_applicationgateway()
if not old_response:
self.log("Application Gateway instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Application Gateway instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if Application Gateway instance has to be deleted or may be updated")
self.to_do = Actions.Update
if (self.to_do == Actions.Update):
if (self.parameters['location'] != old_response['location'] or
self.parameters['sku']['name'] != old_response['sku']['name'] or
self.parameters['sku']['tier'] != old_response['sku']['tier'] or
self.parameters['sku']['capacity'] != old_response['sku']['capacity'] or
not compare_arrays(old_response, self.parameters, 'authentication_certificates') or
not compare_arrays(old_response, self.parameters, 'gateway_ip_configurations') or
not compare_arrays(old_response, self.parameters, 'frontend_ip_configurations') or
not compare_arrays(old_response, self.parameters, 'frontend_ports') or
not compare_arrays(old_response, self.parameters, 'backend_address_pools') or
not compare_arrays(old_response, self.parameters, 'backend_http_settings_collection') or
not compare_arrays(old_response, self.parameters, 'request_routing_rules') or
not compare_arrays(old_response, self.parameters, 'http_listeners')):
self.to_do = Actions.Update
else:
self.to_do = Actions.NoAction
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Application Gateway instance")
if self.check_mode:
self.results['changed'] = True
self.results["parameters"] = self.parameters
return self.results
response = self.create_update_applicationgateway()
if not old_response:
self.results['changed'] = True
else:
self.results['changed'] = old_response.__ne__(response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Application Gateway instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_applicationgateway()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_applicationgateway():
time.sleep(20)
else:
self.log("Application Gateway instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_applicationgateway(self):
'''
Creates or updates Application Gateway with the specified configuration.
:return: deserialized Application Gateway instance state dictionary
'''
self.log("Creating / Updating the Application Gateway instance {0}".format(self.name))
try:
response = self.mgmt_client.application_gateways.create_or_update(resource_group_name=self.resource_group,
application_gateway_name=self.name,
parameters=self.parameters)
if isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Application Gateway instance.')
self.fail("Error creating the Application Gateway instance: {0}".format(str(exc)))
return response.as_dict()
def delete_applicationgateway(self):
'''
Deletes specified Application Gateway instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Application Gateway instance {0}".format(self.name))
try:
response = self.mgmt_client.application_gateways.delete(resource_group_name=self.resource_group,
application_gateway_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the Application Gateway instance.')
self.fail("Error deleting the Application Gateway instance: {0}".format(str(e)))
return True
def get_applicationgateway(self):
'''
Gets the properties of the specified Application Gateway.
:return: deserialized Application Gateway instance state dictionary
'''
self.log("Checking if the Application Gateway instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.application_gateways.get(resource_group_name=self.resource_group,
application_gateway_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Application Gateway instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Application Gateway instance.')
if found is True:
return response.as_dict()
return False
def public_ip_id(subscription_id, resource_group_name, name):
"""Generate the id for a frontend ip configuration"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/publicIPAddresses/{2}'.format(
subscription_id,
resource_group_name,
name
)
def frontend_ip_configuration_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a frontend ip configuration"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendIPConfigurations/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def frontend_port_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a frontend port"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendPorts/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def ssl_certificate_id(subscription_id, resource_group_name, ssl_certificate_name, name):
"""Generate the id for a frontend port"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/sslCertificates/{3}'.format(
subscription_id,
resource_group_name,
ssl_certificate_name,
name
)
def backend_address_pool_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for an address pool"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendAddressPools/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def backend_http_settings_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a http settings"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendHttpSettingsCollection/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def http_listener_id(subscription_id, resource_group_name, appgateway_name, name):
"""Generate the id for a http listener"""
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/httpListeners/{3}'.format(
subscription_id,
resource_group_name,
appgateway_name,
name
)
def compare_arrays(old_params, new_params, param_name):
old = old_params.get(param_name) or []
new = new_params.get(param_name) or []
oldd = {}
for item in old:
name = item['name']
oldd[name] = item
newd = {}
for item in new:
name = item['name']
newd[name] = item
newd = dict_merge(oldd, newd)
return newd == oldd
def main():
"""Main execution"""
AzureRMApplicationGateways()
if __name__ == '__main__':
main()
| gpl-3.0 | -6,046,026,555,737,368,000 | -4,443,034,663,333,901,000 | 40.710094 | 157 | 0.506542 | false |
Tithen-Firion/youtube-dl | youtube_dl/extractor/ondemandkorea.py | 62 | 2036 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
js_to_json,
)
class OnDemandKoreaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?P<id>[^/]+)\.html'
_GEO_COUNTRIES = ['US', 'CA']
_TEST = {
'url': 'http://www.ondemandkorea.com/ask-us-anything-e43.html',
'info_dict': {
'id': 'ask-us-anything-e43',
'ext': 'mp4',
'title': 'Ask Us Anything : E43',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': 'm3u8 download'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id, fatal=False)
if not webpage:
# Page sometimes returns captcha page with HTTP 403
raise ExtractorError(
'Unable to access page. You may have been blocked.',
expected=True)
if 'msg_block_01.png' in webpage:
self.raise_geo_restricted(
msg='This content is not available in your region',
countries=self._GEO_COUNTRIES)
if 'This video is only available to ODK PLUS members.' in webpage:
raise ExtractorError(
'This video is only available to ODK PLUS members.',
expected=True)
title = self._og_search_title(webpage)
jw_config = self._parse_json(
self._search_regex(
r'(?s)jwplayer\(([\'"])(?:(?!\1).)+\1\)\.setup\s*\((?P<options>.+?)\);',
webpage, 'jw config', group='options'),
video_id, transform_source=js_to_json)
info = self._parse_jwplayer_data(
jw_config, video_id, require_title=False, m3u8_id='hls',
base_url=url)
info.update({
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
})
return info
| unlicense | -6,905,620,428,248,528,000 | 2,083,980,560,881,348,600 | 31.83871 | 88 | 0.532908 | false |
t-neumann/slamdunk | bin/_preamble.py | 1 | 1062 |
# Copyright (c) 2015 Tobias Neumann, Philipp Rescheneder.
#
# This file is part of Slamdunk.
#
# Slamdunk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Slamdunk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os
path = os.path.abspath(sys.argv[0])
while os.path.dirname(path) != path:
if os.path.exists(os.path.join(path, 'slamdunk', '__init__.py')):
#sys.path.insert(0, os.path.join(path, 'slamdunk'))
sys.path.insert(0, path)
break
path = os.path.dirname(path)
| agpl-3.0 | -2,873,219,902,147,482,600 | -3,560,786,727,515,591,700 | 38.333333 | 74 | 0.707156 | false |
jmartinezchaine/OpenERP | openerp/addons/base/test/test_ir_cron.py | 15 | 5020 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
import openerp
JOB = {
'function': u'_0_seconds',
'interval_type': u'minutes',
'user_id': 1,
'name': u'test',
'args': False,
'numbercall': 1,
'nextcall': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'priority': 5,
'doall': True,
'active': True,
'interval_number': 1,
'model': u'ir.cron'
}
class test_ir_cron(openerp.osv.osv.osv):
""" Add a few handy methods to test cron jobs scheduling. """
_inherit = "ir.cron"
def _0_seconds(a, b, c):
print ">>> _0_seconds"
def _20_seconds(self, cr, uid):
print ">>> in _20_seconds"
time.sleep(20)
print ">>> out _20_seconds"
def _80_seconds(self, cr, uid):
print ">>> in _80_seconds"
time.sleep(80)
print ">>> out _80_seconds"
def test_0(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
t2 = (now + relativedelta(minutes=1, seconds=5)).strftime('%Y-%m-%d %H:%M:%S')
t3 = (now + relativedelta(minutes=1, seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_0 _20_seconds A', function='_20_seconds', nextcall=t1))
self.create(cr, uid, dict(JOB, name='test_0 _20_seconds B', function='_20_seconds', nextcall=t2))
self.create(cr, uid, dict(JOB, name='test_0 _20_seconds C', function='_20_seconds', nextcall=t3))
def test_1(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_1 _20_seconds * 3', function='_20_seconds', nextcall=t1, numbercall=3))
def test_2(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_2 _80_seconds * 2', function='_80_seconds', nextcall=t1, numbercall=2))
def test_3(self, cr, uid):
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
t2 = (now + relativedelta(minutes=1, seconds=5)).strftime('%Y-%m-%d %H:%M:%S')
t3 = (now + relativedelta(minutes=1, seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_3 _80_seconds A', function='_80_seconds', nextcall=t1))
self.create(cr, uid, dict(JOB, name='test_3 _20_seconds B', function='_20_seconds', nextcall=t2))
self.create(cr, uid, dict(JOB, name='test_3 _20_seconds C', function='_20_seconds', nextcall=t3))
# This test assumes 4 cron threads.
def test_00(self, cr, uid):
self.test_00_set = set()
now = datetime.now()
t1 = (now + relativedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
t2 = (now + relativedelta(minutes=1, seconds=5)).strftime('%Y-%m-%d %H:%M:%S')
t3 = (now + relativedelta(minutes=1, seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
self.create(cr, uid, dict(JOB, name='test_00 _20_seconds_A', function='_20_seconds_A', nextcall=t1))
self.create(cr, uid, dict(JOB, name='test_00 _20_seconds_B', function='_20_seconds_B', nextcall=t2))
self.create(cr, uid, dict(JOB, name='test_00 _20_seconds_C', function='_20_seconds_C', nextcall=t3))
def _expect(self, cr, uid, to_add, to_sleep, to_expect_in, to_expect_out):
assert self.test_00_set == to_expect_in
self.test_00_set.add(to_add)
time.sleep(to_sleep)
self.test_00_set.discard(to_add)
assert self.test_00_set == to_expect_out
def _20_seconds_A(self, cr, uid):
self._expect(cr, uid, 'A', 20, set(), set(['B', 'C']))
def _20_seconds_B(self, cr, uid):
self._expect(cr, uid, 'B', 20, set('A'), set('C'))
def _20_seconds_C(self, cr, uid):
self._expect(cr, uid, 'C', 20, set(['A', 'B']), set())
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,746,526,826,367,845,000 | 8,627,822,950,723,898,000 | 42.275862 | 121 | 0.583865 | false |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_vendor/html5lib/_trie/py.py | 1323 | 1775 |
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
| apache-2.0 | 5,298,879,349,277,658,000 | 5,593,700,800,373,019,000 | 25.492537 | 66 | 0.552676 | false |
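The vendored `Trie` above keeps its keys in a sorted list and answers prefix queries with `bisect_left` plus a small cache of the last prefix. A hedged usage sketch (assumes the vendored module is importable from an installed pip; otherwise copy the class into a local module):

```python
# Sketch: prefix queries against the bisect-based Trie shown above.
from pip._vendor.html5lib._trie.py import Trie

t = Trie({"amp": "&", "ampersand": "&", "apos": "'", "lt": "<"})
print(t.keys("amp"))                 # {'amp', 'ampersand'}
print(t.has_keys_with_prefix("ap"))  # True
print(t["lt"])                       # <
```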
pongem/python-bot-project | appengine/standard/botapp/env/lib/python2.7/site-packages/django/db/utils.py | 143 | 10368 |
import os
import pkgutil
from importlib import import_module
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils._os import npath, upath
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception if six.PY3 else StandardError): # NOQA: StandardError undefined on PY3
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper(object):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
dj_exc_value.__cause__ = exc_value
if not hasattr(exc_value, '__traceback__'):
exc_value.__traceback__ = traceback
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
six.reraise(dj_exc_type, dj_exc_value, traceback)
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
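# Illustrative sketch, not part of Django: a stand-in wrapper whose 'Database'
# attribute exposes PEP-249 exception classes, showing how the context manager
# re-raises a backend exception as the matching Django wrapper. All names here
# are hypothetical example code.
class _FakeDBAPIErrors(object):
    class Error(Exception): pass
    class InterfaceError(Error): pass
    class DatabaseError(Error): pass
    class DataError(DatabaseError): pass
    class OperationalError(DatabaseError): pass
    class IntegrityError(DatabaseError): pass
    class InternalError(DatabaseError): pass
    class ProgrammingError(DatabaseError): pass
    class NotSupportedError(DatabaseError): pass
class _FakeWrapper(object):
    Database = _FakeDBAPIErrors
    errors_occurred = False
def _example_error_wrapping():
    try:
        with DatabaseErrorWrapper(_FakeWrapper()):
            raise _FakeDBAPIErrors.IntegrityError("duplicate key")
    except IntegrityError:
        # The backend error surfaced as django.db.utils.IntegrityError.
        return True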
def load_backend(backend_name):
"""
Return a database backend's "base" module given a fully qualified database
backend name, or raise an error if it doesn't exist.
"""
# This backend was renamed in Django 1.9.
if backend_name == 'django.db.backends.postgresql_psycopg2':
backend_name = 'django.db.backends.postgresql'
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
try:
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([npath(backend_dir)])
if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}
]
except EnvironmentError:
builtin_backends = []
if backend_name not in ['django.db.backends.%s' % b for b in
builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
error_msg = ("%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX "
"is one of:\n %s\nError was: %s" %
(backend_name, ", ".join(backend_reprs), e_user))
raise ImproperlyConfigured(error_msg)
else:
# If there's some other error, this must be an error in Django
raise
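# Illustrative sketch, not part of Django: load_backend() takes a dotted backend
# path and returns its 'base' module. The sqlite3 backend named below is only an
# example and must be importable for this to succeed.
def _example_load_backend():
    backend = load_backend('django.db.backends.sqlite3')
    return backend.DatabaseWrapper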
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
self._connections = local()
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
return self._databases
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Makes sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
test_settings.setdefault(key, None)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
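# Illustrative sketch, not part of Django: ensure_defaults() fills in the
# settings every backend expects; no database connection is opened here. The
# alias and engine below are hypothetical example values.
def _example_connection_handler():
    handler = ConnectionHandler({
        'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'},
    })
    handler.ensure_defaults('default')
    db = handler.databases['default']
    return db['AUTOCOMMIT'], db['CONN_MAX_AGE']  # (True, 0)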
class ConnectionRouter(object):
def __init__(self, routers=None):
"""
If routers is not specified, will default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, six.string_types):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""
Return app models allowed to be synchronized on provided db.
"""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
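# Illustrative sketch, not part of Django: a minimal router that pins reads to a
# hypothetical 'replica' alias. Routing falls through to DEFAULT_DB_ALIAS for any
# method the router does not define.
class _ReadReplicaRouter(object):
    def db_for_read(self, model, **hints):
        return 'replica'
def _example_router_usage():
    router = ConnectionRouter([_ReadReplicaRouter()])
    assert router.db_for_read(None) == 'replica'
    assert router.db_for_write(None) == DEFAULT_DB_ALIAS
    return router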
|
apache-2.0
| 5,065,063,898,864,701,000 | 6,802,113,007,901,421,000 | 31.501567 | 93 | 0.574942 | false |
dd00/commandergenius
|
project/jni/python/src/Demo/scripts/lpwatch.py
|
32
|
3200
|
#! /usr/bin/env python
# Watch line printer queue(s).
# Intended for BSD 4.3 lpq.
import posix
import sys
import time
import string
DEF_PRINTER = 'psc'
DEF_DELAY = 10
def main():
delay = DEF_DELAY # XXX Use getopt() later
try:
thisuser = posix.environ['LOGNAME']
except:
thisuser = posix.environ['USER']
printers = sys.argv[1:]
if printers:
# Strip '-P' from printer names just in case
# the user specified it...
for i in range(len(printers)):
if printers[i][:2] == '-P':
printers[i] = printers[i][2:]
else:
if posix.environ.has_key('PRINTER'):
printers = [posix.environ['PRINTER']]
else:
printers = [DEF_PRINTER]
#
clearhome = posix.popen('clear', 'r').read()
#
while 1:
text = clearhome
for name in printers:
text = text + makestatus(name, thisuser) + '\n'
print text
time.sleep(delay)
def makestatus(name, thisuser):
pipe = posix.popen('lpq -P' + name + ' 2>&1', 'r')
lines = []
users = {}
aheadbytes = 0
aheadjobs = 0
userseen = 0
totalbytes = 0
totaljobs = 0
while 1:
line = pipe.readline()
if not line: break
fields = string.split(line)
n = len(fields)
if len(fields) >= 6 and fields[n-1] == 'bytes':
rank = fields[0]
user = fields[1]
job = fields[2]
files = fields[3:-2]
bytes = eval(fields[n-2])
if user == thisuser:
userseen = 1
elif not userseen:
aheadbytes = aheadbytes + bytes
aheadjobs = aheadjobs + 1
totalbytes = totalbytes + bytes
totaljobs = totaljobs + 1
if users.has_key(user):
ujobs, ubytes = users[user]
else:
ujobs, ubytes = 0, 0
ujobs = ujobs + 1
ubytes = ubytes + bytes
users[user] = ujobs, ubytes
else:
if fields and fields[0] <> 'Rank':
line = string.strip(line)
if line == 'no entries':
line = name + ': idle'
elif line[-22:] == ' is ready and printing':
line = name
lines.append(line)
#
if totaljobs:
line = '%d K' % ((totalbytes+1023)//1024)
if totaljobs <> len(users):
line = line + ' (%d jobs)' % totaljobs
if len(users) == 1:
line = line + ' for %s' % (users.keys()[0],)
else:
line = line + ' for %d users' % len(users)
if userseen:
if aheadjobs == 0:
line = line + ' (%s first)' % thisuser
else:
line = line + ' (%d K before %s)' % (
(aheadbytes+1023)//1024, thisuser)
lines.append(line)
#
sts = pipe.close()
if sts:
lines.append('lpq exit status %r' % (sts,))
return string.joinfields(lines, ': ')
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
lgpl-2.1
| 3,830,954,653,769,616,000 | -6,972,162,664,281,405,000 | 28.090909 | 69 | 0.476875 | false |
victoredwardocallaghan/xen
|
tools/python/xen/util/pci.py
|
25
|
50406
|
#!/usr/bin/env python
#
# PCI Device Information Class
# - Helps obtain information about which I/O resources a PCI device needs
#
# Author: Ryan Wilson <[email protected]>
import sys
import os, os.path
import errno
import resource
import re
import types
import struct
import time
import threading
from xen.util import utils
from xen.xend import uuid
from xen.xend import sxp
from xen.xend.XendConstants import AUTO_PHP_SLOT
from xen.xend.XendSXPDev import dev_dict_to_sxp
from xen.xend.XendLogging import log
# for 2.3 compatibility
try:
set()
except NameError:
from sets import Set as set
PROC_PCI_PATH = '/proc/bus/pci/devices'
PROC_PCI_NUM_RESOURCES = 7
SYSFS_PCI_DEVS_PATH = '/bus/pci/devices'
SYSFS_PCI_DEV_RESOURCE_PATH = '/resource'
SYSFS_PCI_DEV_CONFIG_PATH = '/config'
SYSFS_PCI_DEV_IRQ_PATH = '/irq'
SYSFS_PCI_DEV_DRIVER_DIR_PATH = '/driver'
SYSFS_PCI_DEV_VENDOR_PATH = '/vendor'
SYSFS_PCI_DEV_DEVICE_PATH = '/device'
SYSFS_PCI_DEV_SUBVENDOR_PATH = '/subsystem_vendor'
SYSFS_PCI_DEV_SUBDEVICE_PATH = '/subsystem_device'
SYSFS_PCI_DEV_CLASS_PATH = '/class'
SYSFS_PCIBACK_PATH = '/bus/pci/drivers/pciback/'
SYSFS_PCISTUB_PATH = '/bus/pci/drivers/pci-stub/'
LSPCI_CMD = 'lspci'
PCI_DEV_REG_EXPRESS_STR = r"[0-9a-fA-F]{4}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}."+ \
r"[0-9a-fA-F]{1}"
DEV_TYPE_PCIe_ENDPOINT = 0
DEV_TYPE_PCIe_BRIDGE = 1
DEV_TYPE_PCI_BRIDGE = 2
DEV_TYPE_PCI = 3
PCI_VENDOR_ID = 0x0
PCI_STATUS = 0x6
PCI_CLASS_DEVICE = 0x0a
PCI_CLASS_BRIDGE_PCI = 0x0604
PCI_HEADER_TYPE = 0x0e
PCI_HEADER_TYPE_MASK = 0x7f
PCI_HEADER_TYPE_NORMAL = 0
PCI_HEADER_TYPE_BRIDGE = 1
PCI_HEADER_TYPE_CARDBUS = 2
PCI_CAPABILITY_LIST = 0x34
PCI_CB_BRIDGE_CONTROL = 0x3e
PCI_BRIDGE_CTL_BUS_RESET= 0x40
PCI_CAP_ID_EXP = 0x10
PCI_EXP_FLAGS = 0x2
PCI_EXP_FLAGS_TYPE = 0x00f0
PCI_EXP_TYPE_DOWNSTREAM = 0x6
PCI_EXP_TYPE_PCI_BRIDGE = 0x7
PCI_EXP_DEVCAP = 0x4
PCI_EXP_DEVCAP_FLR = (0x1 << 28)
PCI_EXP_DEVCTL = 0x8
PCI_EXP_DEVCTL_FLR = (0x1 << 15)
PCI_EXT_CAP_ID_ACS = 0x000d
PCI_EXT_CAP_ACS_ENABLED = 0x1d # The bits V, R, C, U.
PCI_EXT_ACS_CTRL = 0x06
PCI_CAP_ID_PM = 0x01
PCI_PM_CTRL = 4
PCI_PM_CTRL_NO_SOFT_RESET = 0x0008
PCI_PM_CTRL_STATE_MASK = 0x0003
PCI_D3hot = 3
PCI_D0hot = 0
VENDOR_INTEL = 0x8086
PCI_CAP_ID_VENDOR_SPECIFIC_CAP = 0x09
PCI_CLASS_ID_USB = 0x0c03
PCI_USB_FLRCTRL = 0x4
PCI_DEVICE_ID = 0x02
PCI_COMMAND = 0x04
PCI_CLASS_ID_VGA = 0x0300
PCI_DEVICE_ID_IGFX_GM45 = 0x2a42
PCI_DEVICE_ID_IGFX_EAGLELAKE = 0x2e02
PCI_DEVICE_ID_IGFX_Q45 = 0x2e12
PCI_DEVICE_ID_IGFX_G45 = 0x2e22
PCI_DEVICE_ID_IGFX_G41 = 0x2e32
PCI_CAP_IGFX_CAP09_OFFSET = 0xa4
PCI_CAP_IGFX_CAP13_OFFSET = 0xa4
PCI_CAP_IGFX_GDRST = 0X0d
PCI_CAP_IGFX_GDRST_OFFSET = 0xc0
# The VF of Intel 82599 10GbE Controller
# See http://download.intel.com/design/network/datashts/82599_datasheet.pdf
# For 'VF PCIe Configuration Space', see its Table 9.7.
DEVICE_ID_82599 = 0x10ed
PCI_CAP_ID_AF = 0x13
PCI_AF_CAPs = 0x3
PCI_AF_CAPs_TP_FLR = 0x3
PCI_AF_CTL = 0x4
PCI_AF_CTL_FLR = 0x1
PCI_BAR_0 = 0x10
PCI_BAR_5 = 0x24
PCI_BAR_SPACE = 0x01
PCI_BAR_IO = 0x01
PCI_BAR_IO_MASK = ~0x03
PCI_BAR_MEM = 0x00
PCI_BAR_MEM_MASK = ~0x0f
PCI_STATUS_CAP_MASK = 0x10
PCI_STATUS_OFFSET = 0x6
PCI_CAP_OFFSET = 0x34
MSIX_BIR_MASK = 0x7
MSIX_SIZE_MASK = 0x7ff
# Global variable to store information from lspci
lspci_info = None
lspci_info_lock = threading.RLock()
#Calculate PAGE_SHIFT: number of bits to shift an address to get the page number
PAGE_SIZE = resource.getpagesize()
PAGE_SHIFT = 0
t = PAGE_SIZE
while not (t&1):
t>>=1
PAGE_SHIFT+=1
PAGE_MASK=~(PAGE_SIZE - 1)
# Definitions from Linux: include/linux/pci.h
def PCI_DEVFN(slot, func):
return ((((slot) & 0x1f) << 3) | ((func) & 0x07))
def PCI_SLOT(devfn):
return (devfn >> 3) & 0x1f
def PCI_FUNC(devfn):
return devfn & 0x7
def PCI_BDF(domain, bus, slot, func):
return (((domain & 0xffff) << 16) | ((bus & 0xff) << 8) |
PCI_DEVFN(slot, func))
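# Illustrative sketch, not part of the original module: devfn packs the slot in
# bits 7:3 and the function in bits 2:0, so slot 0x19 / function 7 round-trips
# through PCI_DEVFN/PCI_SLOT/PCI_FUNC, and PCI_BDF adds domain and bus above it.
def _example_devfn_round_trip():
    devfn = PCI_DEVFN(0x19, 0x7)
    assert devfn == 0xcf
    assert PCI_SLOT(devfn) == 0x19
    assert PCI_FUNC(devfn) == 0x7
    assert PCI_BDF(0x0000, 0x02, 0x19, 0x7) == 0x2cf
    return devfn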
def check_pci_opts(opts):
def f((k, v)):
if k not in ['msitranslate', 'power_mgmt'] or \
not v.lower() in ['0', '1', 'yes', 'no']:
raise PciDeviceParseError('Invalid pci option %s=%s: ' % (k, v))
map(f, opts)
def serialise_pci_opts(opts):
return ','.join(map(lambda x: '='.join(x), opts))
def split_pci_opts(opts):
return map(lambda x: x.split('='),
filter(lambda x: x != '', opts.split(',')))
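# Illustrative sketch, not part of the original module: the option helpers
# round-trip between a 'k=v,k=v' string and a [[k, v], ...] list;
# check_pci_opts() raises PciDeviceParseError for unknown keys or values.
def _example_pci_opts():
    opts = split_pci_opts('msitranslate=1,power_mgmt=0')
    check_pci_opts(opts)
    assert serialise_pci_opts(opts) == 'msitranslate=1,power_mgmt=0'
    return opts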
def append_default_pci_opts(opts, defopts):
optsdict = dict(opts)
return opts + filter(lambda (k, v): not optsdict.has_key(k), defopts)
def pci_opts_list_to_sxp(list):
return dev_dict_to_sxp({'opts': list})
def pci_opts_list_from_sxp(dev):
return map(lambda x: sxp.children(x)[0], sxp.children(dev, 'opts'))
def pci_convert_dict_to_sxp(dev, state, sub_state = None):
pci_sxp = ['pci', dev_dict_to_sxp(dev), ['state', state]]
if sub_state != None:
pci_sxp.append(['sub_state', sub_state])
return pci_sxp
def pci_convert_sxp_to_dict(dev_sxp):
"""Convert pci device sxp to dict
@param dev_sxp: device configuration
@type dev_sxp: SXP object (parsed config)
@return: dev_config
@rtype: dictionary
"""
# Parsing the device SXP's. In most cases, the SXP looks
# like this:
#
# [device, [vif, [mac, xx:xx:xx:xx:xx:xx], [ip 1.3.4.5]]]
#
# However, for PCI devices it looks like this:
#
# [device, [pci, [dev, [domain, 0], [bus, 0], [slot, 1], [func, 2]]]
#
    # The reason for this difference seems to be that
# pciif.py needs all the PCI device configurations at
# the same time when creating the devices.
#
# To further complicate matters, Xen 2.0 configuration format
# uses the following for pci device configuration:
#
# [device, [pci, [domain, 0], [bus, 0], [dev, 1], [func, 2]]]
# For PCI device hotplug support, the SXP of PCI devices is
    # extended like this:
#
# [device, [pci, [dev, [domain, 0], [bus, 0], [slot, 1], [func, 2],
# [vdevfn, 0]],
# [state, 'Initialising']]]
#
# 'vdevfn' shows the virtual hotplug slot number which the PCI device
# is inserted in. This is only effective for HVM domains.
#
# state 'Initialising' indicates that the device is being attached,
# while state 'Closing' indicates that the device is being detached.
#
# The Dict looks like this:
#
# { devs: [{domain: 0, bus: 0, slot: 1, func: 2, vdevfn: 0}],
# states: ['Initialising'] }
dev_config = {}
pci_devs = []
for pci_dev in sxp.children(dev_sxp, 'dev'):
pci_dev_info = dict(pci_dev[1:])
if 'opts' in pci_dev_info:
pci_dev_info['opts'] = pci_opts_list_from_sxp(pci_dev)
# If necessary, initialize uuid, key, and vdevfn for each pci device
if not pci_dev_info.has_key('uuid'):
pci_dev_info['uuid'] = uuid.createString()
if not pci_dev_info.has_key('key'):
pci_dev_info['key'] = "%02x:%02x.%x" % \
(int(pci_dev_info['bus'], 16),
int(pci_dev_info['slot'], 16),
int(pci_dev_info['func'], 16))
if not pci_dev_info.has_key('vdevfn'):
pci_dev_info['vdevfn'] = "0x%02x" % AUTO_PHP_SLOT
pci_devs.append(pci_dev_info)
dev_config['devs'] = pci_devs
pci_states = []
for pci_state in sxp.children(dev_sxp, 'state'):
try:
pci_states.append(pci_state[1])
except IndexError:
raise XendError("Error reading state while parsing pci sxp")
dev_config['states'] = pci_states
return dev_config
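# Illustrative sketch, not part of the original module, assuming xen.xend.sxp
# behaves as described in the comment above: a hotplug-style SXP is converted to
# the {devs: [...], states: [...]} dict; the generated 'uuid' is not checked.
def _example_pci_convert_sxp_to_dict():
    dev_sxp = ['pci',
               ['dev', ['domain', '0x0000'], ['bus', '0x00'],
                       ['slot', '0x01'], ['func', '0x2'], ['vdevfn', '0x00']],
               ['state', 'Initialising']]
    cfg = pci_convert_sxp_to_dict(dev_sxp)
    assert cfg['states'] == ['Initialising']
    assert cfg['devs'][0]['key'] == '00:01.2'
    return cfg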
def parse_hex(val):
try:
if isinstance(val, types.StringTypes):
return int(val, 16)
else:
return val
except ValueError:
return None
AUTO_PHP_FUNC = 1
MANUAL_PHP_FUNC = 2
def parse_pci_pfunc_vfunc(func_str):
list = func_str.split('=')
l = len(list)
if l == 0 or l > 2:
raise PciDeviceParseError('Invalid function: ' + func_str)
p = int(list[0], 16)
if p < 0 or p > 7:
raise PciDeviceParseError('Invalid physical function in: ' + func_str)
if l == 1:
# This defaults to linear mapping of physical to virtual functions
return (p, p, AUTO_PHP_FUNC)
else:
v = int(list[1], 16)
if v < 0 or v > 7:
raise PciDeviceParseError('Invalid virtual function in: ' +
func_str)
return (p, v, MANUAL_PHP_FUNC)
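# Illustrative sketch, not part of the original module: a bare function maps
# physical to virtual linearly, while 'p=v' pins the virtual function explicitly.
def _example_pfunc_vfunc():
    assert parse_pci_pfunc_vfunc('3') == (3, 3, AUTO_PHP_FUNC)
    assert parse_pci_pfunc_vfunc('3=5') == (3, 5, MANUAL_PHP_FUNC)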
def pci_func_range(start, end):
if end < start:
x = pci_func_range(end, start)
x.reverse()
return x
return range(start, end + 1)
def pci_pfunc_vfunc_range(orig, a, b):
phys = pci_func_range(a[0], b[0])
virt = pci_func_range(a[1], b[1])
if len(phys) != len(virt):
raise PciDeviceParseError('Invalid range in: ' + orig)
return map(lambda x: x + (MANUAL_PHP_FUNC,), zip(phys, virt))
def pci_func_list_map_fn(key, func_str):
if func_str == "*":
return map(lambda x: parse_pci_pfunc_vfunc(x['func']),
filter(lambda x:
pci_dict_cmp(x, key, ['domain', 'bus', 'slot']),
get_all_pci_dict()))
l = map(parse_pci_pfunc_vfunc, func_str.split("-"))
if len(l) == 1:
return l
if len(l) == 2:
return pci_pfunc_vfunc_range(func_str, l[0], l[1])
return []
def pci_func_list_process(pci_dev_str, template, func_str):
l = reduce(lambda x, y: x + y,
(map(lambda x: pci_func_list_map_fn(template, x),
func_str.split(","))))
phys = map(lambda x: x[0], l)
virt = map(lambda x: x[1], l)
if len(phys) != len(set(phys)) or len(virt) != len(set(virt)):
raise PciDeviceParseError("Duplicate functions: %s" % pci_dev_str)
return l
def parse_pci_name_extended(pci_dev_str):
pci_match = re.match(r"((?P<domain>[0-9a-fA-F]{1,4})[:,])?" +
r"(?P<bus>[0-9a-fA-F]{1,2})[:,]" +
r"(?P<slot>[0-9a-fA-F]{1,2})[.,]" +
r"(?P<func>(\*|[0-7]([,-=][0-7])*))" +
r"(@(?P<vdevfn>[01]?[0-9a-fA-F]))?" +
r"(,(?P<opts>.*))?$", pci_dev_str)
if pci_match == None:
raise PciDeviceParseError("Failed to parse pci device: %s" %
pci_dev_str)
pci_dev_info = pci_match.groupdict('')
template = {}
if pci_dev_info['domain'] != '':
domain = int(pci_dev_info['domain'], 16)
else:
domain = 0
template['domain'] = "0x%04x" % domain
template['bus'] = "0x%02x" % int(pci_dev_info['bus'], 16)
template['slot'] = "0x%02x" % int(pci_dev_info['slot'], 16)
template['key'] = pci_dev_str.split(',')[0]
if pci_dev_info['opts'] != '':
template['opts'] = split_pci_opts(pci_dev_info['opts'])
check_pci_opts(template['opts'])
# This is where virtual function assignment takes place
func_list = pci_func_list_process(pci_dev_str, template,
pci_dev_info['func'])
if len(func_list) == 0:
return []
# Set the virtual function of the numerically lowest physical function
# to zero if it has not been manually set
if not filter(lambda x: x[1] == 0, func_list):
auto = filter(lambda x: x[2] == AUTO_PHP_FUNC, func_list)
manual = filter(lambda x: x[2] == MANUAL_PHP_FUNC, func_list)
if not auto:
raise PciDeviceParseError('Virtual device does not include '
'virtual function 0: ' + pci_dev_str)
auto.sort(lambda x,y: cmp(x[1], y[1]))
auto[0] = (auto[0][0], 0, AUTO_PHP_FUNC)
func_list = auto + manual
    # For pci attachment and detachment it is important that virtual
    # function 0 is done last. This is because virtual function 0 is the one
    # used to signal changes to the guest using ACPI
func_list.sort(lambda x,y: cmp(PCI_FUNC(y[1]), PCI_FUNC(x[1])))
# Virtual slot assignment takes place here if specified in the bdf,
# else it is done inside qemu-xen, as it knows which slots are free
pci = []
for (pfunc, vfunc, auto) in func_list:
pci_dev = template.copy()
pci_dev['func'] = "0x%x" % pfunc
if pci_dev_info['vdevfn'] == '':
vdevfn = AUTO_PHP_SLOT | vfunc
else:
vdevfn = PCI_DEVFN(int(pci_dev_info['vdevfn'], 16), vfunc)
pci_dev['vdevfn'] = "0x%02x" % vdevfn
pci.append(pci_dev)
return pci
def parse_pci_name(pci_name_string):
dev = parse_pci_name_extended(pci_name_string)
if len(dev) != 1:
raise PciDeviceParseError(("Failed to parse pci device: %s: "
"multiple functions specified prohibited") %
pci_name_string)
pci = dev[0]
if not int(pci['vdevfn'], 16) & AUTO_PHP_SLOT:
raise PciDeviceParseError(("Failed to parse pci device: %s: " +
"vdevfn provided where prohibited: 0x%02x") %
(pci_name_string,
PCI_SLOT(int(pci['vdevfn'], 16))))
if 'opts' in pci:
raise PciDeviceParseError(("Failed to parse pci device: %s: " +
"options provided where prohibited: %s") %
(pci_name_string, pci['opts']))
return pci
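# Illustrative sketch, not part of the original module: a canonical BDF string is
# parsed into the hex-string dict used throughout this file and can be formatted
# back; 'vdevfn' defaults to an auto-assigned hotplug slot.
def _example_parse_pci_name():
    dev = parse_pci_name('0000:00:19.0')
    assert dev['domain'] == '0x0000'
    assert dev['bus'] == '0x00'
    assert dev['slot'] == '0x19'
    assert dev['func'] == '0x0'
    assert pci_dict_to_bdf_str(dev) == '0000:00:19.0'
    return dev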
def __pci_dict_to_fmt_str(fmt, dev):
return fmt % (int(dev['domain'], 16), int(dev['bus'], 16),
int(dev['slot'], 16), int(dev['func'], 16))
def pci_dict_to_bdf_str(dev):
return __pci_dict_to_fmt_str('%04x:%02x:%02x.%01x', dev)
def pci_dict_to_xc_str(dev):
return __pci_dict_to_fmt_str('0x%x, 0x%x, 0x%x, 0x%x', dev)
def pci_dict_cmp(a, b, keys=['domain', 'bus', 'slot', 'func']):
return reduce(lambda x, y: x and y,
map(lambda k: int(a[k], 16) == int(b[k], 16), keys))
def extract_the_exact_pci_names(pci_names):
result = []
if isinstance(pci_names, types.StringTypes):
pci_names = pci_names.split()
elif isinstance(pci_names, types.ListType):
pci_names = re.findall(PCI_DEV_REG_EXPRESS_STR, '%s' % pci_names)
else:
raise PciDeviceParseError('Invalid argument: %s' % pci_names)
for pci in pci_names:
# The length of DDDD:bb:dd.f is 12.
if len(pci) != 12:
continue
if re.match(PCI_DEV_REG_EXPRESS_STR, pci) is None:
continue
result = result + [pci]
return result
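# Illustrative sketch, not part of the original module: only well-formed
# DDDD:bb:dd.f names survive; other tokens are dropped.
def _example_extract_pci_names():
    names = extract_the_exact_pci_names('0000:00:19.0 junk 0000:01:00.1')
    assert names == ['0000:00:19.0', '0000:01:00.1']
    return names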
def find_sysfs_mnt():
try:
return utils.find_sysfs_mount()
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to locate sysfs mount: %s: %s (%d)'%
(PROC_PCI_PATH, strerr, errno)))
return None
def get_all_pci_names():
if not sys.platform.startswith('linux'): return []
sysfs_mnt = find_sysfs_mnt()
if sysfs_mnt is None:
return None
pci_names = os.popen('ls ' + sysfs_mnt + SYSFS_PCI_DEVS_PATH).read().split()
return pci_names
def get_all_pci_dict():
return map(parse_pci_name, get_all_pci_names())
def get_all_pci_devices():
return map(PciDevice, get_all_pci_dict())
def _create_lspci_info():
"""Execute 'lspci' command and parse the result.
If the command does not exist, lspci_info will be kept blank ({}).
Expects to be protected by lspci_info_lock.
"""
global lspci_info
lspci_info = {}
for paragraph in os.popen(LSPCI_CMD + ' -vmm').read().split('\n\n'):
device_name = None
device_info = {}
# FIXME: workaround for pciutils without the -mm option.
# see: git://git.kernel.org/pub/scm/utils/pciutils/pciutils.git
# commit: 3fd6b4d2e2fda814047664ffc67448ac782a8089
first_device = True
for line in paragraph.split('\n'):
try:
(opt, value) = line.split(':\t')
if opt == 'Slot' or (opt == 'Device' and first_device):
device_name = pci_dict_to_bdf_str(parse_pci_name(value))
first_device = False
else:
device_info[opt] = value
except:
pass
if device_name is not None:
lspci_info[device_name] = device_info
def create_lspci_info():
global lspci_info_lock
lspci_info_lock.acquire()
try:
_create_lspci_info()
finally:
lspci_info_lock.release()
def save_pci_conf_space(devs_string):
pci_list = []
cfg_list = []
sysfs_mnt = find_sysfs_mnt()
for pci_str in devs_string:
pci_path = sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + pci_str + \
SYSFS_PCI_DEV_CONFIG_PATH
fd = os.open(pci_path, os.O_RDONLY)
configs = []
for i in range(0, 256, 4):
configs = configs + [os.read(fd,4)]
os.close(fd)
pci_list = pci_list + [pci_path]
cfg_list = cfg_list + [configs]
return (pci_list, cfg_list)
def restore_pci_conf_space(pci_cfg_list):
time.sleep(1.0)
pci_list = pci_cfg_list[0]
cfg_list = pci_cfg_list[1]
for i in range(0, len(pci_list)):
pci_path = pci_list[i]
configs = cfg_list[i]
fd = os.open(pci_path, os.O_WRONLY)
for dw in configs:
os.write(fd, dw)
os.close(fd)
def find_all_assignable_devices():
    ''' devices owned by pciback or pci-stub can be directly assigned to
guest with IOMMU (VT-d or AMD IOMMU), find all these devices.
'''
sysfs_mnt = find_sysfs_mnt()
pciback_path = sysfs_mnt + SYSFS_PCIBACK_PATH
pcistub_path = sysfs_mnt + SYSFS_PCISTUB_PATH
pci_names1 = os.popen('ls %s 2>/dev/null' % pciback_path).read()
pci_names2 = os.popen('ls %s 2>/dev/null' % pcistub_path).read()
if len(pci_names1) + len(pci_names2) == 0 :
return None
pci_list = extract_the_exact_pci_names(pci_names1)
pci_list = pci_list + extract_the_exact_pci_names(pci_names2)
dev_list = []
for pci in pci_list:
dev = PciDevice(parse_pci_name(pci))
dev_list = dev_list + [dev]
return dev_list
def transform_list(target, src):
''' src: its element is pci string (Format: xxxx:xx:xx.x).
target: its element is pci string, or a list of pci string.
If all the elements in src are in target, we remove them from target
and add src into target; otherwise, we remove from target all the
elements that also appear in src.
'''
result = []
target_contains_src = True
for e in src:
if not e in target:
target_contains_src = False
break
if target_contains_src:
result = result + [src]
for e in target:
if not e in src:
result = result + [e]
return result
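# Illustrative sketch, not part of the original module: when src is fully
# contained in target it is folded into a single group, otherwise the overlap is
# simply removed from target.
def _example_transform_list():
    target = ['0000:00:19.0', '0000:00:1a.0', '0000:01:00.0']
    grouped = transform_list(target, ['0000:00:19.0', '0000:00:1a.0'])
    assert grouped == [['0000:00:19.0', '0000:00:1a.0'], '0000:01:00.0']
    pruned = transform_list(target, ['0000:00:19.0', '0000:02:00.0'])
    assert pruned == ['0000:00:1a.0', '0000:01:00.0']
    return grouped, pruned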
def check_FLR_capability(dev_list):
if len(dev_list) == 0:
return []
pci_list = []
pci_dev_dict = {}
for dev in dev_list:
pci_list = pci_list + [dev.name]
pci_dev_dict[dev.name] = dev
while True:
need_transform = False
for pci in pci_list:
if isinstance(pci, types.StringTypes):
dev = pci_dev_dict[pci]
if dev.bus == 0:
continue
if dev.dev_type == DEV_TYPE_PCIe_ENDPOINT and not dev.pcie_flr:
coassigned_pci_list = dev.find_all_the_multi_functions()
need_transform = True
elif dev.dev_type == DEV_TYPE_PCI and not dev.pci_af_flr:
coassigned_pci_list = dev.find_coassigned_pci_devices(True)
del coassigned_pci_list[0]
need_transform = True
if need_transform:
pci_list = transform_list(pci_list, coassigned_pci_list)
if not need_transform:
break
if len(pci_list) == 0:
return []
for i in range(0, len(pci_list)):
if isinstance(pci_list[i], types.StringTypes):
pci_list[i] = [pci_list[i]]
# Now every element in pci_list is a list of pci string.
result = []
for pci_names in pci_list:
devs = []
for pci in pci_names:
devs = devs + [pci_dev_dict[pci]]
result = result + [devs]
return result
def check_mmio_bar(devs_list):
result = []
for dev_list in devs_list:
non_aligned_bar_found = False
for dev in dev_list:
if dev.has_non_page_aligned_bar:
non_aligned_bar_found = True
break
if not non_aligned_bar_found:
result = result + [dev_list]
return result
class PciDeviceParseError(Exception):
def __init__(self,msg):
self.message = msg
def __str__(self):
return self.message
class PciDeviceAssignmentError(Exception):
def __init__(self,msg):
self.message = msg
def __str__(self):
return 'pci: improper device assignment specified: ' + \
self.message
class PciDeviceVslotMissing(Exception):
def __init__(self,msg):
self.message = msg
def __str__(self):
return 'pci: no vslot: ' + self.message
class PciDevice:
def __init__(self, dev):
self.domain = int(dev['domain'], 16)
self.bus = int(dev['bus'], 16)
self.slot = int(dev['slot'], 16)
self.func = int(dev['func'], 16)
self.name = pci_dict_to_bdf_str(dev)
self.cfg_space_path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name + SYSFS_PCI_DEV_CONFIG_PATH
self.irq = 0
self.iomem = []
self.ioports = []
self.driver = None
self.vendor = None
self.device = None
self.subvendor = None
self.subdevice = None
self.msix = 0
self.msix_iomem = []
self.revision = 0
self.classcode = None
self.vendorname = ""
self.devicename = ""
self.classname = ""
self.subvendorname = ""
self.subdevicename = ""
self.dev_type = None
self.is_downstream_port = False
self.acs_enabled = False
self.has_non_page_aligned_bar = False
self.pcie_flr = False
self.pci_af_flr = False
self.detect_dev_info()
if (self.dev_type == DEV_TYPE_PCI_BRIDGE) or \
(self.dev_type == DEV_TYPE_PCIe_BRIDGE):
return
self.get_info_from_sysfs()
self.get_info_from_lspci()
def find_parent(self):
# i.e., /sys/bus/pci/devices/0000:00:19.0 or
# /sys/bus/pci/devices/0000:03:04.0
path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ self.name
# i.e., ../../../devices/pci0000:00/0000:00:19.0
# ../../../devices/pci0000:00/0000:00:02.0/0000:01:00.2/0000:03:04.0
try:
target = os.readlink(path)
lst = target.split('/')
parent = lst[len(lst)-2]
if parent[0:3] == 'pci':
# We have reached the upmost one.
return None
return parse_pci_name(parent)
except OSError, (errno, strerr):
            raise PciDeviceParseError('Cannot locate the parent of %s' %
                self.name)
def find_the_uppermost_pci_bridge(self):
# Find the uppermost PCI/PCI-X bridge
dev = self.find_parent()
if dev is None:
return None
dev = dev_parent = PciDevice(dev)
while dev_parent.dev_type != DEV_TYPE_PCIe_BRIDGE:
parent = dev_parent.find_parent()
if parent is None:
break
dev = dev_parent
dev_parent = PciDevice(parent)
return dev
def find_all_devices_behind_the_bridge(self, ignore_bridge):
sysfs_mnt = find_sysfs_mnt()
self_path = sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + self.name
pci_names = os.popen('ls ' + self_path).read()
dev_list = extract_the_exact_pci_names(pci_names)
list = [self.name]
for pci_str in dev_list:
dev = PciDevice(parse_pci_name(pci_str))
if dev.dev_type == DEV_TYPE_PCI_BRIDGE or \
dev.dev_type == DEV_TYPE_PCIe_BRIDGE:
sub_list_including_self = \
dev.find_all_devices_behind_the_bridge(ignore_bridge)
if ignore_bridge:
del sub_list_including_self[0]
list = list + [sub_list_including_self]
else:
list = list + [dev.name]
return list
def find_coassigned_pci_devices(self, ignore_bridge = True):
        ''' Here 'self' is a PCI device; we need to find the uppermost PCI/PCI-X
bridge, and all devices behind it must be co-assigned to the same
guest.
Parameter:
[ignore_bridge]: if set, the returned result doesn't include
any bridge behind the uppermost PCI/PCI-X bridge.
Note: The first element of the return value is the uppermost
PCI/PCI-X bridge. If the caller doesn't need the first
element, the caller itself can remove it explicitly.
'''
dev = self.find_the_uppermost_pci_bridge()
# The 'self' device is on bus0.
if dev is None:
return [self.name]
dev_list = dev.find_all_devices_behind_the_bridge(ignore_bridge)
dev_list = extract_the_exact_pci_names(dev_list)
return dev_list
def do_secondary_bus_reset(self, target_bus, devs):
# Save the config spaces of all the devices behind the bus.
(pci_list, cfg_list) = save_pci_conf_space(devs)
#Do the Secondary Bus Reset
sysfs_mnt = find_sysfs_mnt()
parent_path = sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + \
target_bus + SYSFS_PCI_DEV_CONFIG_PATH
fd = os.open(parent_path, os.O_RDWR)
os.lseek(fd, PCI_CB_BRIDGE_CONTROL, 0)
br_cntl = (struct.unpack('H', os.read(fd, 2)))[0]
# Assert Secondary Bus Reset
os.lseek(fd, PCI_CB_BRIDGE_CONTROL, 0)
br_cntl |= PCI_BRIDGE_CTL_BUS_RESET
os.write(fd, struct.pack('H', br_cntl))
time.sleep(0.100)
# De-assert Secondary Bus Reset
os.lseek(fd, PCI_CB_BRIDGE_CONTROL, 0)
br_cntl &= ~PCI_BRIDGE_CTL_BUS_RESET
os.write(fd, struct.pack('H', br_cntl))
time.sleep(0.100)
os.close(fd)
# Restore the config spaces
restore_pci_conf_space((pci_list, cfg_list))
def do_Dstate_transition(self):
pos = self.find_cap_offset(PCI_CAP_ID_PM)
if pos == 0:
return False
# No_Soft_Reset - When set 1, this bit indicates that
# devices transitioning from D3hot to D0 because of
# PowerState commands do not perform an internal reset.
pm_ctl = self.pci_conf_read32(pos + PCI_PM_CTRL)
if (pm_ctl & PCI_PM_CTRL_NO_SOFT_RESET) == PCI_PM_CTRL_NO_SOFT_RESET:
return False
(pci_list, cfg_list) = save_pci_conf_space([self.name])
# Enter D3hot
pm_ctl &= ~PCI_PM_CTRL_STATE_MASK
pm_ctl |= PCI_D3hot
self.pci_conf_write32(pos + PCI_PM_CTRL, pm_ctl)
time.sleep(0.010)
# From D3hot to D0
pm_ctl &= ~PCI_PM_CTRL_STATE_MASK
pm_ctl |= PCI_D0hot
self.pci_conf_write32(pos + PCI_PM_CTRL, pm_ctl)
time.sleep(0.010)
restore_pci_conf_space((pci_list, cfg_list))
return True
def do_vendor_specific_FLR_method(self):
pos = self.find_cap_offset(PCI_CAP_ID_VENDOR_SPECIFIC_CAP)
if pos == 0:
return
vendor_id = self.pci_conf_read16(PCI_VENDOR_ID)
if vendor_id != VENDOR_INTEL:
return
class_id = self.pci_conf_read16(PCI_CLASS_DEVICE)
if class_id != PCI_CLASS_ID_USB:
return
(pci_list, cfg_list) = save_pci_conf_space([self.name])
self.pci_conf_write8(pos + PCI_USB_FLRCTRL, 1)
time.sleep(0.100)
restore_pci_conf_space((pci_list, cfg_list))
def do_FLR_for_integrated_device(self):
if not self.do_Dstate_transition():
self.do_vendor_specific_FLR_method()
def do_AF_FLR(self, af_pos):
''' use PCI Advanced Capability to do FLR
'''
(pci_list, cfg_list) = save_pci_conf_space([self.name])
self.pci_conf_write8(af_pos + PCI_AF_CTL, PCI_AF_CTL_FLR)
time.sleep(0.100)
restore_pci_conf_space((pci_list, cfg_list))
def do_FLR_for_intel_4Series_iGFX(self):
af_pos = PCI_CAP_IGFX_CAP13_OFFSET
self.do_AF_FLR(af_pos)
log.debug("Intel 4 Series iGFX FLR done")
def do_FLR_for_GM45_iGFX(self):
reg32 = self.pci_conf_read32(PCI_CAP_IGFX_CAP09_OFFSET)
if ((reg32 >> 16) & 0x000000FF) != 0x06 or \
((reg32 >> 24) & 0x000000F0) != 0x20:
return
self.pci_conf_write8(PCI_CAP_IGFX_GDRST_OFFSET, PCI_CAP_IGFX_GDRST)
        for i in range(0, 10):
            time.sleep(0.100)
            reg8 = self.pci_conf_read8(PCI_CAP_IGFX_GDRST_OFFSET)
            if (reg8 & 0x01) == 0:
                break
        else:
            # The reset bit never cleared, so the FLR did not complete.
            log.debug("Intel iGFX FLR fail on GM45")
            return
# This specific reset will hang if the command register does not have
# memory space access enabled
cmd = self.pci_conf_read16(PCI_COMMAND)
self.pci_conf_write16(PCI_COMMAND, (cmd | 0x02))
af_pos = PCI_CAP_IGFX_CAP09_OFFSET
self.do_AF_FLR(af_pos)
self.pci_conf_write16(PCI_COMMAND, cmd)
log.debug("Intel iGFX FLR on GM45 done")
def find_all_the_multi_functions(self):
sysfs_mnt = find_sysfs_mnt()
parentdict = self.find_parent()
if parentdict is None :
return [ self.name ]
parent = pci_dict_to_bdf_str(parentdict)
pci_names = os.popen('ls ' + sysfs_mnt + SYSFS_PCI_DEVS_PATH + '/' + \
parent + '/').read()
funcs = extract_the_exact_pci_names(pci_names)
return funcs
def find_coassigned_devices(self):
if self.dev_type == DEV_TYPE_PCIe_ENDPOINT and not self.pcie_flr:
return self.find_all_the_multi_functions()
elif self.dev_type == DEV_TYPE_PCI and not self.pci_af_flr:
coassigned_pci_list = self.find_coassigned_pci_devices(True)
if len(coassigned_pci_list) > 1:
del coassigned_pci_list[0]
return coassigned_pci_list
else:
return [self.name]
def find_cap_offset(self, cap):
path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CONFIG_PATH
pos = PCI_CAPABILITY_LIST
try:
fd = None
fd = os.open(path, os.O_RDONLY)
os.lseek(fd, PCI_STATUS, 0)
status = struct.unpack('H', os.read(fd, 2))[0]
if (status & 0x10) == 0:
os.close(fd)
# The device doesn't support PCI_STATUS_CAP_LIST
return 0
max_cap = 48
while max_cap > 0:
os.lseek(fd, pos, 0)
pos = ord(os.read(fd, 1))
if pos < 0x40:
pos = 0
break;
os.lseek(fd, pos + 0, 0)
id = ord(os.read(fd, 1))
if id == 0xff:
pos = 0
break;
# Found the capability
if id == cap:
break;
# Test the next one
pos = pos + 1
max_cap = max_cap - 1;
os.close(fd)
except OSError, (errno, strerr):
if fd is not None:
os.close(fd)
raise PciDeviceParseError(('Error when accessing sysfs: %s (%d)' %
(strerr, errno)))
return pos
def find_ext_cap(self, cap):
path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CONFIG_PATH
ttl = 480; # 3840 bytes, minimum 8 bytes per capability
pos = 0x100
try:
fd = os.open(path, os.O_RDONLY)
os.lseek(fd, pos, 0)
h = os.read(fd, 4)
if len(h) == 0: # MMCONF is not enabled?
return 0
header = struct.unpack('I', h)[0]
if header == 0 or header == -1:
return 0
while ttl > 0:
if (header & 0x0000ffff) == cap:
return pos
pos = (header >> 20) & 0xffc
if pos < 0x100:
break
os.lseek(fd, pos, 0)
header = struct.unpack('I', os.read(fd, 4))[0]
ttl = ttl - 1
os.close(fd)
except OSError, (errno, strerr):
raise PciDeviceParseError(('Error when accessing sysfs: %s (%d)' %
(strerr, errno)))
return 0
def is_behind_switch_lacking_acs(self):
        # If there is an intermediate PCIe switch between the Root Complex and
        # the function that doesn't support ACS or doesn't have ACS enabled, we
        # return True, meaning the function is not allowed to be assigned to a
        # guest due to a potential security issue.
parent = self.find_parent()
while parent is not None:
dev_parent = PciDevice(parent)
if dev_parent.is_downstream_port and not dev_parent.acs_enabled:
return True
parent = dev_parent.find_parent()
return False
def pci_conf_read8(self, pos):
fd = os.open(self.cfg_space_path, os.O_RDONLY)
os.lseek(fd, pos, 0)
str = os.read(fd, 1)
os.close(fd)
val = struct.unpack('B', str)[0]
return val
def pci_conf_read16(self, pos):
fd = os.open(self.cfg_space_path, os.O_RDONLY)
os.lseek(fd, pos, 0)
str = os.read(fd, 2)
os.close(fd)
val = struct.unpack('H', str)[0]
return val
def pci_conf_read32(self, pos):
fd = os.open(self.cfg_space_path, os.O_RDONLY)
os.lseek(fd, pos, 0)
str = os.read(fd, 4)
os.close(fd)
val = struct.unpack('I', str)[0]
return val
def pci_conf_write8(self, pos, val):
str = struct.pack('B', val)
fd = os.open(self.cfg_space_path, os.O_WRONLY)
os.lseek(fd, pos, 0)
os.write(fd, str)
os.close(fd)
def pci_conf_write16(self, pos, val):
str = struct.pack('H', val)
fd = os.open(self.cfg_space_path, os.O_WRONLY)
os.lseek(fd, pos, 0)
os.write(fd, str)
os.close(fd)
def pci_conf_write32(self, pos, val):
str = struct.pack('I', val)
fd = os.open(self.cfg_space_path, os.O_WRONLY)
os.lseek(fd, pos, 0)
os.write(fd, str)
os.close(fd)
def detect_dev_info(self):
try:
class_dev = self.pci_conf_read16(PCI_CLASS_DEVICE)
except OSError, (err, strerr):
if err == errno.ENOENT:
strerr = "the device doesn't exist?"
raise PciDeviceParseError('%s: %s' %\
(self.name, strerr))
pos = self.find_cap_offset(PCI_CAP_ID_EXP)
if class_dev == PCI_CLASS_BRIDGE_PCI:
if pos == 0:
self.dev_type = DEV_TYPE_PCI_BRIDGE
else:
creg = self.pci_conf_read16(pos + PCI_EXP_FLAGS)
type = (creg & PCI_EXP_FLAGS_TYPE) >> 4
if type == PCI_EXP_TYPE_PCI_BRIDGE:
self.dev_type = DEV_TYPE_PCI_BRIDGE
else:
self.dev_type = DEV_TYPE_PCIe_BRIDGE
if type == PCI_EXP_TYPE_DOWNSTREAM:
self.is_downstream_port = True
pos = self.find_ext_cap(PCI_EXT_CAP_ID_ACS)
if pos != 0:
ctrl = self.pci_conf_read16(pos + PCI_EXT_ACS_CTRL)
if (ctrl & PCI_EXT_CAP_ACS_ENABLED) == \
(PCI_EXT_CAP_ACS_ENABLED):
self.acs_enabled = True
else:
if pos != 0:
self.dev_type = DEV_TYPE_PCIe_ENDPOINT
else:
self.dev_type = DEV_TYPE_PCI
# Force 0000:00:00.0 to be DEV_TYPE_PCIe_BRIDGE
if self.name == '0000:00:00.0':
self.dev_type = DEV_TYPE_PCIe_BRIDGE
if (self.dev_type == DEV_TYPE_PCI_BRIDGE) or \
(self.dev_type == DEV_TYPE_PCIe_BRIDGE):
return
        # Try to find the PCIe FLR capability
if self.dev_type == DEV_TYPE_PCIe_ENDPOINT:
dev_cap = self.pci_conf_read32(pos + PCI_EXP_DEVCAP)
if dev_cap & PCI_EXP_DEVCAP_FLR:
self.pcie_flr = True
else:
# Quirk for the VF of Intel 82599 10GbE Controller.
# We know it does have PCIe FLR capability even if it doesn't
# report that (dev_cap.PCI_EXP_DEVCAP_FLR is 0).
# See the 82599 datasheet.
dev_path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+self.name
vendor_id = parse_hex(os.popen('cat %s/vendor' % dev_path).read())
device_id = parse_hex(os.popen('cat %s/device' % dev_path).read())
if (vendor_id == VENDOR_INTEL) and \
(device_id == DEVICE_ID_82599):
self.pcie_flr = True
elif self.dev_type == DEV_TYPE_PCI:
# Try to find the "PCI Advanced Capabilities"
pos = self.find_cap_offset(PCI_CAP_ID_AF)
if pos != 0:
af_cap = self.pci_conf_read8(pos + PCI_AF_CAPs)
if (af_cap & PCI_AF_CAPs_TP_FLR) == PCI_AF_CAPs_TP_FLR:
self.pci_af_flr = True
bar_addr = PCI_BAR_0
while bar_addr <= PCI_BAR_5:
bar = self.pci_conf_read32(bar_addr)
if (bar & PCI_BAR_SPACE) == PCI_BAR_MEM:
bar = bar & PCI_BAR_MEM_MASK
bar = bar & ~PAGE_MASK
if bar != 0:
self.has_non_page_aligned_bar = True
break
bar_addr = bar_addr + 4
def devs_check_driver(self, devs):
if len(devs) == 0:
return
for pci_dev in devs:
dev = PciDevice(parse_pci_name(pci_dev))
if dev.driver == 'pciback' or dev.driver == 'pci-stub':
continue
err_msg = 'pci: %s must be co-assigned to the same guest with %s' + \
', but it is not owned by pciback or pci-stub.'
raise PciDeviceAssignmentError(err_msg % (pci_dev, self.name))
def do_FLR(self, is_hvm, strict_check):
""" Perform FLR (Functional Level Reset) for the device.
"""
if self.dev_type == DEV_TYPE_PCIe_ENDPOINT:
# If PCIe device supports FLR, we use it.
if self.pcie_flr:
(pci_list, cfg_list) = save_pci_conf_space([self.name])
pos = self.find_cap_offset(PCI_CAP_ID_EXP)
self.pci_conf_write32(pos + PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_FLR)
# We must sleep at least 100ms for the completion of FLR
time.sleep(0.100)
restore_pci_conf_space((pci_list, cfg_list))
else:
if self.bus == 0:
self.do_FLR_for_integrated_device()
else:
funcs = self.find_all_the_multi_functions()
if not is_hvm and (len(funcs) > 1):
return
if is_hvm and not strict_check:
return
self.devs_check_driver(funcs)
parent = pci_dict_to_bdf_str(self.find_parent())
# Do Secondary Bus Reset.
self.do_secondary_bus_reset(parent, funcs)
# PCI devices
else:
# For PCI device on host bus, we test "PCI Advanced Capabilities".
if self.bus == 0 and self.pci_af_flr:
af_pos = self.find_cap_offset(PCI_CAP_ID_AF)
self.do_AF_FLR(af_pos)
else:
if self.bus == 0:
if self.slot == 0x02 and self.func == 0x0:
vendor_id = self.pci_conf_read16(PCI_VENDOR_ID)
if vendor_id != VENDOR_INTEL:
return
class_id = self.pci_conf_read16(PCI_CLASS_DEVICE)
if class_id != PCI_CLASS_ID_VGA:
return
device_id = self.pci_conf_read16(PCI_DEVICE_ID)
if device_id == PCI_DEVICE_ID_IGFX_GM45:
self.do_FLR_for_GM45_iGFX()
elif device_id == PCI_DEVICE_ID_IGFX_EAGLELAKE or \
device_id == PCI_DEVICE_ID_IGFX_Q45 or \
device_id == PCI_DEVICE_ID_IGFX_G45 or \
device_id == PCI_DEVICE_ID_IGFX_G41:
self.do_FLR_for_intel_4Series_iGFX()
else:
log.debug("Unknown iGFX device_id:%x", device_id)
else:
self.do_FLR_for_integrated_device()
else:
devs = self.find_coassigned_pci_devices(False)
# Remove the element 0 which is a bridge
target_bus = devs[0]
del devs[0]
if not is_hvm and (len(devs) > 1):
return
if is_hvm and not strict_check:
return
self.devs_check_driver(devs)
# Do Secondary Bus Reset.
self.do_secondary_bus_reset(target_bus, devs)
def find_capability(self, type):
sysfs_mnt = find_sysfs_mnt()
if sysfs_mnt == None:
return False
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CONFIG_PATH
try:
conf_file = open(path, 'rb')
conf_file.seek(PCI_HEADER_TYPE)
header_type = ord(conf_file.read(1)) & PCI_HEADER_TYPE_MASK
if header_type == PCI_HEADER_TYPE_CARDBUS:
return
conf_file.seek(PCI_STATUS_OFFSET)
status = ord(conf_file.read(1))
if status&PCI_STATUS_CAP_MASK:
conf_file.seek(PCI_CAP_OFFSET)
capa_pointer = ord(conf_file.read(1))
capa_count = 0
while capa_pointer:
if capa_pointer < 0x40:
raise PciDeviceParseError(
('Broken capability chain: %s' % self.name))
capa_count += 1
if capa_count > 96:
raise PciDeviceParseError(
('Looped capability chain: %s' % self.name))
conf_file.seek(capa_pointer)
capa_id = ord(conf_file.read(1))
capa_pointer = ord(conf_file.read(1))
if capa_id == type:
# get the type
message_cont_lo = ord(conf_file.read(1))
message_cont_hi = ord(conf_file.read(1))
self.msix=1
self.msix_entries = (message_cont_lo + \
(message_cont_hi << 8)) \
& MSIX_SIZE_MASK
t_off=conf_file.read(4)
p_off=conf_file.read(4)
self.table_offset=ord(t_off[0]) | (ord(t_off[1])<<8) | \
(ord(t_off[2])<<16)| \
(ord(t_off[3])<<24)
self.pba_offset=ord(p_off[0]) | (ord(p_off[1]) << 8)| \
(ord(p_off[2])<<16) | \
(ord(p_off[3])<<24)
self.table_index = self.table_offset & MSIX_BIR_MASK
self.table_offset = self.table_offset & ~MSIX_BIR_MASK
self.pba_index = self.pba_offset & MSIX_BIR_MASK
self.pba_offset = self.pba_offset & ~MSIX_BIR_MASK
break
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to locate sysfs mount: %s: %s (%d)' %
(PROC_PCI_PATH, strerr, errno)))
except TypeError, err:
log.debug("Caught TypeError '%s'" % err)
pass
def get_info_from_sysfs(self):
self.find_capability(0x11)
sysfs_mnt = find_sysfs_mnt()
if sysfs_mnt == None:
return False
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_RESOURCE_PATH
try:
resource_file = open(path,'r')
for i in range(PROC_PCI_NUM_RESOURCES):
line = resource_file.readline()
sline = line.split()
if len(sline)<3:
continue
start = int(sline[0],16)
end = int(sline[1],16)
flags = int(sline[2],16)
size = end-start+1
if start!=0:
if flags&PCI_BAR_IO:
self.ioports.append( (start,size) )
else:
self.iomem.append( (start,size) )
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_IRQ_PATH
try:
self.irq = int(open(path,'r').readline())
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_DRIVER_DIR_PATH
try:
self.driver = os.path.basename(os.readlink(path))
except OSError, (errno, strerr):
self.driver = ""
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_VENDOR_PATH
try:
self.vendor = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_DEVICE_PATH
try:
self.device = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_SUBVENDOR_PATH
try:
self.subvendor = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_SUBDEVICE_PATH
try:
self.subdevice = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
path = sysfs_mnt+SYSFS_PCI_DEVS_PATH+'/'+ \
self.name+SYSFS_PCI_DEV_CLASS_PATH
try:
self.classcode = int(open(path,'r').readline(), 16)
except IOError, (errno, strerr):
raise PciDeviceParseError(('Failed to open & read %s: %s (%d)' %
(path, strerr, errno)))
return True
def get_info_from_lspci(self):
""" Get information such as vendor name, device name, class name, etc.
Since we cannot obtain these data from sysfs, use 'lspci' command.
"""
global lspci_info
global lspci_info_lock
lspci_info_lock.acquire()
try:
if lspci_info is None:
_create_lspci_info()
device_info = lspci_info.get(self.name)
if device_info:
try:
self.revision = int(device_info.get('Rev', '0'), 16)
except ValueError:
pass
self.vendorname = device_info.get('Vendor', '')
self.devicename = device_info.get('Device', '')
self.classname = device_info.get('Class', '')
self.subvendorname = device_info.get('SVendor', '')
self.subdevicename = device_info.get('SDevice', '')
return True
finally:
lspci_info_lock.release()
def __str__(self):
str = "PCI Device %s\n" % (self.name)
for (start,size) in self.ioports:
str = str + "IO Port 0x%02x [size=%d]\n"%(start,size)
for (start,size) in self.iomem:
str = str + "IO Mem 0x%02x [size=%d]\n"%(start,size)
str = str + "IRQ %d\n"%(self.irq)
str = str + "Vendor ID 0x%04x\n"%(self.vendor)
str = str + "Device ID 0x%04x\n"%(self.device)
str = str + "Sybsystem Vendor ID 0x%04x\n"%(self.subvendor)
str = str + "Subsystem Device ID 0x%04x"%(self.subdevice)
return str
def main():
if len(sys.argv)<5:
print "Usage: %s <domain> <bus> <slot> <func>\n" % sys.argv[0]
sys.exit(2)
dev = PciDevice(int(sys.argv[1],16), int(sys.argv[2],16),
int(sys.argv[3],16), int(sys.argv[4],16))
print str(dev)
if __name__=='__main__':
main()
|
gpl-2.0
| -2,155,370,019,235,998,200 | -990,560,227,834,440,600 | 34.774308 | 84 | 0.534956 | false |
pnecchi/Thesis
|
Code/Prototype/critic.py
|
1
|
2591
|
################################################################################
# Description: Module containing various critic implementations
# Author: Pierpaolo Necchi
# Email: [email protected]
# Date: dom 05 giu 2016 18:24:01 CEST
################################################################################
import numpy as np
class Critic(object):
""" Critic class which specifies the generic interface of a critic. """
def __init__(self, dimIn):
""" Initialize critic.
Args:
dimIn (int): state size
"""
# Initialize input size, i.e. size of the state
self.dimIn = dimIn
def __call__(self, state):
""" Evaluate a given state.
Args:
state (np.array): state to be evaluated
Returns:
value (float): state value
"""
pass
class LinearCritic(Critic):
""" Critic that uses a linear function approximation """
def __init__(self, dimIn, features):
""" Initialize LinearCritic.
Args:
dimIn (int): state size
features (object): features Phi(s)
"""
# Initialize Critic base class
Critic.__init__(self, dimIn)
# Initialize features
self._features = features
self._dimPar = features.size()
# Initialize critic parameters
        self._parameters = 0.05 * np.random.randn(self._dimPar)
        # Cache of the last state evaluated by __call__/gradient
        self._lastState = None
def __call__(self, state):
""" Evaluate a given state.
Args:
state (np.array): state to be evaluated
Returns:
value (float): state value
"""
# Cache state
self._lastState = state
# Evaluate features and cache result
self._featuresEval = self._features(state)
# Evaluate state
return np.dot(self._featuresEval, self._parameters.T)
def gradient(self, state):
""" Compute critic gradient.
Args:
state (np.array): state
Returns:
gradient (np.array): critic gradient
"""
        if not np.array_equal(state, self._lastState):
            self._featuresEval = self._features(state)
        return self._featuresEval
def getParameters(self):
""" Return critic parameters.
Returns:
parameters (np.array): actor parameters
"""
return self._parameters
def setParameters(self, parameters):
""" Set critic parameters.
Args:
parameters (np.array): new actor parameters
"""
self._parameters = parameters
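# Illustrative sketch, not part of the original module: a toy feature map with
# the interface LinearCritic expects (size() and __call__), used to evaluate a
# state value and its gradient. All names and numbers are hypothetical.
class _IdentityFeatures(object):
    def __init__(self, dim):
        self._dim = dim
    def size(self):
        return self._dim
    def __call__(self, state):
        return np.asarray(state, dtype=float)
def _example_linear_critic():
    critic = LinearCritic(dimIn=3, features=_IdentityFeatures(3))
    critic.setParameters(np.array([0.1, -0.2, 0.3]))
    state = np.array([1.0, 2.0, 3.0])
    value = critic(state)              # dot(Phi(s), theta) = 0.6
    gradient = critic.gradient(state)  # Phi(s) itself for a linear critic
    return value, gradient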
|
mit
| 2,879,253,658,443,821,600 | -8,990,184,775,097,502,000 | 24.653465 | 80 | 0.526052 | false |
ghisvail/vispy
|
vispy/visuals/transforms/tests/test_transforms.py
|
17
|
6541
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
import vispy.visuals.transforms as tr
from vispy.geometry import Rect
from vispy.testing import run_tests_if_main
NT = tr.NullTransform
ST = tr.STTransform
AT = tr.MatrixTransform
RT = tr.MatrixTransform
PT = tr.PolarTransform
LT = tr.LogTransform
CT = tr.ChainTransform
def assert_chain_types(chain, types):
assert list(map(type, chain.transforms)) == types
def assert_chain_objects(chain1, chain2):
assert chain1.transforms == chain2.transforms
def tesst_multiplication():
n = NT()
s = ST()
a = AT()
p = PT()
l = LT()
c1 = CT([s, a, p])
assert c1
c2 = CT([s, a, s])
assert isinstance(n * n, NT)
assert isinstance(n * s, ST)
assert isinstance(s * s, ST)
assert isinstance(a * s, AT)
assert isinstance(a * a, AT)
assert isinstance(s * a, AT)
assert isinstance(n * p, PT)
assert isinstance(s * p, CT)
assert_chain_types(s * p, [PT, ST])
assert_chain_types(s * p * a, [ST, PT, AT])
assert_chain_types(s * a * p, [PT, AT])
assert_chain_types(s * p * s, [ST, PT, ST])
assert_chain_types(s * a * p * s * a, [AT, PT, AT])
assert_chain_types(c2 * a, [AT])
assert_chain_types(p * l * s, [ST, LT, PT])
def test_transform_chain():
# Make dummy classes for easier distinguishing the transforms
class DummyTrans(tr.BaseTransform):
glsl_map = "vec4 trans(vec4 pos) {return pos;}"
glsl_imap = "vec4 trans(vec4 pos) {return pos;}"
class TransA(DummyTrans):
pass
class TransB(DummyTrans):
pass
class TransC(DummyTrans):
pass
# Create test transforms
a, b, c = TransA(), TransB(), TransC()
# Test Chain creation
assert tr.ChainTransform().transforms == []
assert tr.ChainTransform(a).transforms == [a]
assert tr.ChainTransform(a, b).transforms == [a, b]
assert tr.ChainTransform(a, b, c, a).transforms == [a, b, c, a]
# Test composition by multiplication
assert_chain_objects(a * b, tr.ChainTransform(a, b))
assert_chain_objects(a * b * c, tr.ChainTransform(a, b, c))
assert_chain_objects(a * b * c * a, tr.ChainTransform(a, b, c, a))
# Test adding/prepending to transform
chain = tr.ChainTransform()
chain.append(a)
assert chain.transforms == [a]
chain.append(b)
assert chain.transforms == [a, b]
chain.append(c)
assert chain.transforms == [a, b, c]
chain.prepend(b)
assert chain.transforms == [b, a, b, c]
chain.prepend(c)
assert chain.transforms == [c, b, a, b, c]
# Test simplifying
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
t3 = tr.STTransform(translate=(3, 4))
# Create multiplied versions
t123 = t1*t2*t3
t321 = t3*t2*t1
c123 = tr.ChainTransform(t1, t2, t3)
c321 = tr.ChainTransform(t3, t2, t1)
c123s = c123.simplified
c321s = c321.simplified
#
assert isinstance(t123, tr.STTransform) # or the test is useless
assert isinstance(t321, tr.STTransform) # or the test is useless
assert isinstance(c123s, tr.ChainTransform) # or the test is useless
assert isinstance(c321s, tr.ChainTransform) # or the test is useless
# Test Mapping
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
chain1 = tr.ChainTransform(t1, t2)
chain2 = tr.ChainTransform(t2, t1)
#
assert chain1.transforms == [t1, t2] # or the test is useless
assert chain2.transforms == [t2, t1] # or the test is useless
#
m12 = (t1*t2).map((1, 1)).tolist()
m21 = (t2*t1).map((1, 1)).tolist()
m12_ = chain1.map((1, 1)).tolist()
m21_ = chain2.map((1, 1)).tolist()
#
#print(m12, m21, m12_, m21_)
assert m12 != m21
assert m12 == m12_
assert m21 == m21_
# Test shader map
t1 = tr.STTransform(scale=(2, 3))
t2 = tr.STTransform(translate=(3, 4))
chain = tr.ChainTransform(t1, t2)
#
funcs = chain.shader_map().dependencies()
funcsi = chain.shader_imap().dependencies()
#
assert t1.shader_map() in funcs
assert t2.shader_map() in funcs
assert t1.shader_imap() in funcsi
assert t2.shader_imap() in funcsi
def test_map_rect():
r = Rect((2, 7), (13, 19))
r1 = ST(scale=(2, 2), translate=(-10, 10)).map(r)
assert r1 == Rect((-6, 24), (26, 38))
def test_st_transform():
# Check that STTransform maps exactly like MatrixTransform
pts = np.random.normal(size=(10, 4))
scale = (1, 7.5, -4e-8)
translate = (1e6, 0.2, 0)
st = tr.STTransform(scale=scale, translate=translate)
at = tr.MatrixTransform()
at.scale(scale)
at.translate(translate)
assert np.allclose(st.map(pts), at.map(pts))
assert np.allclose(st.inverse.map(pts), at.inverse.map(pts))
def test_st_mapping():
p1 = [[5., 7.], [23., 8.]]
p2 = [[-1.3, -1.4], [1.1, 1.2]]
t = tr.STTransform()
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :len(p2)], p2)
def test_affine_mapping():
t = tr.MatrixTransform()
p1 = np.array([[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
# test pure translation
p2 = p1 + 5.5
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
# test pure scaling
p2 = p1 * 5.5
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
# test scale + translate
p2 = (p1 * 5.5) + 3.5
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
# test SRT
p2 = np.array([[10, 5, 3],
[10, 15, 3],
[30, 5, 3],
[10, 5, 3.5]])
t.set_mapping(p1, p2)
assert np.allclose(t.map(p1)[:, :p2.shape[1]], p2)
def test_inverse():
m = np.random.normal(size=(4, 4))
transforms = [
NT(),
ST(scale=(1e-4, 2e5), translate=(10, -6e9)),
AT(m),
RT(m),
]
np.random.seed(0)
N = 20
x = np.random.normal(size=(N, 3))
pw = np.random.normal(size=(N, 3), scale=3)
pos = x * 10 ** pw
for trn in transforms:
assert np.allclose(pos, trn.inverse.map(trn.map(pos))[:, :3])
# log transform only works on positive values
#abs_pos = np.abs(pos)
#tr = LT(base=(2, 4.5, 0))
#assert np.allclose(abs_pos, tr.inverse.map(tr.map(abs_pos))[:,:3])
run_tests_if_main()
|
bsd-3-clause
| -4,070,948,195,672,791,000 | -2,039,118,418,155,865,300 | 26.952991 | 73 | 0.586913 | false |
insomnia-lab/calibre
|
src/calibre/library/server/content.py
|
3
|
10759
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import re, os, posixpath
import cherrypy
from calibre import fit_image, guess_type
from calibre.utils.date import fromtimestamp, as_utc
from calibre.library.caches import SortKeyGenerator
from calibre.library.save_to_disk import find_plugboard
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.magick.draw import (save_cover_data_to, Image,
thumbnail as generate_thumbnail)
from calibre.utils.filenames import ascii_filename
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.utils.config import tweaks
plugboard_content_server_value = 'content_server'
plugboard_content_server_formats = ['epub', 'mobi', 'azw3']
class CSSortKeyGenerator(SortKeyGenerator):
def __init__(self, fields, fm, db_prefs):
SortKeyGenerator.__init__(self, fields, fm, None, db_prefs)
def __call__(self, record):
return self.itervals(record).next()
class ContentServer(object):
'''
Handles actually serving content files/covers/metadata. Also has
a few utility methods.
'''
def add_routes(self, connect):
connect('root', '/', self.index)
connect('old', '/old', self.old)
connect('get', '/get/{what}/{id}', self.get,
conditions=dict(method=["GET", "HEAD"]),
android_workaround=True)
connect('static', '/static/{name:.*?}', self.static,
conditions=dict(method=["GET", "HEAD"]))
connect('favicon', '/favicon.png', self.favicon,
conditions=dict(method=["GET", "HEAD"]))
# Utility methods {{{
def last_modified(self, updated):
'''
Generates a locale independent, english timestamp from a datetime
object
'''
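        # e.g. (illustrative) 2014-03-04 10:15:30 UTC renders as
        # 'Tue, 04 Mar 2014 10:15:30 GMT' regardless of the server locale.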
updated = as_utc(updated)
lm = updated.strftime('day, %d month %Y %H:%M:%S GMT')
day ={0:'Sun', 1:'Mon', 2:'Tue', 3:'Wed', 4:'Thu', 5:'Fri', 6:'Sat'}
lm = lm.replace('day', day[int(updated.strftime('%w'))])
month = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul',
8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
return lm.replace('month', month[updated.month])
def sort(self, items, field, order):
field = self.db.data.sanitize_sort_field_name(field)
if field not in self.db.field_metadata.sortable_field_keys():
raise cherrypy.HTTPError(400, '%s is not a valid sort field'%field)
keyg = CSSortKeyGenerator([(field, order)], self.db.field_metadata,
self.db.prefs)
items.sort(key=keyg, reverse=not order)
# }}}
def get(self, what, id):
'Serves files, covers, thumbnails, metadata from the calibre database'
try:
id = int(id)
except ValueError:
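            # Not a bare integer: fall back to pulling the trailing numeric id
            # out of a filename-style value such as 'Some_Title_123.epub'
            # (hypothetical example).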
id = id.rpartition('.')[0].rpartition('_')[-1]
match = re.search(r'\d+', id)
if not match:
raise cherrypy.HTTPError(404, 'id:%s not an integer'%id)
id = int(match.group())
if not self.db.has_id(id):
raise cherrypy.HTTPError(404, 'id:%d does not exist in database'%id)
if what == 'thumb' or what.startswith('thumb_'):
try:
width, height = map(int, what.split('_')[1:])
except:
width, height = 60, 80
return self.get_cover(id, thumbnail=True, thumb_width=width,
thumb_height=height)
if what == 'cover':
return self.get_cover(id)
if what == 'opf':
return self.get_metadata_as_opf(id)
if what == 'json':
raise cherrypy.InternalRedirect('/ajax/book/%d'%id)
return self.get_format(id, what)
def static(self, name):
'Serves static content'
name = name.lower()
fname = posixpath.basename(name)
try:
cherrypy.response.headers['Content-Type'] = {
'js' : 'text/javascript',
'css' : 'text/css',
'png' : 'image/png',
'gif' : 'image/gif',
'html' : 'text/html',
}[fname.rpartition('.')[-1].lower()]
except KeyError:
raise cherrypy.HTTPError(404, '%r not a valid resource type'%name)
cherrypy.response.headers['Last-Modified'] = self.last_modified(self.build_time)
basedir = os.path.abspath(P('content_server'))
path = os.path.join(basedir, name.replace('/', os.sep))
path = os.path.abspath(path)
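        # Refuse paths that resolve outside the static content directory
        # (guards against traversal requests such as '/static/../../etc/passwd').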
if not path.startswith(basedir):
raise cherrypy.HTTPError(403, 'Access to %s is forbidden'%name)
if not os.path.exists(path) or not os.path.isfile(path):
raise cherrypy.HTTPError(404, '%s not found'%name)
if self.opts.develop:
lm = fromtimestamp(os.stat(path).st_mtime)
cherrypy.response.headers['Last-Modified'] = self.last_modified(lm)
with open(path, 'rb') as f:
ans = f.read()
if path.endswith('.css'):
ans = ans.replace('/static/', self.opts.url_prefix + '/static/')
return ans
def favicon(self):
data = I('lt.png', data=True)
cherrypy.response.headers['Content-Type'] = 'image/png'
cherrypy.response.headers['Last-Modified'] = self.last_modified(
self.build_time)
return data
def index(self, **kwargs):
'The / URL'
ua = cherrypy.request.headers.get('User-Agent', '').strip()
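        # 919 is an arbitrary sentinel default: the header counts as present
        # only when get() returns something other than the sentinel.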
want_opds = \
cherrypy.request.headers.get('Stanza-Device-Name', 919) != 919 or \
cherrypy.request.headers.get('Want-OPDS-Catalog', 919) != 919 or \
ua.startswith('Stanza')
want_mobile = self.is_mobile_browser(ua)
if self.opts.develop and not want_mobile:
cherrypy.log('User agent: '+ua)
if want_opds:
return self.opds(version=0)
if want_mobile:
return self.mobile()
return self.browse_catalog()
def old(self, **kwargs):
return self.static('index.html').replace('{prefix}',
self.opts.url_prefix)
# Actually get content from the database {{{
def get_cover(self, id, thumbnail=False, thumb_width=60, thumb_height=80):
try:
cherrypy.response.headers['Content-Type'] = 'image/jpeg'
cherrypy.response.timeout = 3600
cover = self.db.cover(id, index_is_id=True)
if cover is None:
cover = self.default_cover
updated = self.build_time
else:
updated = self.db.cover_last_modified(id, index_is_id=True)
cherrypy.response.headers['Last-Modified'] = self.last_modified(updated)
if thumbnail:
quality = tweaks['content_server_thumbnail_compression_quality']
if quality < 50:
quality = 50
elif quality > 99:
quality = 99
return generate_thumbnail(cover, width=thumb_width,
height=thumb_height, compression_quality=quality)[-1]
img = Image()
img.load(cover)
width, height = img.size
scaled, width, height = fit_image(width, height,
thumb_width if thumbnail else self.max_cover_width,
thumb_height if thumbnail else self.max_cover_height)
if not scaled:
return cover
return save_cover_data_to(img, 'img.jpg', return_data=True,
resize_to=(width, height))
except Exception as err:
import traceback
cherrypy.log.error('Failed to generate cover:')
            cherrypy.log.error(traceback.format_exc())
raise cherrypy.HTTPError(404, 'Failed to generate cover: %r'%err)
def get_metadata_as_opf(self, id_):
cherrypy.response.headers['Content-Type'] = \
'application/oebps-package+xml; charset=UTF-8'
mi = self.db.get_metadata(id_, index_is_id=True)
data = metadata_to_opf(mi)
cherrypy.response.timeout = 3600
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(mi.last_modified)
return data
def get_format(self, id, format):
format = format.upper()
fm = self.db.format_metadata(id, format, allow_cache=False)
if not fm:
raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
update_metadata = format in {'MOBI', 'EPUB', 'AZW3'}
mi = newmi = self.db.get_metadata(
id, index_is_id=True, cover_as_data=True, get_cover=update_metadata)
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(max(fm['mtime'], mi.last_modified))
fmt = self.db.format(id, format, index_is_id=True, as_file=True,
mode='rb')
if fmt is None:
raise cherrypy.HTTPError(404, 'book: %d does not have format: %s'%(id, format))
mt = guess_type('dummy.'+format.lower())[0]
if mt is None:
mt = 'application/octet-stream'
cherrypy.response.headers['Content-Type'] = mt
if format.lower() in plugboard_content_server_formats:
# Get any plugboards for the content server
plugboards = self.db.prefs.get('plugboards', {})
cpb = find_plugboard(plugboard_content_server_value,
format.lower(), plugboards)
if cpb:
# Transform the metadata via the plugboard
newmi = mi.deepcopy_metadata()
newmi.template_to_attribute(mi, cpb)
if update_metadata:
# Write the updated file
from calibre.ebooks.metadata.meta import set_metadata
set_metadata(fmt, newmi, format.lower())
fmt.seek(0)
fmt.seek(0, 2)
cherrypy.response.headers['Content-Length'] = fmt.tell()
fmt.seek(0)
au = authors_to_string(newmi.authors if newmi.authors else
[_('Unknown')])
title = newmi.title if newmi.title else _('Unknown')
fname = u'%s - %s_%s.%s'%(title[:30], au[:30], id, format.lower())
fname = ascii_filename(fname).replace('"', '_')
cherrypy.response.headers['Content-Disposition'] = \
b'attachment; filename="%s"'%fname
cherrypy.response.body = fmt
cherrypy.response.timeout = 3600
return fmt
# }}}
|
gpl-3.0
| 5,456,641,144,515,611,000 | -809,554,208,406,538,000 | 39.29588 | 91 | 0.570406 | false |
TomBaxter/osf.io
|
osf/migrations/0024_migrate_subject_parents_to_parent.py
|
28
|
3542
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.db import connection, migrations, models
from osf.models.validators import validate_subject_hierarchy_length
logger = logging.getLogger(__name__)
def add_custom_mapping_constraint(state, schema):
sql = """
ALTER TABLE osf_subject
ADD CONSTRAINT customs_must_be_mapped
CHECK (bepress_subject_id IS NOT NULL OR provider_id = %s);
"""
try:
osf_id = state.get_model('osf', 'preprintprovider').objects.get(_id='osf').id
except models.ObjectDoesNotExist:
# Allow test / local dev DBs to pass
        logger.warn('Unable to create constraint - assuming test environment.')
pass
else:
with connection.cursor() as cursor:
cursor.execute(sql, [osf_id])
def remove_custom_mapping_constraint(*args):
sql = """
ALTER TABLE osf_subject
DROP CONSTRAINT IF EXISTS customs_must_be_mapped RESTRICT;
"""
with connection.cursor() as cursor:
cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('osf', '0023_merge_20170503_1947'),
]
operations = [
migrations.AddField(
model_name='subject',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.SET_NULL, related_name='children', to='osf.Subject', validators=[validate_subject_hierarchy_length]),
),
migrations.AddField(
model_name='subject',
name='provider',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.SET_NULL, to='osf.PreprintProvider', related_name='subjects')
),
migrations.AddField(
model_name='subject',
name='bepress_subject',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.CASCADE, to='osf.Subject', related_name='aliases')
),
migrations.RunSQL(
["""
UPDATE osf_subject
SET provider_id = (SELECT id FROM osf_preprintprovider WHERE _id = 'osf');
"""], ["""
UPDATE osf_subject
SET provider_id = NULL;
"""]
),
migrations.RunSQL(
["""
UPDATE osf_subject
SET parent_id=subquery.to_subject_id
FROM (SELECT from_subject_id, to_subject_id
FROM osf_subject_parents) AS subquery
WHERE osf_subject.id=subquery.from_subject_id;
"""], ["""
INSERT INTO osf_subject_parents (from_subject_id, to_subject_id)
SELECT id, parent_id FROM osf_subject
WHERE parent_id IS NOT NULL;
"""]
),
migrations.RunPython(
add_custom_mapping_constraint, remove_custom_mapping_constraint
),
migrations.RemoveField(
model_name='subject',
name='parents'
),
migrations.AlterField(
model_name='subject',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=models.deletion.SET_NULL, related_name='children', to='osf.Subject', validators=[validate_subject_hierarchy_length]),
),
migrations.AlterField(
model_name='subject',
name='provider',
field=models.ForeignKey(blank=False, null=False, on_delete=models.deletion.CASCADE, to='osf.PreprintProvider', related_name='subjects')
),
]
|
apache-2.0
| -1,417,731,455,290,295,300 | -8,174,466,014,094,112,000 | 35.895833 | 186 | 0.596273 | false |
iuliat/nova
|
nova/tests/unit/scheduler/filters/test_retry_filters.py
|
68
|
1929
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.scheduler.filters import retry_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestRetryFilter(test.NoDBTestCase):
def setUp(self):
super(TestRetryFilter, self).setUp()
self.filt_cls = retry_filter.RetryFilter()
def test_retry_filter_disabled(self):
# Test case where retry/re-scheduling is disabled.
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
# Node not previously tried.
host = fakes.FakeHostState('host1', 'nodeX', {})
retry = dict(num_attempts=2,
hosts=[['host1', 'node1'], # same host, different node
['host2', 'node2'], # different host and node
])
filter_properties = dict(retry=retry)
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_retry_filter_fail(self):
# Node was already tried.
host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1,
hosts=[['host1', 'node1']])
filter_properties = dict(retry=retry)
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
|
apache-2.0
| 8,557,649,019,638,903,000 | -7,757,418,900,083,467,000 | 40.934783 | 78 | 0.645931 | false |
andjimrio/LTN
|
Application/service/item_services.py
|
1
|
5173
|
from collections import Counter
from django.utils import timezone
from django.shortcuts import get_object_or_404
from Application.models import Item, UserProfile, Section, Status
from Application.utilities.python_utilities import floor_log
from Application.service.profile_services import get_profile, get_keywords_by_user
from Application.service.section_services import get_sections_by_user
def create_item(**dict_item):
return Item.objects.get_or_create(**dict_item)
def get_item(item_id):
return get_object_or_404(Item, pk=item_id)
def exists_item_by_link(link):
return Item.objects.filter(link=link).exists()
def get_status_by_user_item(user_id, item_id):
return Status.objects.get_or_create(user_id=get_profile(user_id).id, item_id=item_id)
def get_last_items_by_user(user_id, unview=False):
if unview:
return UserProfile.objects.get(user__id=user_id).sections.all()\
.values('feeds__id', 'feeds__title', 'feeds__items__id', 'feeds__items__title',
'feeds__items__description', 'feeds__items__pubDate', 'feeds__items__image')\
.order_by('-feeds__items__pubDate')
else:
return UserProfile.objects.get(user__id=user_id).statuses.all().\
filter(view=False).\
values('item__feed_id', 'item__feed__title', 'item_id', 'item__title',
'item__description', 'item__pubDate', 'item__image').\
order_by('-item__pubDate')
def get_item_today_by_section(section_id, days=0, hours=0):
end_date = timezone.now()
start_date = end_date - timezone.timedelta(days=days, hours=hours)
return Section.objects.filter(id=section_id).filter(feeds__items__pubDate__range=[start_date, end_date])\
.values('feeds__items__id', 'feeds__items__title')
def get_item_similarity(item_id, limit, user_id):
more_results = Item.objects.get_more_like_this('article', item_id, limit). \
filter(statuses__user__user_id=user_id)\
.order_by('-pubDate')
return more_results
def get_item_query(query, profile_id):
results = Item.objects.filter(keywords__term__contains=query) \
.filter(feed__sections__user_id=profile_id)\
.order_by('-pubDate')
return results
def query_multifield_dict(dict_query, profile_id):
results = Item.objects.query_multifield_dict(dict_query) \
.filter(feed__sections__user_id=profile_id)\
.order_by('-pubDate')
return results
def stats_items(queryset):
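    # Bucket items by publication month, e.g. (hypothetical) three items from
    # March 2017 and one from April 2017 give {'03/2017': 3, '04/2017': 1}.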
stats = [x.pubDate.strftime("%m/%Y") for x in queryset]
return dict(Counter(stats))
def get_item_recommend(profile_id):
results = Item.objects.filter(feed__sections__user_id=profile_id)\
.exclude(statuses__view=True)\
.filter(keywords__in=get_keywords_by_user(profile_id))\
.order_by('-pubDate')
return results
def get_item_saved(user_id):
return Item.objects.filter(statuses__user__user_id=user_id)\
.filter(statuses__saves=True)\
.order_by('-pubDate')
def get_summary(user_id):
summary_keywords = dict()
for section in get_sections_by_user(user_id):
section_summary_keywords = SectionSummaryKeywords(section.title)
for item in get_item_today_by_section(section.id, days=1):
keywords = get_item(item['feeds__items__id']).keywords.all()
if len(keywords) > 0:
section_summary_keywords.add_keyword(keywords, item['feeds__items__id'], item['feeds__items__title'])
summary_keywords[section.title] = section_summary_keywords.most_common()
return summary_keywords
class SectionSummaryKeywords:
def __init__(self, section_title):
self.section = section_title
self.keywords_counters = dict()
self.counts_counters = Counter()
def add_keyword(self, keywords, item_id, item_title):
exists = False
keyword = keywords[0]
for key in keywords:
if key in self.keywords_counters:
exists = True
keyword = key
break
if exists:
self.keywords_counters[keyword].update(item_id, item_title)
else:
keyword_counter = KeywordCounter(keyword, item_id, item_title)
self.keywords_counters[keyword] = keyword_counter
self.counts_counters[keyword] += 1
def most_common(self, number=None):
if not number and self.counts_counters:
number = floor_log(len(self.counts_counters))
        elif not number:
number = 0
return [self.keywords_counters[keyword[0]] for keyword in self.counts_counters.most_common(number)]
def __str__(self):
return "SSK: {} - {}".format(self.section, len(self.keywords_counters))
class KeywordCounter:
def __init__(self, keyword, item_id, item_title):
self.keyword = keyword
self.counts = 1
self.sample_title = item_title
self.items = dict()
self.items[item_id] = item_title
def update(self, item_id, item_title):
self.counts += 1
self.items[item_id] = item_title
def __str__(self):
return "KC: {} - {}".format(self.keyword, self.counts)
|
mit
| -6,827,074,556,194,668,000 | -4,072,608,520,598,983,700 | 33.039474 | 117 | 0.641794 | false |
bykof/billomapy
|
billomapy/resources.py
|
1
|
4081
|
"""
KEY and DATA_KEYS FOR THE API
"""
PROPERTY_VALUES = '-property-values'
PROPERTY_VALUE = '-property-value'
TAGS = '-tags'
TAG = '-tag'
ITEMS = '-items'
ITEM = '-item'
COMMENTS = '-comments'
COMMENT = '-comment'
PAYMENTS = '-payments'
PAYMENT = '-payment'
EMAIL_RECEIVERS = '-email-receivers'
EMAIL_RECEIVER = '-email-receiver'
DOCUMENTS = '-documents'
DOCUMENT = '-document'
CLIENTS = 'clients'
CLIENT = 'client'
CLIENT_PROPERTIES = CLIENT + PROPERTY_VALUES
CLIENT_PROPERTY = CLIENT + PROPERTY_VALUE
CLIENT_TAGS = CLIENT + TAGS
CLIENT_TAG = CLIENT + TAG
CONTACTS = 'contacts'
CONTACT = 'contact'
SUPPLIERS = 'suppliers'
SUPPLIER = 'supplier'
SUPPLIER_PROPERTIES = SUPPLIER + PROPERTY_VALUES
SUPPLIER_PROPERTY = SUPPLIER + PROPERTY_VALUE
SUPPLIER_TAGS = SUPPLIER + TAGS
SUPPLIER_TAG = SUPPLIER + TAG
ARTICLES = 'articles'
ARTICLE = 'article'
ARTICLE_PROPERTIES = ARTICLE + PROPERTY_VALUES
ARTICLE_PROPERTY = ARTICLE + PROPERTY_VALUE
ARTICLE_TAGS = ARTICLE + TAGS
ARTICLE_TAG = ARTICLE + TAG
UNITS = 'units'
UNIT = 'unit'
INVOICES = 'invoices'
INVOICE = 'invoice'
INVOICE_ITEMS = INVOICE + ITEMS
INVOICE_ITEM = INVOICE + ITEM
INVOICE_COMMENTS = INVOICE + COMMENTS
INVOICE_COMMENT = INVOICE + COMMENT
INVOICE_PAYMENTS = INVOICE + PAYMENTS
INVOICE_PAYMENT = INVOICE + PAYMENT
INVOICE_TAGS = INVOICE + TAGS
INVOICE_TAG = INVOICE + TAG
RECURRINGS = 'recurrings'
RECURRING = 'recurring'
RECURRING_ITEMS = RECURRING + ITEMS
RECURRING_ITEM = RECURRING + ITEM
RECURRING_TAGS = RECURRING + TAGS
RECURRING_TAG = RECURRING + TAG
RECURRING_EMAIL_RECEIVERS = RECURRING + EMAIL_RECEIVERS
RECURRING_EMAIL_RECEIVER = RECURRING + EMAIL_RECEIVER
INCOMINGS = 'incomings'
INCOMING = 'incoming'
INCOMING_COMMENTS = INCOMING + COMMENTS
INCOMING_COMMENT = INCOMING + COMMENT
INCOMING_PAYMENTS = INCOMING + PAYMENTS
INCOMING_PAYMENT = INCOMING + PAYMENT
INCOMING_PROPERTIES = INCOMING + PROPERTY_VALUES
INCOMING_PROPERTY = INCOMING + PROPERTY_VALUE
INCOMING_TAGS = INCOMING + TAGS
INCOMING_TAG = INCOMING + TAG
INBOX = 'inbox'
INBOX_DOCUMENTS = INBOX + DOCUMENTS
INBOX_DOCUMENT = INBOX + DOCUMENT
OFFERS = 'offers'
OFFER = 'offer'
OFFER_ITEMS = OFFER + ITEMS
OFFER_ITEM = OFFER + ITEM
OFFER_COMMENTS = OFFER + COMMENTS
OFFER_COMMENT = OFFER + COMMENT
OFFER_TAGS = OFFER + TAGS
OFFER_TAG = OFFER + TAG
CREDIT_NOTES = 'credit-notes'
CREDIT_NOTE = 'credit-note'
CREDIT_NOTE_ITEMS = CREDIT_NOTE + ITEMS
CREDIT_NOTE_ITEM = CREDIT_NOTE + ITEM
CREDIT_NOTE_COMMENTS = CREDIT_NOTE + COMMENTS
CREDIT_NOTE_COMMENT = CREDIT_NOTE + COMMENT
CREDIT_NOTE_PAYMENTS = CREDIT_NOTE + PAYMENTS
CREDIT_NOTE_PAYMENT = CREDIT_NOTE + PAYMENT
CREDIT_NOTE_TAGS = CREDIT_NOTE + TAGS
CREDIT_NOTE_TAG = CREDIT_NOTE + TAG
CONFIRMATIONS = 'confirmations'
CONFIRMATION = 'confirmation'
CONFIRMATION_ITEMS = CONFIRMATION + ITEMS
CONFIRMATION_ITEM = CONFIRMATION + ITEM
CONFIRMATION_COMMENTS = CONFIRMATION + COMMENTS
CONFIRMATION_COMMENT = CONFIRMATION + COMMENT
CONFIRMATION_TAGS = CONFIRMATION + TAGS
CONFIRMATION_TAG = CONFIRMATION + TAG
REMINDERS = 'reminders'
REMINDER = 'reminder'
REMINDER_ITEMS = REMINDER + ITEMS
REMINDER_ITEM = REMINDER + ITEM
REMINDER_TAGS = REMINDER + TAGS
REMINDER_TAG = REMINDER + TAG
DELIVERY_NOTES = 'delivery-notes'
DELIVERY_NOTE = 'delivery-note'
DELIVERY_NOTE_ITEMS = DELIVERY_NOTE + ITEMS
DELIVERY_NOTE_ITEM = DELIVERY_NOTE + ITEM
DELIVERY_NOTE_COMMENTS = DELIVERY_NOTE + COMMENTS
DELIVERY_NOTE_COMMENT = DELIVERY_NOTE + COMMENT
DELIVERY_NOTE_TAGS = DELIVERY_NOTE + TAGS
DELIVERY_NOTE_TAG = DELIVERY_NOTE + TAG
LETTERS = 'letters'
LETTER = 'letter'
LETTER_COMMENTS = LETTER + COMMENTS
LETTER_COMMENT = LETTER + COMMENT
LETTER_TAGS = LETTER + TAGS
LETTER_TAG = LETTER + TAG
TEMPLATES = 'templates'
TEMPLATE = 'template'
EMAIL_TEMPLATES = 'email-templates'
EMAIL_TEMPLATE = 'email-template'
USER = 'users'
USERS = 'users'
"""
COMMANDS for the API
"""
COMPLETE = 'complete'
PDF = 'pdf'
UPLOAD_SIGNATURE = 'upload-signature'
EMAIL = 'email'
CANCEL = 'cancel'
UNCANCEL = 'uncancel'
WIN = 'win'
LOSE = 'lose'
CLEAR = 'clear'
UNCLEAR = 'unclear'
|
apache-2.0
| -9,127,605,369,658,511,000 | -5,927,685,901,486,775,000 | 19.611111 | 55 | 0.735359 | false |
BjerknesClimateDataCentre/QuinCe
|
external_scripts/NRT/salinity_data/prepare_salinity.py
|
2
|
2681
|
# Prepare a cut-down version of the World Ocean Atlas 2018 salinity
# data to use with the AddSalinityPreprocessor.
# Input files are 0.25° seasonal files for the years 2005-2017,
# available from https://www.nodc.noaa.gov/cgi-bin/OC5/woa18/woa18.pl
# Files are:
#
# woa18_A5B7_s13_04.nc - Winter (DJF) = Season 1
# woa18_A5B7_s14_04.nc - Spring (MAM) = Season 2
# woa18_A5B7_s15_04.nc - Summer (JJA) = Season 3
# woa18_A5B7_s16_04.nc - Autumn (SON) = Season 4
#
#
# Output is a single netCDF file, containing the surface data for the full grid
# and four time steps.
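#
# Typical use (assuming the four seasonal input files sit in the working
# directory): run `python prepare_salinity.py`, which writes
# woa18_seasonal_surface_salinity.nc alongside them.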
import os
import netCDF4
WINTER_FILE = "woa18_A5B7_s13_04.nc"
SPRING_FILE = "woa18_A5B7_s14_04.nc"
SUMMER_FILE = "woa18_A5B7_s15_04.nc"
AUTUMN_FILE = "woa18_A5B7_s16_04.nc"
IN_VAR = "s_an"
OUTPUT_FILE = "woa18_seasonal_surface_salinity.nc"
def main():
if not init_check():
print("Initialisation check failed.")
exit()
init_output_file()
add_season(WINTER_FILE, 0)
add_season(SPRING_FILE, 1)
add_season(SUMMER_FILE, 2)
add_season(AUTUMN_FILE, 3)
# Initialisation check
def init_check():
check_result = True
if not file_exists(WINTER_FILE):
check_result = False
if not file_exists(SPRING_FILE):
check_result = False
if not file_exists(SUMMER_FILE):
check_result = False
    if not file_exists(AUTUMN_FILE):
check_result = False
return check_result
# See if a file exists
def file_exists(file):
exists = True
if not os.path.isfile(file):
print("Missing file %s" % file)
exists = False
return exists
def init_output_file():
# Get spatial dimensions from input file
nc = netCDF4.Dataset(WINTER_FILE, mode="r")
lons = nc.variables["lon"][:]
lats = nc.variables["lat"][:]
nc.close()
nc = netCDF4.Dataset(OUTPUT_FILE, format="NETCDF4_CLASSIC", mode="w")
londim = nc.createDimension("lon", len(lons))
lonvar = nc.createVariable("lon", "f", ("lon"), fill_value=-999)
lonvar.units = "degrees_east"
latdim = nc.createDimension("lat", len(lats))
latvar = nc.createVariable("lat", "f", ("lat"), fill_value=-999)
latvar.units = "degrees_north"
timedim = nc.createDimension("time", 4)
timevar = nc.createVariable("time", "i", ("time"), fill_value = -1)
timevar.units = "season"
timevar.long_name = "season"
salvar = nc.createVariable("salinity", "d", ("time", "lat", "lon"), fill_value=-999)
lonvar[:] = lons
latvar[:] = lats
timevar[:] = [1,2,3,4]
nc.close()
def add_season(season_file, season):
nc = netCDF4.Dataset(season_file, mode="r")
values = nc.variables[IN_VAR][0,0,:,:]
nc.close()
nc = netCDF4.Dataset(OUTPUT_FILE, mode="a")
nc.variables["salinity"][season,:,:] = values
nc.close()
if __name__ == '__main__':
main()
|
gpl-3.0
| -140,755,896,723,591,780 | 957,466,447,414,527,500 | 24.283019 | 86 | 0.674627 | false |
balloob/home-assistant
|
homeassistant/components/venstar/climate.py
|
16
|
11240
|
"""Support for Venstar WiFi Thermostats."""
import logging
from venstarcolortouch import VenstarColorTouch
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_AUTO,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
CONF_PASSWORD,
CONF_PIN,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
PRECISION_HALVES,
STATE_ON,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_STATE = "fan_state"
ATTR_HVAC_STATE = "hvac_mode"
CONF_HUMIDIFIER = "humidifier"
DEFAULT_SSL = False
VALID_FAN_STATES = [STATE_ON, HVAC_MODE_AUTO]
VALID_THERMOSTAT_MODES = [HVAC_MODE_HEAT, HVAC_MODE_COOL, HVAC_MODE_OFF, HVAC_MODE_AUTO]
HOLD_MODE_OFF = "off"
HOLD_MODE_TEMPERATURE = "temperature"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HUMIDIFIER, default=True): cv.boolean,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=5): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PIN): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Venstar thermostat."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
pin = config.get(CONF_PIN)
host = config.get(CONF_HOST)
timeout = config.get(CONF_TIMEOUT)
humidifier = config.get(CONF_HUMIDIFIER)
protocol = "https" if config[CONF_SSL] else "http"
client = VenstarColorTouch(
addr=host,
timeout=timeout,
user=username,
password=password,
pin=pin,
proto=protocol,
)
add_entities([VenstarThermostat(client, humidifier)], True)
class VenstarThermostat(ClimateEntity):
"""Representation of a Venstar thermostat."""
def __init__(self, client, humidifier):
"""Initialize the thermostat."""
self._client = client
self._humidifier = humidifier
self._mode_map = {
HVAC_MODE_HEAT: self._client.MODE_HEAT,
HVAC_MODE_COOL: self._client.MODE_COOL,
HVAC_MODE_AUTO: self._client.MODE_AUTO,
}
def update(self):
"""Update the data from the thermostat."""
info_success = self._client.update_info()
sensor_success = self._client.update_sensors()
if not info_success or not sensor_success:
_LOGGER.error("Failed to update data")
@property
def supported_features(self):
"""Return the list of supported features."""
features = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_PRESET_MODE
if self._client.mode == self._client.MODE_AUTO:
features |= SUPPORT_TARGET_TEMPERATURE_RANGE
if self._humidifier and hasattr(self._client, "hum_active"):
features |= SUPPORT_TARGET_HUMIDITY
return features
@property
def name(self):
"""Return the name of the thermostat."""
return self._client.name
@property
def precision(self):
"""Return the precision of the system.
Venstar temperature values are passed back and forth in the
API in C or F, with half-degree accuracy.
"""
return PRECISION_HALVES
@property
def temperature_unit(self):
"""Return the unit of measurement, as defined by the API."""
if self._client.tempunits == self._client.TEMPUNITS_F:
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return VALID_FAN_STATES
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return VALID_THERMOSTAT_MODES
@property
def current_temperature(self):
"""Return the current temperature."""
return self._client.get_indoor_temp()
@property
def current_humidity(self):
"""Return the current humidity."""
return self._client.get_indoor_humidity()
@property
def hvac_mode(self):
"""Return current operation mode ie. heat, cool, auto."""
if self._client.mode == self._client.MODE_HEAT:
return HVAC_MODE_HEAT
if self._client.mode == self._client.MODE_COOL:
return HVAC_MODE_COOL
if self._client.mode == self._client.MODE_AUTO:
return HVAC_MODE_AUTO
return HVAC_MODE_OFF
@property
def hvac_action(self):
"""Return current operation mode ie. heat, cool, auto."""
if self._client.state == self._client.STATE_IDLE:
return CURRENT_HVAC_IDLE
if self._client.state == self._client.STATE_HEATING:
return CURRENT_HVAC_HEAT
if self._client.state == self._client.STATE_COOLING:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_OFF
@property
def fan_mode(self):
"""Return the current fan mode."""
if self._client.fan == self._client.FAN_ON:
return FAN_ON
return FAN_AUTO
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
return {
ATTR_FAN_STATE: self._client.fanstate,
ATTR_HVAC_STATE: self._client.state,
}
@property
def target_temperature(self):
"""Return the target temperature we try to reach."""
if self._client.mode == self._client.MODE_HEAT:
return self._client.heattemp
if self._client.mode == self._client.MODE_COOL:
return self._client.cooltemp
return None
@property
def target_temperature_low(self):
"""Return the lower bound temp if auto mode is on."""
if self._client.mode == self._client.MODE_AUTO:
return self._client.heattemp
return None
@property
def target_temperature_high(self):
"""Return the upper bound temp if auto mode is on."""
if self._client.mode == self._client.MODE_AUTO:
return self._client.cooltemp
return None
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
return self._client.hum_setpoint
@property
def min_humidity(self):
"""Return the minimum humidity. Hardcoded to 0 in API."""
return 0
@property
def max_humidity(self):
"""Return the maximum humidity. Hardcoded to 60 in API."""
return 60
@property
def preset_mode(self):
"""Return current preset."""
if self._client.away:
return PRESET_AWAY
if self._client.schedule == 0:
return HOLD_MODE_TEMPERATURE
return PRESET_NONE
@property
def preset_modes(self):
"""Return valid preset modes."""
return [PRESET_NONE, PRESET_AWAY, HOLD_MODE_TEMPERATURE]
def _set_operation_mode(self, operation_mode):
"""Change the operation mode (internal)."""
if operation_mode == HVAC_MODE_HEAT:
success = self._client.set_mode(self._client.MODE_HEAT)
elif operation_mode == HVAC_MODE_COOL:
success = self._client.set_mode(self._client.MODE_COOL)
elif operation_mode == HVAC_MODE_AUTO:
success = self._client.set_mode(self._client.MODE_AUTO)
else:
success = self._client.set_mode(self._client.MODE_OFF)
if not success:
_LOGGER.error("Failed to change the operation mode")
return success
def set_temperature(self, **kwargs):
"""Set a new target temperature."""
set_temp = True
operation_mode = kwargs.get(ATTR_HVAC_MODE)
temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temperature = kwargs.get(ATTR_TEMPERATURE)
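        # If the call also requests a different HVAC mode, switch modes first;
        # the setpoints below are only written when that switch succeeds.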
if operation_mode and self._mode_map.get(operation_mode) != self._client.mode:
set_temp = self._set_operation_mode(operation_mode)
if set_temp:
if (
self._mode_map.get(operation_mode, self._client.mode)
== self._client.MODE_HEAT
):
success = self._client.set_setpoints(temperature, self._client.cooltemp)
elif (
self._mode_map.get(operation_mode, self._client.mode)
== self._client.MODE_COOL
):
success = self._client.set_setpoints(self._client.heattemp, temperature)
elif (
self._mode_map.get(operation_mode, self._client.mode)
== self._client.MODE_AUTO
):
success = self._client.set_setpoints(temp_low, temp_high)
else:
success = False
_LOGGER.error(
"The thermostat is currently not in a mode "
"that supports target temperature: %s",
operation_mode,
)
if not success:
_LOGGER.error("Failed to change the temperature")
def set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
if fan_mode == STATE_ON:
success = self._client.set_fan(self._client.FAN_ON)
else:
success = self._client.set_fan(self._client.FAN_AUTO)
if not success:
_LOGGER.error("Failed to change the fan mode")
def set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
self._set_operation_mode(hvac_mode)
def set_humidity(self, humidity):
"""Set new target humidity."""
success = self._client.set_hum_setpoint(humidity)
if not success:
_LOGGER.error("Failed to change the target humidity level")
def set_preset_mode(self, preset_mode):
"""Set the hold mode."""
if preset_mode == PRESET_AWAY:
success = self._client.set_away(self._client.AWAY_AWAY)
elif preset_mode == HOLD_MODE_TEMPERATURE:
success = self._client.set_away(self._client.AWAY_HOME)
success = success and self._client.set_schedule(0)
elif preset_mode == PRESET_NONE:
success = self._client.set_away(self._client.AWAY_HOME)
success = success and self._client.set_schedule(1)
else:
_LOGGER.error("Unknown hold mode: %s", preset_mode)
success = False
if not success:
_LOGGER.error("Failed to change the schedule/hold state")
|
apache-2.0
| -430,798,134,445,319,800 | 7,320,322,056,327,429,000 | 31.022792 | 88 | 0.606228 | false |
yamila-moreno/nikola
|
nikola/data/themes/base/messages/messages_da.py
|
6
|
1441
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "%d min. tilbage at læse",
"(active)": "",
"Also available in:": "Fås også i:",
"Archive": "Arkiv",
"Categories": "Kategorier",
"Comments": "Kommentarer",
"LANGUAGE": "Dansk",
"Languages:": "Sprog:",
"More posts about %s": "Yderligere indlæg om %s",
"Newer posts": "Nyere indlæg",
"Next post": "Næste indlæg",
"No posts found.": "Søgningen gav ingen resultater.",
"Nothing found.": "Søgningen gav ingen resultater.",
"Older posts": "Ældre indlæg",
"Original site": "Oprindeligt hjemmeside",
"Posted:": "Opslået:",
"Posts about %s": "Indlæg om %s",
"Posts for year %s": "Indlæg for %s",
"Posts for {month} {day}, {year}": "Indlæs for {month} {day}, {year}",
"Posts for {month} {year}": "Indlæg for {month} {year}",
"Previous post": "Tidligere indlæg",
"Publication date": "Udgivelsesdato",
"RSS feed": "RSS-nyhedskilde",
"Read in English": "Læs på dansk",
"Read more": "Læs mere",
"Skip to main content": "Hop direkte til hovedindhold",
"Source": "Kilde",
"Subcategories:": "",
"Tags and Categories": "Nøgleord og kategorier",
"Tags": "Nøgleord",
"Write your page here.": "",
"Write your post here.": "",
"old posts, page %d": "gamle indlæg, side %d",
"page %d": "side %d",
}
|
mit
| -7,012,272,974,438,922,000 | -6,980,059,405,190,528,000 | 35.358974 | 74 | 0.588858 | false |
mhvk/numpy
|
numpy/core/tests/test_memmap.py
|
5
|
7469
|
import sys
import os
import mmap
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
break_cycles
)
class TestMemmap:
def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3, 4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def teardown(self):
self.tmpfp.close()
self.data = None
if IS_PYPY:
break_cycles()
break_cycles()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self, tmp_path):
tmpname = tmp_path / 'mmap'
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
def test_unnamed_file(self):
with TemporaryFile() as f:
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
assert_equal(offset, fp.offset)
assert_equal(mode, fp.mode)
del fp
def test_filename(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = Path(os.path.abspath(tmpname))
fp[:] = self.data[:]
assert_equal(abspath, fp.filename)
b = fp[:1]
assert_equal(abspath, b.filename)
del b
del fp
def test_path(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
# os.path.realpath does not resolve symlinks on Windows
# see: https://bugs.python.org/issue9949
# use Path.resolve, just as memmap class does internally
abspath = str(Path(tmpname).resolve())
fp[:] = self.data[:]
assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
assert_equal(fp.filename, self.tmpfp.name)
@pytest.mark.skipif(sys.platform == 'gnu0',
reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
def test_arithmetic_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[(1, 2), (2, 3)]
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert_(new1.base is fp)
assert_(new2.base is fp)
new_array = asarray(fp)
assert_(new_array.base is fp)
def test_ufunc_return_ndarray(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
with suppress_warnings() as sup:
sup.filter(FutureWarning, "np.average currently does not preserve")
for unary_op in [sum, average, product]:
result = unary_op(fp)
assert_(isscalar(result))
assert_(result.__class__ is self.data[0, 0].__class__)
assert_(unary_op(fp, axis=0).__class__ is ndarray)
assert_(unary_op(fp, axis=1).__class__ is ndarray)
for binary_op in [add, subtract, multiply]:
assert_(binary_op(fp, self.data).__class__ is ndarray)
assert_(binary_op(self.data, fp).__class__ is ndarray)
assert_(binary_op(fp, fp).__class__ is ndarray)
fp += 1
assert(fp.__class__ is memmap)
add(fp, 1, out=fp)
assert(fp.__class__ is memmap)
def test_getitem(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
assert_(fp[1:, :-1].__class__ is memmap)
# Fancy indexing returns a copy that is not memmapped
assert_(fp[[0, 1]].__class__ is ndarray)
def test_memmap_subclass(self):
class MemmapSubClass(memmap):
pass
fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
# We keep previous behavior for subclasses of memmap, i.e. the
# ufunc and __getitem__ output is never turned into a ndarray
assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
assert_(sum(fp).__class__ is MemmapSubClass)
assert_(fp[1:, :-1].__class__ is MemmapSubClass)
assert(fp[[0, 1]].__class__ is MemmapSubClass)
def test_mmap_offset_greater_than_allocation_granularity(self):
size = 5 * mmap.ALLOCATIONGRANULARITY
offset = mmap.ALLOCATIONGRANULARITY + 1
fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
assert_(fp.offset == offset)
def test_no_shape(self):
self.tmpfp.write(b'a'*16)
mm = memmap(self.tmpfp, dtype='float64')
assert_equal(mm.shape, (2,))
def test_empty_array(self):
# gh-12653
with pytest.raises(ValueError, match='empty file'):
memmap(self.tmpfp, shape=(0,4), mode='w+')
self.tmpfp.write(b'\0')
# ok now the file is not empty
memmap(self.tmpfp, shape=(0,4), mode='w+')
|
bsd-3-clause
| -8,131,594,623,377,786,000 | -7,521,276,311,761,904,000 | 33.739535 | 79 | 0.566876 | false |
ychen820/microblog
|
flask/lib/python2.7/site-packages/openid/server/server.py
|
142
|
65667
|
# -*- test-case-name: openid.test.test_server -*-
"""OpenID server protocol and logic.
Overview
========
An OpenID server must perform three tasks:
1. Examine the incoming request to determine its nature and validity.
2. Make a decision about how to respond to this request.
3. Format the response according to the protocol.
The first and last of these tasks may be performed by
the L{decodeRequest<Server.decodeRequest>} and
L{encodeResponse<Server.encodeResponse>} methods of the
L{Server} object. Who gets to do the intermediate task -- deciding
how to respond to the request -- will depend on what type of request it
is.
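For example (a sketch; C{query} and C{handle_checkid} are hypothetical
names for your parsed request arguments and your application's
identity-approval policy, and C{server} is an instance of L{Server})::
    oidrequest = server.decodeRequest(query)
    if oidrequest.mode in BROWSER_REQUEST_MODES:
        oidresponse = handle_checkid(oidrequest)
    else:
        oidresponse = server.handleRequest(oidrequest)
    webresponse = server.encodeResponse(oidresponse)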
If it's a request to authenticate a user (a X{C{checkid_setup}} or
X{C{checkid_immediate}} request), you need to decide if you will assert
that this user may claim the identity in question. Exactly how you do
that is a matter of application policy, but it generally involves making
sure the user has an account with your system and is logged in, checking
to see if that identity is hers to claim, and verifying with the user that
she does consent to releasing that information to the party making the
request.
Examine the properties of the L{CheckIDRequest} object, optionally
check L{CheckIDRequest.returnToVerified}, and and when you've come
to a decision, form a response by calling L{CheckIDRequest.answer}.
Other types of requests relate to establishing associations between client
and server and verifying the authenticity of previous communications.
L{Server} contains all the logic and data necessary to respond to
such requests; just pass the request to L{Server.handleRequest}.
OpenID Extensions
=================
Do you want to provide other information for your users
in addition to authentication? Version 2.0 of the OpenID
protocol allows consumers to add extensions to their requests.
For example, with sites using the U{Simple Registration
Extension<http://openid.net/specs/openid-simple-registration-extension-1_0.html>},
a user can agree to have their nickname and e-mail address sent to a
site when they sign up.
Since extensions do not change the way OpenID authentication works,
code to handle extension requests may be completely separate from the
L{OpenIDRequest} class here. But you'll likely want data sent back by
your extension to be signed. L{OpenIDResponse} provides methods with
which you can add data to it which can be signed with the other data in
the OpenID signature.
For example::
# when request is a checkid_* request
response = request.answer(True)
# this will a signed 'openid.sreg.timezone' parameter to the response
# as well as a namespace declaration for the openid.sreg namespace
response.fields.setArg('http://openid.net/sreg/1.0', 'timezone', 'America/Los_Angeles')
There are helper modules for a number of extensions, including
L{Attribute Exchange<openid.extensions.ax>},
L{PAPE<openid.extensions.pape>}, and
L{Simple Registration<openid.extensions.sreg>} in the L{openid.extensions}
package.
Stores
======
The OpenID server needs to maintain state between requests in order
to function. Its mechanism for doing this is called a store. The
store interface is defined in C{L{openid.store.interface.OpenIDStore}}.
Additionally, several concrete store implementations are provided, so that
most sites won't need to implement a custom store. For a store backed
by flat files on disk, see C{L{openid.store.filestore.FileOpenIDStore}}.
For stores based on MySQL or SQLite, see the C{L{openid.store.sqlstore}}
module.
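For example, a server backed by the file store might be constructed like
this (a sketch; the store path and endpoint URL are placeholders)::
    from openid.store.filestore import FileOpenIDStore
    store = FileOpenIDStore('/var/lib/myapp/openid-store')
    oidserver = Server(store, 'https://example.com/openid/endpoint')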
Upgrading
=========
From 1.0 to 1.1
---------------
The keys by which a server looks up associations in its store have changed
in version 1.2 of this library. If your store has entries created from
version 1.0 code, you should empty it.
From 1.1 to 2.0
---------------
One of the additions to the OpenID protocol was a specified nonce
format for one-way nonces. As a result, the nonce table in the store
has changed. You'll need to run contrib/upgrade-store-1.1-to-2.0 to
upgrade your store, or you'll encounter errors about the wrong number
of columns in the oid_nonces table.
If you've written your own custom store or code that interacts
directly with it, you'll need to review the change notes in
L{openid.store.interface}.
@group Requests: OpenIDRequest, AssociateRequest, CheckIDRequest,
CheckAuthRequest
@group Responses: OpenIDResponse
@group HTTP Codes: HTTP_OK, HTTP_REDIRECT, HTTP_ERROR
@group Response Encodings: ENCODE_KVFORM, ENCODE_HTML_FORM, ENCODE_URL
"""
import time, warnings
from copy import deepcopy
from openid import cryptutil
from openid import oidutil
from openid import kvform
from openid.dh import DiffieHellman
from openid.store.nonce import mkNonce
from openid.server.trustroot import TrustRoot, verifyReturnTo
from openid.association import Association, default_negotiator, getSecretSize
from openid.message import Message, InvalidOpenIDNamespace, \
OPENID_NS, OPENID2_NS, IDENTIFIER_SELECT, OPENID1_URL_LIMIT
from openid.urinorm import urinorm
HTTP_OK = 200
HTTP_REDIRECT = 302
HTTP_ERROR = 400
BROWSER_REQUEST_MODES = ['checkid_setup', 'checkid_immediate']
ENCODE_KVFORM = ('kvform',)
ENCODE_URL = ('URL/redirect',)
ENCODE_HTML_FORM = ('HTML form',)
UNUSED = None
class OpenIDRequest(object):
"""I represent an incoming OpenID request.
@cvar mode: the C{X{openid.mode}} of this request.
@type mode: str
"""
mode = None
class CheckAuthRequest(OpenIDRequest):
"""A request to verify the validity of a previous response.
@cvar mode: "X{C{check_authentication}}"
@type mode: str
@ivar assoc_handle: The X{association handle} the response was signed with.
@type assoc_handle: str
@ivar signed: The message with the signature which wants checking.
@type signed: L{Message}
@ivar invalidate_handle: An X{association handle} the client is asking
about the validity of. Optional, may be C{None}.
@type invalidate_handle: str
@see: U{OpenID Specs, Mode: check_authentication
<http://openid.net/specs.bml#mode-check_authentication>}
"""
mode = "check_authentication"
required_fields = ["identity", "return_to", "response_nonce"]
def __init__(self, assoc_handle, signed, invalidate_handle=None):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<CheckAuthRequest>} for their descriptions.
@type assoc_handle: str
@type signed: L{Message}
@type invalidate_handle: str
"""
self.assoc_handle = assoc_handle
self.signed = signed
self.invalidate_handle = invalidate_handle
self.namespace = OPENID2_NS
def fromMessage(klass, message, op_endpoint=UNUSED):
"""Construct me from an OpenID Message.
@param message: An OpenID check_authentication Message
@type message: L{openid.message.Message}
@returntype: L{CheckAuthRequest}
"""
self = klass.__new__(klass)
self.message = message
self.namespace = message.getOpenIDNamespace()
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
self.sig = message.getArg(OPENID_NS, 'sig')
if (self.assoc_handle is None or
self.sig is None):
fmt = "%s request missing required parameter from message %s"
raise ProtocolError(
message, text=fmt % (self.mode, message))
self.invalidate_handle = message.getArg(OPENID_NS, 'invalidate_handle')
self.signed = message.copy()
# openid.mode is currently check_authentication because
# that's the mode of this request. But the signature
# was made on something with a different openid.mode.
# http://article.gmane.org/gmane.comp.web.openid.general/537
if self.signed.hasKey(OPENID_NS, "mode"):
self.signed.setArg(OPENID_NS, "mode", "id_res")
return self
fromMessage = classmethod(fromMessage)
def answer(self, signatory):
"""Respond to this request.
Given a L{Signatory}, I can check the validity of the signature and
the X{C{invalidate_handle}}.
@param signatory: The L{Signatory} to use to check the signature.
@type signatory: L{Signatory}
@returns: A response with an X{C{is_valid}} (and, if
appropriate X{C{invalidate_handle}}) field.
@returntype: L{OpenIDResponse}
"""
is_valid = signatory.verify(self.assoc_handle, self.signed)
# Now invalidate that assoc_handle so it this checkAuth message cannot
# be replayed.
signatory.invalidate(self.assoc_handle, dumb=True)
response = OpenIDResponse(self)
valid_str = (is_valid and "true") or "false"
response.fields.setArg(OPENID_NS, 'is_valid', valid_str)
if self.invalidate_handle:
assoc = signatory.getAssociation(self.invalidate_handle, dumb=False)
if not assoc:
response.fields.setArg(
OPENID_NS, 'invalidate_handle', self.invalidate_handle)
return response
def __str__(self):
if self.invalidate_handle:
ih = " invalidate? %r" % (self.invalidate_handle,)
else:
ih = ""
s = "<%s handle: %r sig: %r: signed: %r%s>" % (
self.__class__.__name__, self.assoc_handle,
self.sig, self.signed, ih)
return s
class PlainTextServerSession(object):
"""An object that knows how to handle association requests with no
session type.
@cvar session_type: The session_type for this association
session. There is no type defined for plain-text in the OpenID
specification, so we use 'no-encryption'.
@type session_type: str
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
@see: AssociateRequest
"""
session_type = 'no-encryption'
allowed_assoc_types = ['HMAC-SHA1', 'HMAC-SHA256']
def fromMessage(cls, unused_request):
return cls()
fromMessage = classmethod(fromMessage)
def answer(self, secret):
return {'mac_key': oidutil.toBase64(secret)}
class DiffieHellmanSHA1ServerSession(object):
"""An object that knows how to handle association requests with the
Diffie-Hellman session type.
@cvar session_type: The session_type for this association
session.
@type session_type: str
@ivar dh: The Diffie-Hellman algorithm values for this request
@type dh: DiffieHellman
@ivar consumer_pubkey: The public key sent by the consumer in the
associate request
@type consumer_pubkey: long
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
@see: AssociateRequest
"""
session_type = 'DH-SHA1'
hash_func = staticmethod(cryptutil.sha1)
allowed_assoc_types = ['HMAC-SHA1']
def __init__(self, dh, consumer_pubkey):
self.dh = dh
self.consumer_pubkey = consumer_pubkey
def fromMessage(cls, message):
"""
@param message: The associate request message
@type message: openid.message.Message
@returntype: L{DiffieHellmanSHA1ServerSession}
@raises ProtocolError: When parameters required to establish the
session are missing.
"""
dh_modulus = message.getArg(OPENID_NS, 'dh_modulus')
dh_gen = message.getArg(OPENID_NS, 'dh_gen')
if (dh_modulus is None and dh_gen is not None or
dh_gen is None and dh_modulus is not None):
if dh_modulus is None:
missing = 'modulus'
else:
missing = 'generator'
raise ProtocolError(message,
'If non-default modulus or generator is '
'supplied, both must be supplied. Missing %s'
% (missing,))
if dh_modulus or dh_gen:
dh_modulus = cryptutil.base64ToLong(dh_modulus)
dh_gen = cryptutil.base64ToLong(dh_gen)
dh = DiffieHellman(dh_modulus, dh_gen)
else:
dh = DiffieHellman.fromDefaults()
consumer_pubkey = message.getArg(OPENID_NS, 'dh_consumer_public')
if consumer_pubkey is None:
raise ProtocolError(message, "Public key for DH-SHA1 session "
"not found in message %s" % (message,))
consumer_pubkey = cryptutil.base64ToLong(consumer_pubkey)
return cls(dh, consumer_pubkey)
fromMessage = classmethod(fromMessage)
def answer(self, secret):
mac_key = self.dh.xorSecret(self.consumer_pubkey,
secret,
self.hash_func)
return {
'dh_server_public': cryptutil.longToBase64(self.dh.public),
'enc_mac_key': oidutil.toBase64(mac_key),
}
class DiffieHellmanSHA256ServerSession(DiffieHellmanSHA1ServerSession):
session_type = 'DH-SHA256'
hash_func = staticmethod(cryptutil.sha256)
allowed_assoc_types = ['HMAC-SHA256']
class AssociateRequest(OpenIDRequest):
"""A request to establish an X{association}.
    @cvar mode: "X{C{associate}}"
@type mode: str
@ivar assoc_type: The type of association. The protocol currently only
defines one value for this, "X{C{HMAC-SHA1}}".
@type assoc_type: str
@ivar session: An object that knows how to handle association
requests of a certain type.
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
"""
mode = "associate"
session_classes = {
'no-encryption': PlainTextServerSession,
'DH-SHA1': DiffieHellmanSHA1ServerSession,
'DH-SHA256': DiffieHellmanSHA256ServerSession,
}
def __init__(self, session, assoc_type):
"""Construct me.
The session is assigned directly as a class attribute. See my
L{class documentation<AssociateRequest>} for its description.
"""
super(AssociateRequest, self).__init__()
self.session = session
self.assoc_type = assoc_type
self.namespace = OPENID2_NS
def fromMessage(klass, message, op_endpoint=UNUSED):
"""Construct me from an OpenID Message.
@param message: The OpenID associate request
@type message: openid.message.Message
@returntype: L{AssociateRequest}
"""
if message.isOpenID1():
session_type = message.getArg(OPENID_NS, 'session_type')
if session_type == 'no-encryption':
oidutil.log('Received OpenID 1 request with a no-encryption '
                            'association session type. Continuing anyway.')
elif not session_type:
session_type = 'no-encryption'
else:
session_type = message.getArg(OPENID2_NS, 'session_type')
if session_type is None:
raise ProtocolError(message,
text="session_type missing from request")
try:
session_class = klass.session_classes[session_type]
except KeyError:
raise ProtocolError(message,
"Unknown session type %r" % (session_type,))
try:
session = session_class.fromMessage(message)
except ValueError, why:
raise ProtocolError(message, 'Error parsing %s session: %s' %
(session_class.session_type, why[0]))
assoc_type = message.getArg(OPENID_NS, 'assoc_type', 'HMAC-SHA1')
if assoc_type not in session.allowed_assoc_types:
fmt = 'Session type %s does not support association type %s'
raise ProtocolError(message, fmt % (session_type, assoc_type))
self = klass(session, assoc_type)
self.message = message
self.namespace = message.getOpenIDNamespace()
return self
fromMessage = classmethod(fromMessage)
def answer(self, assoc):
"""Respond to this request with an X{association}.
@param assoc: The association to send back.
@type assoc: L{openid.association.Association}
@returns: A response with the association information, encrypted
to the consumer's X{public key} if appropriate.
@returntype: L{OpenIDResponse}
"""
response = OpenIDResponse(self)
response.fields.updateArgs(OPENID_NS, {
'expires_in': '%d' % (assoc.getExpiresIn(),),
'assoc_type': self.assoc_type,
'assoc_handle': assoc.handle,
})
response.fields.updateArgs(OPENID_NS,
self.session.answer(assoc.secret))
if not (self.session.session_type == 'no-encryption' and
self.message.isOpenID1()):
# The session type "no-encryption" did not have a name
# in OpenID v1, it was just omitted.
response.fields.setArg(
OPENID_NS, 'session_type', self.session.session_type)
return response
def answerUnsupported(self, message, preferred_association_type=None,
preferred_session_type=None):
"""Respond to this request indicating that the association
type or association session type is not supported."""
if self.message.isOpenID1():
raise ProtocolError(self.message)
response = OpenIDResponse(self)
response.fields.setArg(OPENID_NS, 'error_code', 'unsupported-type')
response.fields.setArg(OPENID_NS, 'error', message)
if preferred_association_type:
response.fields.setArg(
OPENID_NS, 'assoc_type', preferred_association_type)
if preferred_session_type:
response.fields.setArg(
OPENID_NS, 'session_type', preferred_session_type)
return response
class CheckIDRequest(OpenIDRequest):
"""A request to confirm the identity of a user.
This class handles requests for openid modes X{C{checkid_immediate}}
and X{C{checkid_setup}}.
@cvar mode: "X{C{checkid_immediate}}" or "X{C{checkid_setup}}"
@type mode: str
@ivar immediate: Is this an immediate-mode request?
@type immediate: bool
@ivar identity: The OP-local identifier being checked.
@type identity: str
@ivar claimed_id: The claimed identifier. Not present in OpenID 1.x
messages.
@type claimed_id: str
@ivar trust_root: "Are you Frank?" asks the checkid request. "Who wants
to know?" C{trust_root}, that's who. This URL identifies the party
making the request, and the user will use that to make her decision
about what answer she trusts them to have. Referred to as "realm" in
OpenID 2.0.
@type trust_root: str
@ivar return_to: The URL to send the user agent back to to reply to this
request.
@type return_to: str
@ivar assoc_handle: Provided in smart mode requests, a handle for a
previously established association. C{None} for dumb mode requests.
@type assoc_handle: str
"""
def __init__(self, identity, return_to, trust_root=None, immediate=False,
assoc_handle=None, op_endpoint=None, claimed_id=None):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<CheckIDRequest>} for their descriptions.
@raises MalformedReturnURL: When the C{return_to} URL is not a URL.
"""
self.assoc_handle = assoc_handle
self.identity = identity
self.claimed_id = claimed_id or identity
self.return_to = return_to
self.trust_root = trust_root or return_to
self.op_endpoint = op_endpoint
assert self.op_endpoint is not None
if immediate:
self.immediate = True
self.mode = "checkid_immediate"
else:
self.immediate = False
self.mode = "checkid_setup"
if self.return_to is not None and \
not TrustRoot.parse(self.return_to):
raise MalformedReturnURL(None, self.return_to)
if not self.trustRootValid():
raise UntrustedReturnURL(None, self.return_to, self.trust_root)
self.message = None
def _getNamespace(self):
warnings.warn('The "namespace" attribute of CheckIDRequest objects '
'is deprecated. Use "message.getOpenIDNamespace()" '
'instead', DeprecationWarning, stacklevel=2)
return self.message.getOpenIDNamespace()
namespace = property(_getNamespace)
def fromMessage(klass, message, op_endpoint):
"""Construct me from an OpenID message.
@raises ProtocolError: When not all required parameters are present
in the message.
@raises MalformedReturnURL: When the C{return_to} URL is not a URL.
@raises UntrustedReturnURL: When the C{return_to} URL is outside
the C{trust_root}.
@param message: An OpenID checkid_* request Message
@type message: openid.message.Message
@param op_endpoint: The endpoint URL of the server that this
message was sent to.
@type op_endpoint: str
@returntype: L{CheckIDRequest}
"""
self = klass.__new__(klass)
self.message = message
self.op_endpoint = op_endpoint
mode = message.getArg(OPENID_NS, 'mode')
if mode == "checkid_immediate":
self.immediate = True
self.mode = "checkid_immediate"
else:
self.immediate = False
self.mode = "checkid_setup"
self.return_to = message.getArg(OPENID_NS, 'return_to')
if message.isOpenID1() and not self.return_to:
fmt = "Missing required field 'return_to' from %r"
raise ProtocolError(message, text=fmt % (message,))
self.identity = message.getArg(OPENID_NS, 'identity')
self.claimed_id = message.getArg(OPENID_NS, 'claimed_id')
if message.isOpenID1():
if self.identity is None:
s = "OpenID 1 message did not contain openid.identity"
raise ProtocolError(message, text=s)
else:
if self.identity and not self.claimed_id:
s = ("OpenID 2.0 message contained openid.identity but not "
"claimed_id")
raise ProtocolError(message, text=s)
elif self.claimed_id and not self.identity:
s = ("OpenID 2.0 message contained openid.claimed_id but not "
"identity")
raise ProtocolError(message, text=s)
# There's a case for making self.trust_root be a TrustRoot
# here. But if TrustRoot isn't currently part of the "public" API,
# I'm not sure it's worth doing.
if message.isOpenID1():
trust_root_param = 'trust_root'
else:
trust_root_param = 'realm'
# Using 'or' here is slightly different than sending a default
# argument to getArg, as it will treat no value and an empty
# string as equivalent.
self.trust_root = (message.getArg(OPENID_NS, trust_root_param)
or self.return_to)
if not message.isOpenID1():
if self.return_to is self.trust_root is None:
raise ProtocolError(message, "openid.realm required when " +
"openid.return_to absent")
self.assoc_handle = message.getArg(OPENID_NS, 'assoc_handle')
# Using TrustRoot.parse here is a bit misleading, as we're not
# parsing return_to as a trust root at all. However, valid URLs
# are valid trust roots, so we can use this to get an idea if it
# is a valid URL. Not all trust roots are valid return_to URLs,
# however (particularly ones with wildcards), so this is still a
# little sketchy.
if self.return_to is not None and \
not TrustRoot.parse(self.return_to):
raise MalformedReturnURL(message, self.return_to)
# I first thought that checking to see if the return_to is within
# the trust_root is premature here, a logic-not-decoding thing. But
# it was argued that this is really part of data validation. A
# request with an invalid trust_root/return_to is broken regardless of
# application, right?
if not self.trustRootValid():
raise UntrustedReturnURL(message, self.return_to, self.trust_root)
return self
fromMessage = classmethod(fromMessage)
def idSelect(self):
"""Is the identifier to be selected by the IDP?
@returntype: bool
"""
# So IDPs don't have to import the constant
return self.identity == IDENTIFIER_SELECT
def trustRootValid(self):
"""Is my return_to under my trust_root?
@returntype: bool
"""
if not self.trust_root:
return True
tr = TrustRoot.parse(self.trust_root)
if tr is None:
raise MalformedTrustRoot(self.message, self.trust_root)
if self.return_to is not None:
return tr.validateURL(self.return_to)
else:
return True
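    # Illustration with hypothetical values: for a trust_root of
    # 'http://*.example.com/', a return_to of 'http://www.example.com/complete'
    # validates, while 'http://attacker.example.org/complete' does not.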
def returnToVerified(self):
"""Does the relying party publish the return_to URL for this
response under the realm? It is up to the provider to set a
policy for what kinds of realms should be allowed. This
return_to URL verification reduces vulnerability to data-theft
attacks based on open proxies, cross-site-scripting, or open
redirectors.
This check should only be performed after making sure that the
return_to URL matches the realm.
@see: L{trustRootValid}
@raises openid.yadis.discover.DiscoveryFailure: if the realm
URL does not support Yadis discovery (and so does not
support the verification process).
@raises openid.fetchers.HTTPFetchingError: if the realm URL
is not reachable. When this is the case, the RP may be hosted
on the user's intranet.
@returntype: bool
@returns: True if the realm publishes a document with the
return_to URL listed
@since: 2.1.0
"""
return verifyReturnTo(self.trust_root, self.return_to)
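    # Typical guarded call; the exception classes are the ones named in the
    # docstring above and would need to be imported wherever this is used:
    #
    #   try:
    #       rp_publishes_return_to = request.returnToVerified()
    #   except (DiscoveryFailure, HTTPFetchingError):
    #       rp_publishes_return_to = False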
def answer(self, allow, server_url=None, identity=None, claimed_id=None):
"""Respond to this request.
@param allow: Allow this user to claim this identity, and allow the
consumer to have this information?
@type allow: bool
@param server_url: DEPRECATED. Passing C{op_endpoint} to the
L{Server} constructor makes this optional.
When an OpenID 1.x immediate mode request does not succeed,
it gets back a URL where the request may be carried out
in a not-so-immediate fashion. Pass my URL in here (the
fully qualified address of this server's endpoint, i.e.
C{http://example.com/server}), and I will use it as a base for the
URL for a new request.
Optional for requests where C{CheckIDRequest.immediate} is C{False}
or C{allow} is C{True}.
@type server_url: str
@param identity: The OP-local identifier to answer with. Only for use
when the relying party requested identifier selection.
@type identity: str or None
@param claimed_id: The claimed identifier to answer with, for use
with identifier selection in the case where the claimed identifier
and the OP-local identifier differ, i.e. when the claimed_id uses
delegation.
If C{identity} is provided but this is not, C{claimed_id} will
default to the value of C{identity}. When answering requests
that did not ask for identifier selection, the response
C{claimed_id} will default to that of the request.
This parameter is new in OpenID 2.0.
@type claimed_id: str or None
@returntype: L{OpenIDResponse}
@change: Version 2.0 deprecates C{server_url} and adds C{claimed_id}.
@raises NoReturnError: when I do not have a return_to.
"""
assert self.message is not None
if not self.return_to:
raise NoReturnToError
if not server_url:
if not self.message.isOpenID1() and not self.op_endpoint:
# In other words, that warning I raised in Server.__init__?
# You should pay attention to it now.
raise RuntimeError("%s should be constructed with op_endpoint "
"to respond to OpenID 2.0 messages." %
(self,))
server_url = self.op_endpoint
if allow:
mode = 'id_res'
elif self.message.isOpenID1():
if self.immediate:
mode = 'id_res'
else:
mode = 'cancel'
else:
if self.immediate:
mode = 'setup_needed'
else:
mode = 'cancel'
response = OpenIDResponse(self)
if claimed_id and self.message.isOpenID1():
namespace = self.message.getOpenIDNamespace()
raise VersionError("claimed_id is new in OpenID 2.0 and not "
"available for %s" % (namespace,))
if allow:
if self.identity == IDENTIFIER_SELECT:
if not identity:
raise ValueError(
"This request uses IdP-driven identifier selection."
"You must supply an identifier in the response.")
response_identity = identity
response_claimed_id = claimed_id or identity
elif self.identity:
if identity and (self.identity != identity):
normalized_request_identity = urinorm(self.identity)
normalized_answer_identity = urinorm(identity)
if (normalized_request_identity !=
normalized_answer_identity):
raise ValueError(
"Request was for identity %r, cannot reply "
"with identity %r" % (self.identity, identity))
# The "identity" value in the response shall always be
# the same as that in the request, otherwise the RP is
# likely to not validate the response.
response_identity = self.identity
response_claimed_id = self.claimed_id
else:
if identity:
raise ValueError(
"This request specified no identity and you "
"supplied %r" % (identity,))
response_identity = None
if self.message.isOpenID1() and response_identity is None:
raise ValueError(
"Request was an OpenID 1 request, so response must "
"include an identifier."
)
response.fields.updateArgs(OPENID_NS, {
'mode': mode,
'return_to': self.return_to,
'response_nonce': mkNonce(),
})
if server_url:
response.fields.setArg(OPENID_NS, 'op_endpoint', server_url)
if response_identity is not None:
response.fields.setArg(
OPENID_NS, 'identity', response_identity)
if self.message.isOpenID2():
response.fields.setArg(
OPENID_NS, 'claimed_id', response_claimed_id)
else:
response.fields.setArg(OPENID_NS, 'mode', mode)
if self.immediate:
if self.message.isOpenID1() and not server_url:
raise ValueError("setup_url is required for allow=False "
"in OpenID 1.x immediate mode.")
# Make a new request just like me, but with immediate=False.
setup_request = self.__class__(
self.identity, self.return_to, self.trust_root,
immediate=False, assoc_handle=self.assoc_handle,
op_endpoint=self.op_endpoint, claimed_id=self.claimed_id)
# XXX: This API is weird.
setup_request.message = self.message
setup_url = setup_request.encodeToURL(server_url)
response.fields.setArg(OPENID_NS, 'user_setup_url', setup_url)
return response
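    # Minimal provider-side sketch (the identifier and oserver are hypothetical;
    # oserver is a Server instance), assuming the user has already been
    # authenticated and has approved the realm:
    #
    #   if request.idSelect():
    #       response = request.answer(True, identity='http://example.com/alice')
    #   else:
    #       response = request.answer(True)
    #   webresponse = oserver.encodeResponse(response)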
def encodeToURL(self, server_url):
"""Encode this request as a URL to GET.
@param server_url: The URL of the OpenID server to make this request of.
@type server_url: str
@returntype: str
@raises NoReturnError: when I do not have a return_to.
"""
if not self.return_to:
raise NoReturnToError
# Imported from the alternate reality where these classes are used
# in both the client and server code, so Requests are Encodable too.
# That's right, code imported from alternate realities all for the
# love of you, id_res/user_setup_url.
q = {'mode': self.mode,
'identity': self.identity,
'claimed_id': self.claimed_id,
'return_to': self.return_to}
if self.trust_root:
if self.message.isOpenID1():
q['trust_root'] = self.trust_root
else:
q['realm'] = self.trust_root
if self.assoc_handle:
q['assoc_handle'] = self.assoc_handle
response = Message(self.message.getOpenIDNamespace())
response.updateArgs(OPENID_NS, q)
return response.toURL(server_url)
def getCancelURL(self):
"""Get the URL to cancel this request.
Useful for creating a "Cancel" button on a web form so that operation
can be carried out directly without another trip through the server.
(Except you probably want to make another trip through the server so
that it knows that the user did make a decision. Or you could simulate
this method by doing C{.answer(False).encodeToURL()})
@returntype: str
@returns: The return_to URL with openid.mode = cancel.
@raises NoReturnError: when I do not have a return_to.
"""
if not self.return_to:
raise NoReturnToError
if self.immediate:
raise ValueError("Cancel is not an appropriate response to "
"immediate mode requests.")
response = Message(self.message.getOpenIDNamespace())
response.setArg(OPENID_NS, 'mode', 'cancel')
return response.toURL(self.return_to)
def __repr__(self):
return '<%s id:%r im:%s tr:%r ah:%r>' % (self.__class__.__name__,
self.identity,
self.immediate,
self.trust_root,
self.assoc_handle)
class OpenIDResponse(object):
"""I am a response to an OpenID request.
@ivar request: The request I respond to.
@type request: L{OpenIDRequest}
@ivar fields: My parameters as a dictionary with each key mapping to
one value. Keys are parameter names with no leading "C{openid.}".
e.g. "C{identity}" and "C{mac_key}", never "C{openid.identity}".
@type fields: L{openid.message.Message}
@ivar signed: The names of the fields which should be signed.
@type signed: list of str
"""
# Implementer's note: In a more symmetric client/server
# implementation, there would be more types of OpenIDResponse
# object and they would have validated attributes according to the
# type of response. But as it is, Response objects in a server are
# basically write-only, their only job is to go out over the wire,
# so this is just a loose wrapper around OpenIDResponse.fields.
def __init__(self, request):
"""Make a response to an L{OpenIDRequest}.
@type request: L{OpenIDRequest}
"""
self.request = request
self.fields = Message(request.namespace)
def __str__(self):
return "%s for %s: %s" % (
self.__class__.__name__,
self.request.__class__.__name__,
self.fields)
def toFormMarkup(self, form_tag_attrs=None):
"""Returns the form markup for this response.
@param form_tag_attrs: Dictionary of attributes to be added to
the form tag. 'accept-charset' and 'enctype' have defaults
that can be overridden. If a value is supplied for
'action' or 'method', it will be replaced.
@returntype: str
@since: 2.1.0
"""
return self.fields.toFormMarkup(self.request.return_to,
form_tag_attrs=form_tag_attrs)
def toHTML(self, form_tag_attrs=None):
"""Returns an HTML document that auto-submits the form markup
for this response.
@returntype: str
@see: toFormMarkup
@since: 2.1.?
"""
return oidutil.autoSubmitHTML(self.toFormMarkup(form_tag_attrs))
def renderAsForm(self):
"""Returns True if this response's encoding is
ENCODE_HTML_FORM. Convenience method for server authors.
@returntype: bool
@since: 2.1.0
"""
return self.whichEncoding() == ENCODE_HTML_FORM
def needsSigning(self):
"""Does this response require signing?
@returntype: bool
"""
return self.fields.getArg(OPENID_NS, 'mode') == 'id_res'
# implements IEncodable
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response.
"""
if self.request.mode in BROWSER_REQUEST_MODES:
if self.fields.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
else:
return ENCODE_KVFORM
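    # Dispatch sketch for the three encodings (web framework glue is assumed):
    # ENCODE_URL -> redirect (HTTP 302) to encodeToURL(); ENCODE_HTML_FORM ->
    # serve the auto-submitting page from toHTML(); ENCODE_KVFORM -> return
    # encodeToKVForm() as a plain-text body with HTTP 200.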
def encodeToURL(self):
"""Encode a response as a URL for the user agent to GET.
You will generally use this URL with a HTTP redirect.
@returns: A URL to direct the user agent back to.
@returntype: str
"""
return self.fields.toURL(self.request.return_to)
def addExtension(self, extension_response):
"""
Add an extension response to this response message.
@param extension_response: An object that implements the
extension interface for adding arguments to an OpenID
message.
@type extension_response: L{openid.extension}
@returntype: None
"""
extension_response.toMessage(self.fields)
def encodeToKVForm(self):
"""Encode a response in key-value colon/newline format.
This is a machine-readable format used to respond to messages which
came directly from the consumer and not through the user agent.
@see: OpenID Specs,
U{Key-Value Colon/Newline format<http://openid.net/specs.bml#keyvalue>}
@returntype: str
"""
return self.fields.toKVForm()
class WebResponse(object):
"""I am a response to an OpenID request in terms a web server understands.
I generally come from an L{Encoder}, either directly or from
L{Server.encodeResponse}.
@ivar code: The HTTP code of this response.
@type code: int
@ivar headers: Headers to include in this response.
@type headers: dict
@ivar body: The body of this response.
@type body: str
"""
def __init__(self, code=HTTP_OK, headers=None, body=""):
"""Construct me.
These parameters are assigned directly as class attributes, see
my L{class documentation<WebResponse>} for their descriptions.
"""
self.code = code
if headers is not None:
self.headers = headers
else:
self.headers = {}
self.body = body
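# Sketch of handing a WebResponse to a web framework (the Django-flavoured
# names are an assumption; any framework with status/headers/body works):
#
#   webresponse = oserver.encodeResponse(openid_response)
#   django_response = HttpResponse(webresponse.body, status=webresponse.code)
#   for header, value in webresponse.headers.iteritems():
#       django_response[header] = value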
class Signatory(object):
"""I sign things.
I also check signatures.
All my state is encapsulated in an
L{OpenIDStore<openid.store.interface.OpenIDStore>}, which means
I'm not generally pickleable but I am easy to reconstruct.
@cvar SECRET_LIFETIME: The number of seconds a secret remains valid.
@type SECRET_LIFETIME: int
"""
SECRET_LIFETIME = 14 * 24 * 60 * 60 # 14 days, in seconds
# keys have a bogus server URL in them because the filestore
# really does expect that key to be a URL. This seems a little
# silly for the server store, since I expect there to be only one
# server URL.
_normal_key = 'http://localhost/|normal'
_dumb_key = 'http://localhost/|dumb'
def __init__(self, store):
"""Create a new Signatory.
@param store: The back-end where my associations are stored.
@type store: L{openid.store.interface.OpenIDStore}
"""
assert store is not None
self.store = store
def verify(self, assoc_handle, message):
"""Verify that the signature for some data is valid.
@param assoc_handle: The handle of the association used to sign the
data.
@type assoc_handle: str
@param message: The signed message to verify
@type message: openid.message.Message
@returns: C{True} if the signature is valid, C{False} if not.
@returntype: bool
"""
assoc = self.getAssociation(assoc_handle, dumb=True)
if not assoc:
oidutil.log("failed to get assoc with handle %r to verify "
"message %r"
% (assoc_handle, message))
return False
try:
valid = assoc.checkMessageSignature(message)
except ValueError, ex:
oidutil.log("Error in verifying %s with %s: %s" % (message,
assoc,
ex))
return False
return valid
def sign(self, response):
"""Sign a response.
I take a L{OpenIDResponse}, create a signature for everything
in its L{signed<OpenIDResponse.signed>} list, and return a new
copy of the response object with that signature included.
@param response: A response to sign.
@type response: L{OpenIDResponse}
@returns: A signed copy of the response.
@returntype: L{OpenIDResponse}
"""
signed_response = deepcopy(response)
assoc_handle = response.request.assoc_handle
if assoc_handle:
# normal mode
# disabling expiration check because even if the association
# is expired, we still need to know some properties of the
# association so that we may preserve those properties when
# creating the fallback association.
assoc = self.getAssociation(assoc_handle, dumb=False,
checkExpiration=False)
if not assoc or assoc.expiresIn <= 0:
# fall back to dumb mode
signed_response.fields.setArg(
OPENID_NS, 'invalidate_handle', assoc_handle)
assoc_type = assoc and assoc.assoc_type or 'HMAC-SHA1'
if assoc and assoc.expiresIn <= 0:
# now do the clean-up that the disabled checkExpiration
# code didn't get to do.
self.invalidate(assoc_handle, dumb=False)
assoc = self.createAssociation(dumb=True, assoc_type=assoc_type)
else:
# dumb mode.
assoc = self.createAssociation(dumb=True)
try:
signed_response.fields = assoc.signMessage(signed_response.fields)
except kvform.KVFormError, err:
raise EncodingError(response, explanation=str(err))
return signed_response
def createAssociation(self, dumb=True, assoc_type='HMAC-SHA1'):
"""Make a new association.
@param dumb: Is this association for a dumb-mode transaction?
@type dumb: bool
@param assoc_type: The type of association to create. Currently
there is only one type defined, C{HMAC-SHA1}.
@type assoc_type: str
@returns: the new association.
@returntype: L{openid.association.Association}
"""
secret = cryptutil.getBytes(getSecretSize(assoc_type))
uniq = oidutil.toBase64(cryptutil.getBytes(4))
handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq)
assoc = Association.fromExpiresIn(
self.SECRET_LIFETIME, handle, secret, assoc_type)
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.storeAssociation(key, assoc)
return assoc
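    # A handle built by createAssociation looks roughly like (values
    # hypothetical): '{HMAC-SHA1}{4f1a2b3c}{dGVzdA==}', i.e. association type,
    # creation time as hex seconds, and a base64 uniquifier.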
def getAssociation(self, assoc_handle, dumb, checkExpiration=True):
"""Get the association with the specified handle.
@type assoc_handle: str
@param dumb: Is this association used with dumb mode?
@type dumb: bool
@returns: the association, or None if no valid association with that
handle was found.
@returntype: L{openid.association.Association}
"""
# Hmm. We've created an interface that deals almost entirely with
# assoc_handles. The only place outside the Signatory that uses this
# (and thus the only place that ever sees Association objects) is
# when creating a response to an association request, as it must have
# the association's secret.
if assoc_handle is None:
raise ValueError("assoc_handle must not be None")
if dumb:
key = self._dumb_key
else:
key = self._normal_key
assoc = self.store.getAssociation(key, assoc_handle)
if assoc is not None and assoc.expiresIn <= 0:
oidutil.log("requested %sdumb key %r is expired (by %s seconds)" %
((not dumb) and 'not-' or '',
assoc_handle, assoc.expiresIn))
if checkExpiration:
self.store.removeAssociation(key, assoc_handle)
assoc = None
return assoc
def invalidate(self, assoc_handle, dumb):
"""Invalidates the association with the given handle.
@type assoc_handle: str
@param dumb: Is this association used with dumb mode?
@type dumb: bool
"""
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.removeAssociation(key, assoc_handle)
class Encoder(object):
"""I encode responses in to L{WebResponses<WebResponse>}.
If you don't like L{WebResponses<WebResponse>}, you can do
your own handling of L{OpenIDResponses<OpenIDResponse>} with
L{OpenIDResponse.whichEncoding}, L{OpenIDResponse.encodeToURL}, and
L{OpenIDResponse.encodeToKVForm}.
"""
responseFactory = WebResponse
def encode(self, response):
"""Encode a response to a L{WebResponse}.
@raises EncodingError: When I can't figure out how to encode this
message.
"""
encode_as = response.whichEncoding()
if encode_as == ENCODE_KVFORM:
wr = self.responseFactory(body=response.encodeToKVForm())
if isinstance(response, Exception):
wr.code = HTTP_ERROR
elif encode_as == ENCODE_URL:
location = response.encodeToURL()
wr = self.responseFactory(code=HTTP_REDIRECT,
headers={'location': location})
elif encode_as == ENCODE_HTML_FORM:
wr = self.responseFactory(code=HTTP_OK,
body=response.toFormMarkup())
else:
# Can't encode this to a protocol message. You should probably
# render it to HTML and show it to the user.
raise EncodingError(response)
return wr
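    # Minimal sketch (openid_response is a hypothetical OpenIDResponse):
    #
    #   encoder = Encoder()
    #   webresponse = encoder.encode(openid_response)
    #   # webresponse.code, webresponse.headers and webresponse.body are ready
    #   # to be written out by whatever web server sits in front.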
class SigningEncoder(Encoder):
"""I encode responses in to L{WebResponses<WebResponse>}, signing them when required.
"""
def __init__(self, signatory):
"""Create a L{SigningEncoder}.
@param signatory: The L{Signatory} I will make signatures with.
@type signatory: L{Signatory}
"""
self.signatory = signatory
def encode(self, response):
"""Encode a response to a L{WebResponse}, signing it first if appropriate.
@raises EncodingError: When I can't figure out how to encode this
message.
@raises AlreadySigned: When this response is already signed.
@returntype: L{WebResponse}
"""
# the isinstance is a bit of a kludge... it means there isn't really
# an adapter to make the interfaces quite match.
if (not isinstance(response, Exception)) and response.needsSigning():
if not self.signatory:
raise ValueError(
"Must have a store to sign this request: %s" %
(response,), response)
if response.fields.hasKey(OPENID_NS, 'sig'):
raise AlreadySigned(response)
response = self.signatory.sign(response)
return super(SigningEncoder, self).encode(response)
class Decoder(object):
"""I decode an incoming web request in to a L{OpenIDRequest}.
"""
_handlers = {
'checkid_setup': CheckIDRequest.fromMessage,
'checkid_immediate': CheckIDRequest.fromMessage,
'check_authentication': CheckAuthRequest.fromMessage,
'associate': AssociateRequest.fromMessage,
}
def __init__(self, server):
"""Construct a Decoder.
@param server: The server which I am decoding requests for.
(Necessary because some replies reference their server.)
@type server: L{Server}
"""
self.server = server
def decode(self, query):
"""I transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
"""
if not query:
return None
try:
message = Message.fromPostArgs(query)
except InvalidOpenIDNamespace, err:
# It's useful to have a Message attached to a ProtocolError, so we
# override the bad ns value to build a Message out of it. Kinda
# kludgy, since it's made of lies, but the parts that aren't lies
# are more useful than a 'None'.
query = query.copy()
query['openid.ns'] = OPENID2_NS
message = Message.fromPostArgs(query)
raise ProtocolError(message, str(err))
mode = message.getArg(OPENID_NS, 'mode')
if not mode:
fmt = "No mode value in message %s"
raise ProtocolError(message, text=fmt % (message,))
handler = self._handlers.get(mode, self.defaultDecoder)
return handler(message, self.server.op_endpoint)
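    # Decode sketch (query values hypothetical, OpenID 1.x style message):
    #
    #   request = decoder.decode({
    #       'openid.mode': 'checkid_setup',
    #       'openid.identity': 'http://example.com/alice',
    #       'openid.return_to': 'http://consumer.example.com/complete',
    #   })
    #   # yields a CheckIDRequest via the _handlers table above.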
def defaultDecoder(self, message, server):
"""Called to decode queries when no handler for that mode is found.
@raises ProtocolError: This implementation always raises
L{ProtocolError}.
"""
mode = message.getArg(OPENID_NS, 'mode')
fmt = "Unrecognized OpenID mode %r"
raise ProtocolError(message, text=fmt % (mode,))
class Server(object):
"""I handle requests for an OpenID server.
Some types of requests (those which are not C{checkid} requests) may be
handed to my L{handleRequest} method, and I will take care of it and
return a response.
For your convenience, I also provide an interface to L{Decoder.decode}
and L{SigningEncoder.encode} through my methods L{decodeRequest} and
L{encodeResponse}.
All my state is encapsulated in an
L{OpenIDStore<openid.store.interface.OpenIDStore>}, which means
I'm not generally pickleable but I am easy to reconstruct.
Example::
oserver = Server(FileOpenIDStore(data_path), "http://example.com/op")
request = oserver.decodeRequest(query)
if request.mode in ['checkid_immediate', 'checkid_setup']:
if self.isAuthorized(request.identity, request.trust_root):
response = request.answer(True)
elif request.immediate:
response = request.answer(False)
else:
self.showDecidePage(request)
return
else:
response = oserver.handleRequest(request)
        webresponse = oserver.encodeResponse(response)
@ivar signatory: I'm using this for associate requests and to sign things.
@type signatory: L{Signatory}
@ivar decoder: I'm using this to decode things.
@type decoder: L{Decoder}
@ivar encoder: I'm using this to encode things.
@type encoder: L{Encoder}
@ivar op_endpoint: My URL.
@type op_endpoint: str
@ivar negotiator: I use this to determine which kinds of
associations I can make and how.
@type negotiator: L{openid.association.SessionNegotiator}
"""
signatoryClass = Signatory
encoderClass = SigningEncoder
decoderClass = Decoder
def __init__(self, store, op_endpoint=None):
"""A new L{Server}.
@param store: The back-end where my associations are stored.
@type store: L{openid.store.interface.OpenIDStore}
@param op_endpoint: My URL, the fully qualified address of this
server's endpoint, i.e. C{http://example.com/server}
@type op_endpoint: str
@change: C{op_endpoint} is new in library version 2.0. It
currently defaults to C{None} for compatibility with
earlier versions of the library, but you must provide it
if you want to respond to any version 2 OpenID requests.
"""
self.store = store
self.signatory = self.signatoryClass(self.store)
self.encoder = self.encoderClass(self.signatory)
self.decoder = self.decoderClass(self)
self.negotiator = default_negotiator.copy()
if not op_endpoint:
warnings.warn("%s.%s constructor requires op_endpoint parameter "
"for OpenID 2.0 servers" %
(self.__class__.__module__, self.__class__.__name__),
stacklevel=2)
self.op_endpoint = op_endpoint
def handleRequest(self, request):
"""Handle a request.
Give me a request, I will give you a response. Unless it's a type
of request I cannot handle myself, in which case I will raise
C{NotImplementedError}. In that case, you can handle it yourself,
or add a method to me for handling that request type.
@raises NotImplementedError: When I do not have a handler defined
for that type of request.
@returntype: L{OpenIDResponse}
"""
handler = getattr(self, 'openid_' + request.mode, None)
if handler is not None:
return handler(request)
else:
raise NotImplementedError(
"%s has no handler for a request of mode %r." %
(self, request.mode))
def openid_check_authentication(self, request):
"""Handle and respond to C{check_authentication} requests.
@returntype: L{OpenIDResponse}
"""
return request.answer(self.signatory)
def openid_associate(self, request):
"""Handle and respond to C{associate} requests.
@returntype: L{OpenIDResponse}
"""
# XXX: TESTME
assoc_type = request.assoc_type
session_type = request.session.session_type
if self.negotiator.isAllowed(assoc_type, session_type):
assoc = self.signatory.createAssociation(dumb=False,
assoc_type=assoc_type)
return request.answer(assoc)
else:
message = ('Association type %r is not supported with '
'session type %r' % (assoc_type, session_type))
(preferred_assoc_type, preferred_session_type) = \
self.negotiator.getAllowedType()
return request.answerUnsupported(
message,
preferred_assoc_type,
preferred_session_type)
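    # Sketch of narrowing what openid_associate will accept; allowed_types is
    # an attribute of openid.association.SessionNegotiator, and the chosen
    # pair here is an assumption:
    #
    #   oserver.negotiator.allowed_types = [('HMAC-SHA256', 'DH-SHA256')]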
def decodeRequest(self, query):
"""Transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
@see: L{Decoder.decode}
"""
return self.decoder.decode(query)
def encodeResponse(self, response):
"""Encode a response to a L{WebResponse}, signing it first if appropriate.
@raises EncodingError: When I can't figure out how to encode this
message.
@raises AlreadySigned: When this response is already signed.
@returntype: L{WebResponse}
@see: L{SigningEncoder.encode}
"""
return self.encoder.encode(response)
class ProtocolError(Exception):
"""A message did not conform to the OpenID protocol.
@ivar message: The query that is failing to be a valid OpenID request.
@type message: openid.message.Message
"""
def __init__(self, message, text=None, reference=None, contact=None):
"""When an error occurs.
@param message: The message that is failing to be a valid
OpenID request.
@type message: openid.message.Message
@param text: A message about the encountered error. Set as C{args[0]}.
@type text: str
"""
self.openid_message = message
self.reference = reference
self.contact = contact
assert type(message) not in [str, unicode]
Exception.__init__(self, text)
def getReturnTo(self):
"""Get the return_to argument from the request, if any.
@returntype: str
"""
if self.openid_message is None:
return None
else:
return self.openid_message.getArg(OPENID_NS, 'return_to')
def hasReturnTo(self):
"""Did this request have a return_to parameter?
@returntype: bool
"""
return self.getReturnTo() is not None
def toMessage(self):
"""Generate a Message object for sending to the relying party,
after encoding.
"""
namespace = self.openid_message.getOpenIDNamespace()
reply = Message(namespace)
reply.setArg(OPENID_NS, 'mode', 'error')
reply.setArg(OPENID_NS, 'error', str(self))
if self.contact is not None:
reply.setArg(OPENID_NS, 'contact', str(self.contact))
if self.reference is not None:
reply.setArg(OPENID_NS, 'reference', str(self.reference))
return reply
# implements IEncodable
def encodeToURL(self):
return self.toMessage().toURL(self.getReturnTo())
def encodeToKVForm(self):
return self.toMessage().toKVForm()
def toFormMarkup(self):
"""Encode to HTML form markup for POST.
@since: 2.1.0
"""
return self.toMessage().toFormMarkup(self.getReturnTo())
def toHTML(self):
"""Encode to a full HTML page, wrapping the form markup in a page
that will autosubmit the form.
@since: 2.1.?
"""
return oidutil.autoSubmitHTML(self.toFormMarkup())
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_KVFORM, or None. If None,
I cannot be encoded as a protocol message and should be
displayed to the user.
"""
if self.hasReturnTo():
if self.openid_message.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
if self.openid_message is None:
return None
mode = self.openid_message.getArg(OPENID_NS, 'mode')
if mode:
if mode not in BROWSER_REQUEST_MODES:
return ENCODE_KVFORM
# According to the OpenID spec as of this writing, we are probably
# supposed to switch on request type here (GET versus POST) to figure
# out if we're supposed to print machine-readable or human-readable
# content at this point. GET/POST seems like a pretty lousy way of
# making the distinction though, as it's just as possible that the
# user agent could have mistakenly been directed to post to the
# server URL.
# Basically, if your request was so broken that you didn't manage to
# include an openid.mode, I'm not going to worry too much about
# returning you something you can't parse.
return None
class VersionError(Exception):
"""Raised when an operation was attempted that is not compatible with
the protocol version being used."""
class NoReturnToError(Exception):
"""Raised when a response to a request cannot be generated because
the request contains no return_to URL.
"""
pass
class EncodingError(Exception):
"""Could not encode this as a protocol message.
You should probably render it and show it to the user.
@ivar response: The response that failed to encode.
@type response: L{OpenIDResponse}
"""
def __init__(self, response, explanation=None):
Exception.__init__(self, response)
self.response = response
self.explanation = explanation
def __str__(self):
if self.explanation:
s = '%s: %s' % (self.__class__.__name__,
self.explanation)
else:
s = '%s for Response %s' % (
self.__class__.__name__, self.response)
return s
class AlreadySigned(EncodingError):
"""This response is already signed."""
class UntrustedReturnURL(ProtocolError):
"""A return_to is outside the trust_root."""
def __init__(self, message, return_to, trust_root):
ProtocolError.__init__(self, message)
self.return_to = return_to
self.trust_root = trust_root
def __str__(self):
return "return_to %r not under trust_root %r" % (self.return_to,
self.trust_root)
class MalformedReturnURL(ProtocolError):
"""The return_to URL doesn't look like a valid URL."""
def __init__(self, openid_message, return_to):
self.return_to = return_to
ProtocolError.__init__(self, openid_message)
class MalformedTrustRoot(ProtocolError):
"""The trust root is not well-formed.
@see: OpenID Specs, U{openid.trust_root<http://openid.net/specs.bml#mode-checkid_immediate>}
"""
pass
#class IEncodable: # Interface
# def encodeToURL(return_to):
# """Encode a response as a URL for redirection.
#
# @returns: A URL to direct the user agent back to.
# @returntype: str
# """
# pass
#
# def encodeToKvform():
# """Encode a response in key-value colon/newline format.
#
# This is a machine-readable format used to respond to messages which
# came directly from the consumer and not through the user agent.
#
# @see: OpenID Specs,
# U{Key-Value Colon/Newline format<http://openid.net/specs.bml#keyvalue>}
#
# @returntype: str
# """
# pass
#
# def whichEncoding():
# """How should I be encoded?
#
# @returns: one of ENCODE_URL, ENCODE_KVFORM, or None. If None,
# I cannot be encoded as a protocol message and should be
# displayed to the user.
# """
# pass
| bsd-3-clause | -4,624,120,184,274,388,000 | -8,796,814,865,912,129,000 | 34.514873 | 96 | 0.61122 | false |
anant-dev/django | tests/indexes/models.py | 253 | 1714 |
from django.db import connection, models
class CurrentTranslation(models.ForeignObject):
"""
Creates virtual relation to the translation with model cache enabled.
"""
# Avoid validation
requires_unique_target = False
def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
# Disable reverse relation
kwargs['related_name'] = '+'
# Set unique to enable model cache.
kwargs['unique'] = True
super(CurrentTranslation, self).__init__(to, on_delete, from_fields, to_fields, **kwargs)
class ArticleTranslation(models.Model):
article = models.ForeignKey('indexes.Article', models.CASCADE)
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
# Add virtual relation to the ArticleTranslation model.
translation = CurrentTranslation(ArticleTranslation, models.CASCADE, ['id'], ['article'])
class Meta:
index_together = [
["headline", "pub_date"],
]
# Model for index_together being used only with single list
class IndexTogetherSingleList(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
index_together = ["headline", "pub_date"]
# Indexing a TextField on Oracle or MySQL results in index creation error.
if connection.vendor == 'postgresql':
class IndexedArticle(models.Model):
headline = models.CharField(max_length=100, db_index=True)
body = models.TextField(db_index=True)
slug = models.CharField(max_length=40, unique=True)
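# A quick way to inspect the indexes Django derives from the models above is
# to print the migration SQL, e.g. (app label and migration name assumed):
#   python manage.py sqlmigrate indexes 0001_initial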
| bsd-3-clause | 3,073,177,661,539,787,300 | 5,199,222,130,086,511,000 | 31.961538 | 97 | 0.683781 | false |
pcrews/rannsaka | test_files/2volumes_basic.py | 1 | 3456 |
import os
import random
import time
import json
from locust import HttpLocust, TaskSet, task
from lib.baseTaskSet import baseTaskSet
# TODO - make these config-driven
from lib.openstack.keystone import get_auth_token
from lib.openstack.cinder import list_volumes
from lib.openstack.cinder import list_volumes_detail
from lib.openstack.cinder import list_volume_detail
from lib.openstack.cinder import create_volume
from lib.openstack.cinder import delete_volume
from lib.openstack.cinder import cinder_get_volume_id
from lib.openstack.nova import nova_get_image_id
from lib.openstack.nova import list_limits
class UserBehavior(baseTaskSet):
def on_start(self):
super(UserBehavior, self).on_start()
self.volume_id = None
self.volume_count = 0
self.sleep_times=[0,0,1,1,1,1,3,3,3,5,5,5,5,10,10,30,30]
self.auth_token, self.tenant_id, self.service_catalog = get_auth_token(self)
def chance(self):
chances = [1,1,1,1,2]
if random.choice(chances)%2==0:
return True
else:
return False
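    # Note: with chances = [1, 1, 1, 1, 2] and the %2 test above, chance()
    # returns True roughly one call in five (only the single even entry passes).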
def rand_sleep(self):
time.sleep(random.choice(self.sleep_times))
@task(2)
def update_volume_id(self):
self.volume_id = cinder_get_volume_id(self)
@task(5)
def cinder_create_volume(self):
if not self.volume_id:
volume_id=None
image_id=None
bootable=False
size=1
# volume_id
if self.chance():
volume_id = cinder_get_volume_id(self)
# image_id
if self.chance():
image_id = nova_get_image_id(self)
# bootable
if self.chance():
bootable=True
# metadata
# size
sizes = [1,1,1,3,3,5,5,2.5,100,99,'a','abbazabba',-1,0]
size = random.choice(sizes)
# description
# snapshot_id
response = create_volume(self,
name="volume-%s-%s" % (self.id, self.volume_count),
volume_id=volume_id,
image_id=image_id,
bootable=bootable,
size=size)
print response.content
print '!'*80
self.volume_id = json.loads(response.content)['volume']['id']
self.volume_count += 1
self.rand_sleep()
else:
self.output('Volume already exists, not creating one:')
self.output("volume id: %s" % self.volume_id)
@task(2)
def cinder_delete_volume(self):
if self.volume_id:
delete_volume(self, self.volume_id)
# TODO - test response
self.volume_id = None
self.rand_sleep()
else:
self.cinder_create_volume()
@task(5)
def cinder_list_volumes(self):
list_volumes(self)
@task(5)
def cinder_list_volumes_detail(self):
list_volumes_detail(self)
@task(4)
def cinder_list_volume_detail(self):
list_volume_detail(self)
@task(1)
def nova_list_limits(self):
list_limits(self)
@task(1)
def keystone_get_auth(self):
self.auth_token, self.tenant_id, self.service_catalog = get_auth_token(self)
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait=500
max_wait=1000
| apache-2.0 | 5,981,604,982,990,266,000 | 8,230,865,291,533,988,000 | 29.052174 | 88 | 0.567998 | false |
grap/OpenUpgrade | addons/membership/membership.py | 9 | 28553 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
STATE_PRIOR = {
'none': 0,
'canceled': 1,
'old': 2,
'waiting': 3,
'invoiced': 4,
'free': 6,
'paid': 7
}
class membership_line(osv.osv):
'''Member line'''
def _get_partners(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
if partner.member_lines:
list_membership_line += member_line_obj.search(cr, uid, [('id', 'in', [ l.id for l in partner.member_lines])], context=context)
return list_membership_line
def _get_membership_lines(self, cr, uid, ids, context=None):
list_membership_line = []
member_line_obj = self.pool.get('membership.membership_line')
for invoice in self.pool.get('account.invoice').browse(cr, uid, ids, context=context):
if invoice.invoice_line:
list_membership_line += member_line_obj.search(cr, uid, [('account_invoice_line', 'in', [ l.id for l in invoice.invoice_line])], context=context)
return list_membership_line
def _check_membership_date(self, cr, uid, ids, context=None):
"""Check if membership product is not in the past
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param context: A standard dictionary for contextual values
"""
cr.execute('''
SELECT MIN(ml.date_to - ai.date_invoice)
FROM membership_membership_line ml
JOIN account_invoice_line ail ON (
ml.account_invoice_line = ail.id
)
JOIN account_invoice ai ON (
ai.id = ail.invoice_id)
WHERE ml.id IN %s''', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[0] and r[0] < 0:
return False
return True
def _state(self, cr, uid, ids, name, args, context=None):
"""Compute the state lines
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Membership Line IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
@param return: Dictionary of state Value
"""
res = {}
inv_obj = self.pool.get('account.invoice')
for line in self.browse(cr, uid, ids, context=context):
cr.execute('''
SELECT i.state, i.id FROM
account_invoice i
WHERE
i.id = (
SELECT l.invoice_id FROM
account_invoice_line l WHERE
l.id = (
SELECT ml.account_invoice_line FROM
membership_membership_line ml WHERE
ml.id = %s
)
)
''', (line.id,))
fetched = cr.fetchone()
if not fetched:
res[line.id] = 'canceled'
continue
istate = fetched[0]
state = 'none'
if (istate == 'draft') | (istate == 'proforma'):
state = 'waiting'
elif istate == 'open':
state = 'invoiced'
elif istate == 'paid':
state = 'paid'
inv = inv_obj.browse(cr, uid, fetched[1], context=context)
for payment in inv.payment_ids:
if payment.invoice and payment.invoice.type == 'out_refund':
state = 'canceled'
elif istate == 'cancel':
state = 'canceled'
res[line.id] = state
return res
_description = __doc__
_name = 'membership.membership_line'
_columns = {
'partner': fields.many2one('res.partner', 'Partner', ondelete='cascade', select=1),
'membership_id': fields.many2one('product.product', string="Membership", required=True),
'date_from': fields.date('From', readonly=True),
'date_to': fields.date('To', readonly=True),
'date_cancel': fields.date('Cancel date'),
'date': fields.date('Join Date', help="Date on which member has joined the membership"),
'member_price': fields.float('Membership Fee', digits_compute= dp.get_precision('Product Price'), required=True, help='Amount for the membership'),
'account_invoice_line': fields.many2one('account.invoice.line', 'Account Invoice line', readonly=True),
'account_invoice_id': fields.related('account_invoice_line', 'invoice_id', type='many2one', relation='account.invoice', string='Invoice', readonly=True),
'state': fields.function(_state,
string='Membership Status', type='selection',
selection=STATE, store = {
'account.invoice': (_get_membership_lines, ['state'], 10),
'res.partner': (_get_partners, ['membership_state'], 12),
}, help="""It indicates the membership status.
-Non Member: A member who has not applied for any membership.
-Cancelled Member: A member who has cancelled his membership.
-Old Member: A member whose membership date has expired.
-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.
-Invoiced Member: A member whose invoice has been created.
-Paid Member: A member who has paid the membership amount."""),
'company_id': fields.related('account_invoice_line', 'invoice_id', 'company_id', type="many2one", relation="res.company", string="Company", readonly=True, store=True)
}
_rec_name = 'partner'
_order = 'id desc'
_constraints = [
(_check_membership_date, 'Error, this membership product is out of date', [])
]
class Partner(osv.osv):
'''Partner'''
_inherit = 'res.partner'
def _get_partner_id(self, cr, uid, ids, context=None):
member_line_obj = self.pool.get('membership.membership_line')
res_obj = self.pool.get('res.partner')
data_inv = member_line_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _get_invoice_partner(self, cr, uid, ids, context=None):
inv_obj = self.pool.get('account.invoice')
res_obj = self.pool.get('res.partner')
data_inv = inv_obj.browse(cr, uid, ids, context=context)
list_partner = []
for data in data_inv:
list_partner.append(data.partner_id.id)
ids2 = list_partner
while ids2:
ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
list_partner += ids2
return list_partner
def _cron_update_membership(self, cr, uid, context=None):
partner_ids = self.search(cr, uid, [('membership_state', '=', 'paid')], context=context)
if partner_ids:
self._store_set_values(cr, uid, partner_ids, ['membership_state'], context=context)
def _membership_state(self, cr, uid, ids, name, args, context=None):
"""This Function return Membership State For Given Partner.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Partner IDs
@param name: Field Name
@param context: A standard dictionary for contextual values
@param return: Dictionary of Membership state Value
"""
res = {}
for id in ids:
res[id] = 'none'
today = time.strftime('%Y-%m-%d')
for id in ids:
partner_data = self.browse(cr, uid, id, context=context)
if partner_data.membership_cancel and today > partner_data.membership_cancel:
res[id] = 'free' if partner_data.free_member else 'canceled'
continue
if partner_data.membership_stop and today > partner_data.membership_stop:
res[id] = 'free' if partner_data.free_member else 'old'
continue
s = 4
if partner_data.member_lines:
for mline in partner_data.member_lines:
if mline.date_to >= today and mline.date_from <= today:
if mline.account_invoice_line and mline.account_invoice_line.invoice_id:
mstate = mline.account_invoice_line.invoice_id.state
if mstate == 'paid':
s = 0
inv = mline.account_invoice_line.invoice_id
for payment in inv.payment_ids:
if payment.invoice.type == 'out_refund':
s = 2
break
elif mstate == 'open' and s!=0:
s = 1
elif mstate == 'cancel' and s!=0 and s!=1:
s = 2
elif (mstate == 'draft' or mstate == 'proforma') and s!=0 and s!=1:
s = 3
if s==4:
for mline in partner_data.member_lines:
if mline.date_from < today and mline.date_to < today and mline.date_from <= mline.date_to and mline.account_invoice_line and mline.account_invoice_line.invoice_id.state == 'paid':
s = 5
else:
s = 6
if s==0:
res[id] = 'paid'
elif s==1:
res[id] = 'invoiced'
elif s==2:
res[id] = 'canceled'
elif s==3:
res[id] = 'waiting'
elif s==5:
res[id] = 'old'
elif s==6:
res[id] = 'none'
if partner_data.free_member and s!=0:
res[id] = 'free'
if partner_data.associate_member:
res_state = self._membership_state(cr, uid, [partner_data.associate_member.id], name, args, context=context)
res[id] = res_state[partner_data.associate_member.id]
return res
def _membership_date(self, cr, uid, ids, name, args, context=None):
"""Return date of membership"""
name = name[0]
res = {}
member_line_obj = self.pool.get('membership.membership_line')
for partner in self.browse(cr, uid, ids, context=context):
if partner.associate_member:
partner_id = partner.associate_member.id
else:
partner_id = partner.id
res[partner.id] = {
'membership_start': False,
'membership_stop': False,
'membership_cancel': False
}
if name == 'membership_start':
line_id = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_from', context=context)
if line_id:
res[partner.id]['membership_start'] = member_line_obj.read(cr, uid, [line_id[0]],
['date_from'], context=context)[0]['date_from']
if name == 'membership_stop':
line_id1 = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
limit=1, order='date_to desc', context=context)
if line_id1:
res[partner.id]['membership_stop'] = member_line_obj.read(cr, uid, [line_id1[0]],
['date_to'], context=context)[0]['date_to']
if name == 'membership_cancel':
if partner.membership_state == 'canceled':
line_id2 = member_line_obj.search(cr, uid, [('partner', '=', partner.id)], limit=1, order='date_cancel', context=context)
if line_id2:
res[partner.id]['membership_cancel'] = member_line_obj.read(cr, uid, [line_id2[0]], ['date_cancel'], context=context)[0]['date_cancel']
return res
def _get_partners(self, cr, uid, ids, context=None):
ids2 = ids
while ids2:
ids2 = self.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
ids += ids2
return ids
def __get_membership_state(self, *args, **kwargs):
return self._membership_state(*args, **kwargs)
_columns = {
'associate_member': fields.many2one('res.partner', 'Associate Member',help="A member with whom you want to associate your membership.It will consider the membership state of the associated member."),
'member_lines': fields.one2many('membership.membership_line', 'partner', 'Membership'),
'free_member': fields.boolean('Free Member', help = "Select if you want to give free membership."),
'membership_amount': fields.float(
'Membership Amount', digits=(16, 2),
help = 'The price negotiated by the partner'),
'membership_state': fields.function(
__get_membership_state,
string = 'Current Membership Status', type = 'selection',
selection = STATE,
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help='It indicates the membership state.\n'
'-Non Member: A partner who has not applied for any membership.\n'
'-Cancelled Member: A member who has cancelled his membership.\n'
'-Old Member: A member whose membership date has expired.\n'
'-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.\n'
'-Invoiced Member: A member whose invoice has been created.\n'
'-Paying member: A member who has paid the membership fee.'),
'membership_start': fields.function(
_membership_date, multi = 'membeship_start',
string = 'Membership Start Date', type = 'date',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10, ),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date from which membership becomes active."),
'membership_stop': fields.function(
_membership_date,
string = 'Membership End Date', type='date', multi='membership_stop',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 10),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date until which membership remains active."),
'membership_cancel': fields.function(
_membership_date,
string = 'Cancel Membership Date', type='date', multi='membership_cancel',
store = {
'account.invoice': (_get_invoice_partner, ['state'], 11),
'membership.membership_line': (_get_partner_id, ['state'], 10),
'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
}, help="Date on which membership has been cancelled"),
}
_defaults = {
'free_member': False,
'membership_cancel': False,
}
def _check_recursion(self, cr, uid, ids, context=None):
"""Check Recursive for Associated Members.
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT associate_member FROM res_partner WHERE id IN %s', (tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive associated members.', ['associate_member'])
]
def create_membership_invoice(self, cr, uid, ids, product_id=None, datas=None, context=None):
""" Create Customer Invoice of Membership for partners.
@param datas: datas has dictionary value which consist Id of Membership product and Cost Amount of Membership.
datas = {'membership_product_id': None, 'amount': None}
"""
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_tax_obj = self.pool.get('account.invoice.tax')
product_id = product_id or datas.get('membership_product_id', False)
amount = datas.get('amount', 0.0)
invoice_list = []
if type(ids) in (int, long,):
ids = [ids]
for partner in self.browse(cr, uid, ids, context=context):
account_id = partner.property_account_receivable and partner.property_account_receivable.id or False
fpos_id = partner.property_account_position and partner.property_account_position.id or False
addr = self.address_get(cr, uid, [partner.id], ['invoice'])
if partner.free_member:
raise osv.except_osv(_('Error!'),
_("Partner is a free Member."))
if not addr.get('invoice', False):
raise osv.except_osv(_('Error!'),
_("Partner doesn't have an address to make the invoice."))
quantity = 1
line_value = {
'product_id': product_id,
}
line_dict = invoice_line_obj.product_id_change(cr, uid, {},
product_id, False, quantity, '', 'out_invoice', partner.id, fpos_id, price_unit=amount, context=context)
line_value.update(line_dict['value'])
line_value['price_unit'] = amount
if line_value.get('invoice_line_tax_id', False):
tax_tab = [(6, 0, line_value['invoice_line_tax_id'])]
line_value['invoice_line_tax_id'] = tax_tab
invoice_id = invoice_obj.create(cr, uid, {
'partner_id': partner.id,
'account_id': account_id,
'fiscal_position': fpos_id or False
}, context=context)
line_value['invoice_id'] = invoice_id
invoice_line_obj.create(cr, uid, line_value, context=context)
invoice_list.append(invoice_id)
if line_value['invoice_line_tax_id']:
tax_value = invoice_tax_obj.compute(cr, uid, invoice_id).values()
for tax in tax_value:
invoice_tax_obj.create(cr, uid, tax, context=context)
#recompute the membership_state of those partners
self.pool.get('res.partner').write(cr, uid, ids, {})
return invoice_list
class Product(osv.osv):
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
model_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
if ('product' in context) and (context['product']=='membership_product'):
model_data_ids_form = model_obj.search(cr, user, [('model','=','ir.ui.view'), ('name', 'in', ['membership_products_form', 'membership_products_tree'])], context=context)
resource_id_form = model_obj.read(cr, user, model_data_ids_form, fields=['res_id', 'name'], context=context)
dict_model = {}
for i in resource_id_form:
dict_model[i['name']] = i['res_id']
if view_type == 'form':
view_id = dict_model['membership_products_form']
else:
view_id = dict_model['membership_products_tree']
return super(Product,self).fields_view_get(cr, user, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
'''Product'''
_inherit = 'product.template'
_columns = {
'membership': fields.boolean('Membership', help='Check if the product is eligible for membership.'),
'membership_date_from': fields.date('Membership Start Date', help='Date from which membership becomes active.'),
'membership_date_to': fields.date('Membership End Date', help='Date until which membership remains active.'),
}
_sql_constraints = [('membership_date_greater','check(membership_date_to >= membership_date_from)','Error ! Ending Date cannot be set before Beginning Date.')]
_defaults = {
'membership': False,
}
class Invoice(osv.osv):
'''Invoice'''
_inherit = 'account.invoice'
def action_cancel(self, cr, uid, ids, context=None):
'''Create a 'date_cancel' on the membership_line object'''
member_line_obj = self.pool.get('membership.membership_line')
today = time.strftime('%Y-%m-%d')
for invoice in self.browse(cr, uid, ids, context=context):
mlines = member_line_obj.search(cr, uid,
[('account_invoice_line', 'in',
[l.id for l in invoice.invoice_line])])
member_line_obj.write(cr, uid, mlines, {'date_cancel': today})
return super(Invoice, self).action_cancel(cr, uid, ids, context=context)
# TODO master: replace by ondelete='cascade'
def unlink(self, cr, uid, ids, context=None):
member_line_obj = self.pool.get('membership.membership_line')
for invoice in self.browse(cr, uid, ids, context=context):
mlines = member_line_obj.search(cr, uid,
[('account_invoice_line', 'in',
[l.id for l in invoice.invoice_line])])
member_line_obj.unlink(cr, uid, mlines, context=context)
return super(Invoice, self).unlink(cr, uid, ids, context=context)
class account_invoice_line(osv.osv):
_inherit='account.invoice.line'
def write(self, cr, uid, ids, vals, context=None):
"""Overrides orm write method
"""
member_line_obj = self.pool.get('membership.membership_line')
res = super(account_invoice_line, self).write(cr, uid, ids, vals, context=context)
for line in self.browse(cr, uid, ids, context=context):
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line has changed to a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id.id,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
if line.product_id and not line.product_id.membership and ml_ids:
# Product line has changed to a non membership product
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return res
# TODO master: replace by ondelete='cascade'
def unlink(self, cr, uid, ids, context=None):
"""Remove Membership Line Record for Account Invoice Line
"""
member_line_obj = self.pool.get('membership.membership_line')
for id in ids:
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', id)], context=context)
member_line_obj.unlink(cr, uid, ml_ids, context=context)
return super(account_invoice_line, self).unlink(cr, uid, ids, context=context)
def create(self, cr, uid, vals, context=None):
"""Overrides orm create method
"""
member_line_obj = self.pool.get('membership.membership_line')
result = super(account_invoice_line, self).create(cr, uid, vals, context=context)
line = self.browse(cr, uid, result, context=context)
if line.invoice_id.type == 'out_invoice':
ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
if line.product_id and line.product_id.membership and not ml_ids:
# Product line is a membership product
date_from = line.product_id.membership_date_from
date_to = line.product_id.membership_date_to
if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
date_from = line.invoice_id.date_invoice
member_line_obj.create(cr, uid, {
'partner': line.invoice_id.partner_id and line.invoice_id.partner_id.id or False,
'membership_id': line.product_id.id,
'member_price': line.price_unit,
'date': time.strftime('%Y-%m-%d'),
'date_from': date_from,
'date_to': date_to,
'account_invoice_line': line.id,
}, context=context)
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 787,804,379,168,964,100 | -6,798,224,026,913,035,000 | 49.170475 | 207 | 0.547588 | false |
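The create_membership_invoice method in the membership module above receives its product and price through the datas dictionary. Below is a minimal sketch of the expected payload; the product id, partner ids and registry access are hypothetical placeholders, and the actual call is commented out because it needs a running OpenERP server.

# Hypothetical payload for create_membership_invoice(); the ids below are made up.
datas = {
    'membership_product_id': 42,   # assumed id of a product flagged membership=True
    'amount': 100.0,               # membership price to charge on the invoice line
}
# invoice_ids = registry['res.partner'].create_membership_invoice(
#     cr, uid, partner_ids, datas=datas)   # needs a live OpenERP registry/cursor
print(datas)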
2014c2g5/2014c2
|
exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/result.py
|
727
|
6397
|
"""Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
#fixme brython
pass
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = io.StringIO()
self._stdout_buffer = io.StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
#fix me brython
#return '__unittest' in tb.tb_frame.f_globals
        return True  # for now, let's just return True for every level
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
|
gpl-2.0
| -8,360,454,040,264,944,000 | -7,838,147,491,687,614,000 | 31.805128 | 79 | 0.582304 | false |
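The failfast decorator in the result module above calls self.stop() before an error or failure is recorded whenever result.failfast is set. A small self-contained check of that behaviour with the standard library's unittest; the test case below is invented for illustration.

import unittest

class _Demo(unittest.TestCase):
    def test_a(self):
        self.fail("first failure")
    def test_b(self):
        self.fail("never reached when failfast is on")

suite = unittest.TestLoader().loadTestsFromTestCase(_Demo)
result = unittest.TestResult()
result.failfast = True          # the same flag the failfast decorator checks
suite.run(result)
print(result.testsRun, len(result.failures), result.shouldStop)
# expected: 1 1 True -- the run stops after the first recorded failure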
fperez/cython
|
tests/run/test_call.py
|
17
|
3139
|
import unittest
# The test cases here cover several paths through the function calling
# code. They depend on the METH_XXX flag that is used to define a C
# function, which can't be verified from Python. If the METH_XXX decl
# for a C function changes, these tests may not cover the right paths.
class CFunctionCalls(unittest.TestCase):
def test_varargs0(self):
self.assertRaises(TypeError, {}.__contains__)
def test_varargs1(self):
{}.__contains__(0)
def test_varargs2(self):
self.assertRaises(TypeError, {}.__contains__, 0, 1)
def test_varargs0_ext(self):
try:
{}.__contains__(*())
except TypeError:
pass
def test_varargs1_ext(self):
{}.__contains__(*(0,))
def test_varargs2_ext(self):
try:
{}.__contains__(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_varargs0_kw(self):
self.assertRaises(TypeError, {}.__contains__, x=2)
def test_varargs1_kw(self):
self.assertRaises(TypeError, {}.__contains__, x=2)
def test_varargs2_kw(self):
self.assertRaises(TypeError, {}.__contains__, x=2, y=2)
def test_oldargs0_0(self):
{}.keys()
def test_oldargs0_1(self):
self.assertRaises(TypeError, {}.keys, 0)
def test_oldargs0_2(self):
self.assertRaises(TypeError, {}.keys, 0, 1)
def test_oldargs0_0_ext(self):
{}.keys(*())
def test_oldargs0_1_ext(self):
try:
{}.keys(*(0,))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_2_ext(self):
try:
{}.keys(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
### Cython makes this a compile time error
# def test_oldargs0_0_kw(self):
# try:
# {}.keys(x=2)
# except TypeError:
# pass
# else:
# raise RuntimeError
def test_oldargs0_1_kw(self):
self.assertRaises(TypeError, {}.keys, x=2)
def test_oldargs0_2_kw(self):
self.assertRaises(TypeError, {}.keys, x=2, y=2)
def test_oldargs1_0(self):
self.assertRaises(TypeError, [].count)
def test_oldargs1_1(self):
[].count(1)
def test_oldargs1_2(self):
self.assertRaises(TypeError, [].count, 1, 2)
def test_oldargs1_0_ext(self):
try:
[].count(*())
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_1_ext(self):
[].count(*(1,))
def test_oldargs1_2_ext(self):
try:
[].count(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_0_kw(self):
self.assertRaises(TypeError, [].count, x=2)
def test_oldargs1_1_kw(self):
self.assertRaises(TypeError, [].count, {}, x=2)
def test_oldargs1_2_kw(self):
self.assertRaises(TypeError, [].count, x=2, y=2)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| 3,586,326,880,962,747,400 | -2,045,383,350,180,734,700 | 23.716535 | 70 | 0.547627 | false |
brean/arduino-kivy-bluetooth
|
glue/protocols/__init__.py
|
1
|
2145
|
# -*- coding: utf-8 -*-
"""
provide classes for different connection protocols
(bluetooth, tcp/ip, ...)
"""
protocols = {}
class Protocol(object):
def __init__(self, name):
"""
basic protocol interface
"""
self.name = name
def write(self, data):
"""
write data to connected system
"""
return False
def read(self):
"""
read data from connected system
"""
return None
try:
import bluetooth
class BluetoothSocket(bluetooth.BluetoothSocket, Protocol):
def __init__(self, config):
self.config = config
self.name = config['name']
super(BluetoothSocket, self).__init__()
print (config['addr'], config['port'])
self.connect((config['addr'], config['port']))
def write(self, data):
"""
write data to system
:param data: data to send to the system
"""
self.send(data)
def read(self):
"""
read data from system
:return: received data
"""
return self.recv(numbytes=4096)
def inWaiting(self):
# XXX replace this with some real waiting state detection
return 0
protocols['bluetooth'] = BluetoothSocket
except ImportError as err:
bluetooth = None
print 'can not import bluetooth', err
try:
import serial
class SerialSocket(Protocol):
def __init__(self, config):
self.ser = serial.Serial(config['addr'], config['baudrate'])
super(SerialSocket, self).__init__(self.ser.name)
def write(self, data):
self.ser.write(data)
def inWaiting(self):
        # XXX replace this with some real waiting state detection
return 0
protocols['serial'] = SerialSocket
except ImportError as err:
    serial = None
    print 'can not import serial', err
#sock = BTFirmataSock(bluetooth.RFCOMM)
#sock.connect((bd_addr, port))
#print 'Connected to {}'.format(bd_addr)
#sock.settimeout(1.0)
#board = BTArduino(sock)
|
mit
| -7,289,418,188,471,246,000 | -2,435,858,109,203,664,400 | 23.375 | 72 | 0.561305 | false |
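The protocols module above builds a name-to-class registry and only registers the transports whose libraries import successfully. Below is a rough, standard-library-only sketch of the same optional-import registry pattern; LoopbackSocket is an invented stand-in (not part of the module), and the serial registration is simplified compared to the config-dict wrapper above.

protocols = {}

class LoopbackSocket(object):
    """Invented stand-in for a transport: echoes written data back."""
    def __init__(self, config):
        self.name = config.get('name', 'loopback')
        self._buffer = []
    def write(self, data):
        self._buffer.append(data)
    def read(self):
        return self._buffer.pop(0) if self._buffer else None
    def inWaiting(self):
        return len(self._buffer)

protocols['loopback'] = LoopbackSocket

try:
    import serial                      # only registered if pyserial is installed
except ImportError:
    serial = None
else:
    protocols['serial'] = serial.Serial

sock = protocols['loopback']({'name': 'demo'})
sock.write(b'ping')
print(sock.read())                     # b'ping'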
infoxchange/lettuce
|
tests/integration/lib/Django-1.3/django/template/loaders/app_directories.py
|
229
|
2764
|
"""
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import os
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
from django.utils.importlib import import_module
# At compile time, cache the directories to search.
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
app_template_dirs = []
for app in settings.INSTALLED_APPS:
try:
mod = import_module(app)
except ImportError, e:
raise ImproperlyConfigured('ImportError %s: %s' % (app, e.args[0]))
template_dir = os.path.join(os.path.dirname(mod.__file__), 'templates')
if os.path.isdir(template_dir):
app_template_dirs.append(template_dir.decode(fs_encoding))
# It won't change, so convert it to a tuple to save memory.
app_template_dirs = tuple(app_template_dirs)
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = app_template_dirs
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template_source(self, template_name, template_dirs=None):
for filepath in self.get_template_sources(template_name, template_dirs):
try:
file = open(filepath)
try:
return (file.read().decode(settings.FILE_CHARSET), filepath)
finally:
file.close()
except IOError:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
# For backwards compatibility
import warnings
warnings.warn(
"'django.template.loaders.app_directories.load_template_source' is deprecated; use 'django.template.loaders.app_directories.Loader' instead.",
DeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = True
|
gpl-3.0
| -603,029,229,966,188,300 | 7,207,962,185,146,516,000 | 36.351351 | 150 | 0.667149 | false |
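The app_directories loader above relies on safe_join to reject template names that would resolve outside a template directory. Below is a rough standalone equivalent of that containment check using only os.path -- an illustration of the idea, not Django's actual implementation.

import os

def contained_join(base, *paths):
    """Join paths but refuse results that escape the base directory."""
    base = os.path.abspath(base)
    candidate = os.path.abspath(os.path.join(base, *paths))
    if candidate != base and not candidate.startswith(base + os.sep):
        raise ValueError("path %r is outside of %r" % (candidate, base))
    return candidate

print(contained_join("/tmp/templates", "app", "index.html"))
try:
    contained_join("/tmp/templates", "../../etc/passwd")
except ValueError as exc:
    print("rejected:", exc)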
semgroup5-project/opendlv.scaledcars
|
thirdparty/cxxtest/doc/examples/test_examples.py
|
50
|
2474
|
#-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
# Imports
import pyutilib.th as unittest
import glob
import os
from os.path import dirname, abspath, basename
import sys
import re
currdir = dirname(abspath(__file__))+os.sep
datadir = currdir
compilerre = re.compile("^(?P<path>[^:]+)(?P<rest>:.*)$")
dirre = re.compile("^([^%s]*/)*" % re.escape(os.sep))
xmlre = re.compile("\"(?P<path>[^\"]*/[^\"]*)\"")
datere = re.compile("date=\"[^\"]*\"")
failure = re.compile("^(?P<prefix>.+)file=\"(?P<path>[^\"]+)\"(?P<suffix>.*)$")
#print "FOO", dirre
def filter(line):
# for xml, remove prefixes from everything that looks like a
# file path inside ""
line = xmlre.sub(
lambda match: '"'+re.sub("^[^/]+/", "", match.group(1))+'"',
line
)
# Remove date info
line = datere.sub( lambda match: 'date=""', line)
if 'Running' in line:
return False
if "IGNORE" in line:
return True
pathmatch = compilerre.match(line) # see if we can remove the basedir
failmatch = failure.match(line) # see if we can remove the basedir
#print "HERE", pathmatch, failmatch
if failmatch:
parts = failmatch.groupdict()
#print "X", parts
line = "%s file=\"%s\" %s" % (parts['prefix'], dirre.sub("", parts['path']), parts['suffix'])
elif pathmatch:
parts = pathmatch.groupdict()
#print "Y", parts
line = dirre.sub("", parts['path']) + parts['rest']
return line
# Declare an empty TestCase class
class Test(unittest.TestCase): pass
if not sys.platform.startswith('win'):
# Find all *.sh files, and use them to define baseline tests
for file in glob.glob(datadir+'*.sh'):
bname = basename(file)
name=bname.split('.')[0]
if os.path.exists(datadir+name+'.txt'):
Test.add_baseline_test(cwd=datadir, cmd=file, baseline=datadir+name+'.txt', name=name, filter=filter)
# Execute the tests
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| -4,536,945,914,895,909,400 | -1,749,234,548,620,928,300 | 35.382353 | 113 | 0.581245 | false |
quantumlib/Cirq
|
examples/quantum_fourier_transform.py
|
1
|
2702
|
"""
Creates and simulates a circuit for Quantum Fourier Transform(QFT)
on a 4 qubit system.
In this example we demonstrate the Fourier transform of the
(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0) vector. To do so, we prepare
the input state of the qubits as |0000>.
=== EXAMPLE OUTPUT ===
Circuit:
(0, 0): ─H───@^0.5───×───H────────────@^0.5─────×───H──────────@^0.5──×─H
│ │ │ │ │ │
(0, 1): ─────@───────×───@^0.25───×───@─────────×───@^0.25───×──@─────×──
│ │ │ │
(1, 0): ─────────────────┼────────┼───@^0.125───×───┼────────┼───────────
│ │ │ │ │ │
(1, 1): ─────────────────@────────×───@─────────×───@────────×───────────
FinalState
[0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j
0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j 0.25+0.j]
"""
import numpy as np
import cirq
def main():
"""Demonstrates Quantum Fourier transform."""
# Create circuit
qft_circuit = generate_2x2_grid_qft_circuit()
print('Circuit:')
print(qft_circuit)
# Simulate and collect final_state
simulator = cirq.Simulator()
result = simulator.simulate(qft_circuit)
print()
print('FinalState')
print(np.around(result.final_state_vector, 3))
def _cz_and_swap(q0, q1, rot):
yield cirq.CZ(q0, q1) ** rot
yield cirq.SWAP(q0, q1)
# Create a quantum Fourier transform circuit for a 2*2 planar qubit architecture.
# Circuit is adopted from https://arxiv.org/pdf/quant-ph/0402196.pdf
def generate_2x2_grid_qft_circuit():
# Define a 2*2 square grid of qubits.
a, b, c, d = [
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 1),
cirq.GridQubit(1, 0),
]
circuit = cirq.Circuit(
cirq.H(a),
_cz_and_swap(a, b, 0.5),
_cz_and_swap(b, c, 0.25),
_cz_and_swap(c, d, 0.125),
cirq.H(a),
_cz_and_swap(a, b, 0.5),
_cz_and_swap(b, c, 0.25),
cirq.H(a),
_cz_and_swap(a, b, 0.5),
cirq.H(a),
strategy=cirq.InsertStrategy.EARLIEST,
)
return circuit
if __name__ == '__main__':
main()
|
apache-2.0
| -1,594,887,179,431,519,200 | -1,464,876,671,561,208,000 | 28.605263 | 79 | 0.480889 | false |
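The final state printed in the example output above is the uniform superposition, i.e. the (unitary) discrete Fourier transform of the basis vector (1,0,...,0). A quick numpy-only sanity check of that claim, with no cirq dependency:

import numpy as np

basis = np.zeros(16)
basis[0] = 1.0
# Unitary DFT convention: forward FFT scaled by 1/sqrt(N)
qft_of_basis = np.fft.fft(basis) / np.sqrt(len(basis))
print(np.allclose(qft_of_basis, 0.25))  # True: every amplitude is 0.25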
gangadharkadam/sher
|
erpnext/hr/report/employee_birthday/employee_birthday.py
|
25
|
1331
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
def execute(filters=None):
if not filters: filters = {}
columns = get_columns()
data = get_employees(filters)
return columns, data
def get_columns():
return [
_("Employee") + ":Link/Employee:120", _("Name") + ":Data:200", _("Date of Birth")+ ":Date:100",
_("Branch") + ":Link/Branch:120", _("Department") + ":Link/Department:120",
_("Designation") + ":Link/Designation:120", _("Gender") + "::60", _("Company") + ":Link/Company:120"
]
def get_employees(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select name, employee_name, date_of_birth,
branch, department, designation,
gender, company from tabEmployee where status = 'Active' %s""" % conditions, as_list=1)
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
conditions += " and month(date_of_birth) = '%s'" % month
if filters.get("company"): conditions += " and company = '%s'" % \
filters["company"].replace("'", "\\'")
return conditions
|
agpl-3.0
| -5,858,700,739,603,223,000 | -4,338,661,128,879,666,700 | 32.275 | 102 | 0.652141 | false |
giggsey/SickRage
|
lib/sqlalchemy/dialects/drizzle/mysqldb.py
|
154
|
1270
|
"""
.. dialect:: drizzle+mysqldb
:name: MySQL-Python
:dbapi: mysqldb
:connectstring: drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
"""
from sqlalchemy.dialects.drizzle.base import (
DrizzleDialect,
DrizzleExecutionContext,
DrizzleCompiler,
DrizzleIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector)
class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext,
DrizzleExecutionContext):
pass
class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler):
pass
class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer,
DrizzleIdentifierPreparer):
pass
class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
execution_ctx_cls = DrizzleExecutionContext_mysqldb
statement_compiler = DrizzleCompiler_mysqldb
preparer = DrizzleIdentifierPreparer_mysqldb
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return 'utf8'
dialect = DrizzleDialect_mysqldb
|
gpl-3.0
| 7,450,810,432,462,334,000 | 3,664,333,136,075,221,000 | 25.458333 | 80 | 0.713386 | false |
TalShafir/ansible
|
lib/ansible/modules/cloud/scaleway/scaleway_sshkey.py
|
75
|
4776
|
#!/usr/bin/python
#
# Scaleway SSH keys management module
#
# Copyright (C) 2018 Online SAS.
# https://www.scaleway.com
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: scaleway_sshkey
short_description: Scaleway SSH keys management module
version_added: "2.6"
author: Remy Leone (@sieben)
description:
- This module manages SSH keys on Scaleway account
U(https://developer.scaleway.com)
extends_documentation_fragment: scaleway
options:
state:
description:
- Indicate desired state of the SSH key.
default: present
choices:
- present
- absent
ssh_pub_key:
description:
- The public SSH key as a string to add.
required: true
api_url:
description:
- Scaleway API URL
default: 'https://account.scaleway.com'
aliases: ['base_url']
'''
EXAMPLES = '''
- name: "Add SSH key"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "present"
- name: "Delete SSH key"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "absent"
- name: "Add SSH key with explicit token"
scaleway_sshkey:
ssh_pub_key: "ssh-rsa AAAA..."
state: "present"
oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
'''
RETURN = '''
data:
description: This is only present when C(state=present)
returned: when C(state=present)
type: dict
sample: {
"ssh_public_keys": [
{"key": "ssh-rsa AAAA...."}
]
}
'''
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.scaleway import scaleway_argument_spec, Scaleway
def extract_present_sshkeys(raw_organization_dict):
ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
return ssh_key_lookup
def extract_user_id(raw_organization_dict):
return raw_organization_dict["organizations"][0]["users"][0]["id"]
def sshkey_user_patch(ssh_lookup):
ssh_list = {"ssh_public_keys": [{"key": key}
for key in ssh_lookup]}
return ssh_list
def core(module):
ssh_pub_key = module.params['ssh_pub_key']
state = module.params["state"]
account_api = Scaleway(module)
response = account_api.get('organizations')
status_code = response.status_code
organization_json = response.json
if not response.ok:
module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
status_code, response.json['message']))
user_id = extract_user_id(organization_json)
present_sshkeys = []
try:
present_sshkeys = extract_present_sshkeys(organization_json)
except (KeyError, IndexError) as e:
module.fail_json(changed=False, data="Error while extracting present SSH keys from API")
if state in ('present',):
if ssh_pub_key in present_sshkeys:
module.exit_json(changed=False)
# If key not found create it!
if module.check_mode:
module.exit_json(changed=True)
present_sshkeys.append(ssh_pub_key)
payload = sshkey_user_patch(present_sshkeys)
response = account_api.patch('/users/%s' % user_id, data=payload)
if response.ok:
module.exit_json(changed=True, data=response.json)
module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
response.status_code, response.json))
elif state in ('absent',):
if ssh_pub_key not in present_sshkeys:
module.exit_json(changed=False)
if module.check_mode:
module.exit_json(changed=True)
present_sshkeys.remove(ssh_pub_key)
payload = sshkey_user_patch(present_sshkeys)
response = account_api.patch('/users/%s' % user_id, data=payload)
if response.ok:
module.exit_json(changed=True, data=response.json)
module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
response.status_code, response.json))
def main():
argument_spec = scaleway_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['absent', 'present']),
ssh_pub_key=dict(required=True),
api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
core(module)
if __name__ == '__main__':
main()
|
gpl-3.0
| 5,600,560,704,245,844,000 | 415,041,264,346,063,600 | 26.448276 | 125 | 0.636097 | false |
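The Scaleway module above always PATCHes the complete ssh_public_keys list back to /users/<user_id>. Since the payload builder is a pure function, its output can be sketched without Ansible; the keys below are placeholders, not real keys.

def sshkey_user_patch(ssh_lookup):
    # Same shape as the helper in the module above: a list of {"key": ...} dicts.
    return {"ssh_public_keys": [{"key": key} for key in ssh_lookup]}

present_sshkeys = ["ssh-rsa AAAA...alice", "ssh-rsa AAAA...bob"]  # placeholder keys
present_sshkeys.append("ssh-rsa AAAA...new")                      # what state=present does
print(sshkey_user_patch(present_sshkeys))
# {'ssh_public_keys': [{'key': 'ssh-rsa AAAA...alice'}, {'key': 'ssh-rsa AAAA...bob'}, ...]}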
jrha/aquilon
|
tests/broker/test_update_cluster.py
|
2
|
3584
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update cluster command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateCluster(TestBrokerCommand):
def test_100_updatenoop(self):
self.noouttest(["update_cluster", "--cluster=utgrid1",
"--down_hosts_threshold=2%"])
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 0 (2%)", command)
self.matchoutput(out, "Maintenance Threshold: 0 (6%)", command)
def test_200_updateutgrid1(self):
command = ["update_cluster", "--cluster=utgrid1",
"--down_hosts_threshold=2"]
self.noouttest(command)
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 2", command)
self.matchoutput(out, "Maintenance Threshold: 0 (6%)", command)
def test_300_update_maint_threshold(self):
command = ["update_cluster", "--cluster=utgrid1",
"--maint_threshold=50%"]
self.noouttest(command)
command = "show cluster --cluster utgrid1 --format proto"
out = self.commandtest(command.split(" "))
cluslist = self.parse_clusters_msg(out)
cluster = cluslist.clusters[0]
self.assertEqual(cluster.name, "utgrid1")
self.assertEqual(cluster.threshold, 2)
self.assertEqual(cluster.threshold_is_percent, False)
self.assertEqual(cluster.maint_threshold, 50)
self.assertEqual(cluster.maint_threshold_is_percent, True)
command = ["update_cluster", "--cluster=utgrid1",
"--maint_threshold=50"]
self.noouttest(command)
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 2", command)
self.matchoutput(out, "Maintenance Threshold: 50", command)
command = ["update_cluster", "--cluster=utgrid1",
"--maint_threshold=0%"]
self.noouttest(command)
command = "show cluster --cluster utgrid1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Grid Cluster: utgrid1", command)
self.matchoutput(out, "Down Hosts Threshold: 2", command)
self.matchoutput(out, "Maintenance Threshold: 0 (0%)", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateCluster)
unittest.TextTestRunner(verbosity=2).run(suite)
|
apache-2.0
| 3,033,870,961,586,470,000 | -1,715,081,624,662,824,700 | 38.822222 | 74 | 0.654576 | false |
zhaochao/fuel-web
|
tasklib/tasklib/tests/functional/test_run_exec.py
|
4
|
2108
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tasklib.tests import base
from tasklib.utils import STATUS
class TestFunctionalExecTasks(base.BaseFunctionalTest):
"""Each test will follow next pattern:
1. Run test with provided name - taskcmd -c conf.yaml run test/test
2. check status of task
"""
def test_simple_run(self):
exit_code, out, err = self.execute(['run', 'exec/simple'])
self.assertEqual(exit_code, 0)
exit_code, out, err = self.execute(['status', 'exec/simple'])
self.assertEqual(out.strip('\n'), STATUS.end.name)
self.assertEqual(exit_code, 0)
def test_failed_run(self):
exit_code, out, err = self.execute(['run', 'exec/fail'])
self.assertEqual(exit_code, 2)
exit_code, out, err = self.execute(['status', 'exec/fail'])
self.assertEqual(out.strip('\n'), STATUS.failed.name)
self.assertEqual(exit_code, 2)
def test_error(self):
exit_code, out, err = self.execute(['run', 'exec/error'])
self.assertEqual(exit_code, 3)
exit_code, out, err = self.execute(['status', 'exec/error'])
self.assertEqual(out.strip('\n'), STATUS.error.name)
self.assertEqual(exit_code, 3)
def test_notfound(self):
exit_code, out, err = self.execute(['run', 'exec/notfound'])
self.assertEqual(exit_code, 4)
exit_code, out, err = self.execute(['status', 'exec/notfound'])
self.assertEqual(out.strip('\n'), STATUS.notfound.name)
self.assertEqual(exit_code, 4)
|
apache-2.0
| 376,785,520,220,731,700 | 4,674,240,132,444,002,000 | 40.333333 | 78 | 0.655123 | false |
hell03610/python-koans
|
python3/koans/about_regex.py
|
34
|
4795
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import re
class AboutRegex(Koan):
"""
    These koans are based on Ben's book: Regular Expressions in 10 Minutes.
    I found this book very useful, so I decided to write these koans in order to practice everything I had learned from it.
http://www.forta.com/books/0672325667/
"""
def test_matching_literal_text(self):
"""
Lesson 1 Matching Literal String
"""
string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes."
m = re.search(__, string)
self.assertTrue(m and m.group(0) and m.group(0)== 'Felix', "I want my name")
def test_matching_literal_text_how_many(self):
"""
Lesson 1 How many matches?
        The default behaviour of most regular expression engines is to return just the first match.
        In Python you have the following options:
match() --> Determine if the RE matches at the beginning of the string.
search() --> Scan through a string, looking for any location where this RE matches.
findall() --> Find all substrings where the RE matches, and returns them as a list.
finditer() --> Find all substrings where the RE matches, and returns them as an iterator.
"""
string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes. Repeat My name is Felix"
        m = re.match('Felix', string) #TIP: Maybe match() is not the best option
# I want to know how many times appears my name
self.assertEqual(m, __)
def test_matching_literal_text_not_case_sensitivity(self):
"""
        Lesson 1 Matching literal strings without case sensitivity.
        Most regex implementations also support matches that are not case sensitive. In Python you can use re.IGNORECASE, in
        JavaScript you can specify the optional i flag.
In Ben's book you can see more languages.
"""
string = "Hello, my name is Felix or felix and this koans is based on the Ben's book: Regular Expressions in 10 minutes."
self.assertEqual(re.findall("felix", string, 20), __)
self.assertEqual(re.findall("felix", string, 10), __)
def test_matching_any_character(self):
"""
Lesson 1 Matching any character
        . matches any single character: alphabetic characters, digits, even the literal '.' itself.
"""
string = "pecks.xlx\n" \
+ "orders1.xls\n" \
+ "apec1.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls"
# TIP: remember the name of this lesson
        change_this_search_string = 'a..xlx' # <-- change this pattern so it matches three of the file names above
self.assertEquals(len(re.findall(change_this_search_string, string)),3)
def test_matching_set_character(self):
"""
Lesson 2 Matching sets of characters
A set of characters is defined using the metacharacters [ and ]. Everything between them is part of the set and
any one of the set members must match (but not all).
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
# I want to find all files for North America(na) or South America(sa), but not (ca)
        # TIP: you could use the pattern .a., which matched in the test above, but here it matches more than you want
change_this_search_string = '[nsc]a[2-9].xls'
self.assertEquals(len(re.findall(change_this_search_string, string)),3)
def test_anything_but_matching(self):
"""
Lesson 2 Using character set ranges
        Occasionally, you'll want a list of characters that you don't want to match.
Character sets can be negated using the ^ metacharacter.
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "sales3.xls\n" \
+ "europe2.xls\n" \
+ "sam.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
# I want to find the name sam
change_this_search_string = '[^nc]am'
self.assertEquals(re.findall(change_this_search_string, string), ['sam.xls'])
|
mit
| -3,349,174,802,293,685,000 | 9,186,544,918,875,644,000 | 40.336207 | 145 | 0.561001 | false |
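For reference while working through the koans above, here is a short runnable summary of the behaviours they exercise -- match versus search versus findall, character sets, and negated sets. The sample string is invented for the demo.

import re

text = "na1.xls na2.xls sa1.xls ca1.xls sam.xls Sales.xls"

print(re.match("sa", text))             # None: match() only looks at the start
print(re.search("sa", text).group(0))   # 'sa': search() scans the whole string
print(re.findall("[ns]a[0-9]", text))   # ['na1', 'na2', 'sa1']: the set matches n or s, not c
print(re.findall("[^nc]am", text))      # ['sam']: ^ inside [] negates the set
print(re.findall("sales", text, re.IGNORECASE))  # ['Sales']: the flag makes it case-insensitive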
ogenstad/ansible
|
lib/ansible/plugins/lookup/subelements.py
|
64
|
6146
|
# (c) 2013, Serge van Ginderachter <[email protected]>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: subelements
author: Serge van Ginderachter <[email protected]>
version_added: "1.4"
short_description: traverse nested key from a list of dictionaries
description:
- Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those records.
options:
_terms:
description: tuple of list of dictionaries and dictionary key to extract
required: True
skip_missing:
default: False
description:
        - If set to True, the lookup plugin will skip the list items that do not contain the given subkey.
If False, the plugin will yield an error and complain about the missing subkey.
"""
EXAMPLES = """
- name: show var structure as it is needed for example to make sense
hosts: all
vars:
users:
- name: alice
authorized:
- /tmp/alice/onekey.pub
- /tmp/alice/twokey.pub
mysql:
password: mysql-password
hosts:
- "%"
- "127.0.0.1"
- "::1"
- "localhost"
privs:
- "*.*:SELECT"
- "DB1.*:ALL"
groups:
- wheel
- name: bob
authorized:
- /tmp/bob/id_rsa.pub
mysql:
password: other-mysql-password
hosts:
- "db1"
privs:
- "*.*:SELECT"
- "DB2.*:ALL"
tasks:
- name: Set authorized ssh key, extracting just that data from 'users'
authorized_key:
user: "{{ item.0.name }}"
key: "{{ lookup('file', item.1) }}"
with_subelements:
- "{{ users }}"
- authorized
- name: Setup MySQL users, given the mysql hosts and privs subkey lists
mysql_user:
name: "{{ item.0.name }}"
password: "{{ item.0.mysql.password }}"
host: "{{ item.1 }}"
priv: "{{ item.0.mysql.privs | join('/') }}"
with_subelements:
- "{{ users }}"
- mysql.hosts
  - name: list groups for users that have them, don't error if they don't
debug: var=item
with_list: "{{lookup('subelements', users, 'groups', 'skip_missing=True')}}"
"""
RETURN = """
_list:
description: list of subelements extracted
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
def _raise_terms_error(msg=""):
raise AnsibleError(
"subelements lookup expects a list of two or three items, " + msg)
terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
# check lookup terms - check number of terms
if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
_raise_terms_error()
# first term should be a list (or dict), second a string holding the subkey
if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
_raise_terms_error("first a dict or a list, second a string pointing to the subkey")
subelements = terms[1].split(".")
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped', False) is not False:
# the registered result was completely skipped
return []
elementlist = []
for key in terms[0]:
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
# check for optional flags in third term
flags = {}
if len(terms) == 3:
flags = terms[2]
if not isinstance(flags, dict) and not all([isinstance(key, string_types) and key in FLAGS for key in flags]):
_raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
# build_items
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
if item0.get('skipped', False) is not False:
# this particular item is to be skipped
continue
skip_missing = boolean(flags.get('skip_missing', False), strict=False)
subvalue = item0
lastsubkey = False
sublist = []
for subkey in subelements:
if subkey == subelements[-1]:
lastsubkey = True
if subkey not in subvalue:
if skip_missing:
continue
else:
raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
if not lastsubkey:
if not isinstance(subvalue[subkey], dict):
if skip_missing:
continue
else:
raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
else:
subvalue = subvalue[subkey]
else: # lastsubkey
if not isinstance(subvalue[subkey], list):
raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
else:
sublist = subvalue.pop(subkey, [])
for item1 in sublist:
ret.append((item0, item1))
return ret
|
gpl-3.0
| -6,868,708,469,358,533,000 | 8,310,128,118,211,017,000 | 35.583333 | 142 | 0.555158 | false |
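The subelements lookup above effectively pairs every outer record with each entry of its nested list. Below is a plain-Python sketch of that behaviour for a trimmed version of the users structure from the EXAMPLES block; it is a simplified stand-in that ignores dotted sub-keys and flag parsing.

users = [
    {"name": "alice", "authorized": ["/tmp/alice/onekey.pub", "/tmp/alice/twokey.pub"]},
    {"name": "bob", "authorized": ["/tmp/bob/id_rsa.pub"]},
]

def subelements(records, subkey, skip_missing=False):
    """Yield (record, entry) pairs for every entry of record[subkey]."""
    for record in records:
        if subkey not in record:
            if skip_missing:
                continue
            raise KeyError("could not find %r in %r" % (subkey, record))
        for entry in record[subkey]:
            yield record, entry

for user, key in subelements(users, "authorized"):
    print(user["name"], key)
# alice /tmp/alice/onekey.pub
# alice /tmp/alice/twokey.pub
# bob /tmp/bob/id_rsa.pub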
andybab/Impala
|
tests/query_test/test_hbase_queries.py
|
8
|
1079
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Targeted Impala HBase Tests
#
import logging
import pytest
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestHBaseQueries(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestHBaseQueries, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(\
lambda v: v.get_value('table_format').file_format == 'hbase')
def test_hbase_scan_node(self, vector):
self.run_test_case('QueryTest/hbase-scan-node', vector)
def test_hbase_row_key(self, vector):
self.run_test_case('QueryTest/hbase-rowkeys', vector)
def test_hbase_filters(self, vector):
self.run_test_case('QueryTest/hbase-filters', vector)
def test_hbase_subquery(self, vector):
self.run_test_case('QueryTest/hbase-subquery', vector)
@pytest.mark.execute_serially
def test_hbase_inserts(self, vector):
self.run_test_case('QueryTest/hbase-inserts', vector)
|
apache-2.0
| -5,978,959,063,576,463,000 | 7,200,931,678,518,718,000 | 29.828571 | 69 | 0.727525 | false |
nozuono/calibre-webserver
|
src/calibre/ebooks/pdb/ereader/writer.py
|
24
|
10728
|
# -*- coding: utf-8 -*-
'''
Write content to ereader pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import re
import struct
import zlib
try:
from PIL import Image
Image
except ImportError:
import Image
import cStringIO
from calibre.ebooks.pdb.formatwriter import FormatWriter
from calibre.ebooks.pdb.header import PdbHeaderBuilder
from calibre.ebooks.pml.pmlml import PMLMLizer
IDENTITY = 'PNRdPPrs'
# This is an arbitrary number that is small enough to work. The actual maximum
# record size is unknown.
MAX_RECORD_SIZE = 8192
class Writer(FormatWriter):
def __init__(self, opts, log):
self.opts = opts
self.log = log
def write_content(self, oeb_book, out_stream, metadata=None):
pmlmlizer = PMLMLizer(self.log)
pml = unicode(pmlmlizer.extract_content(oeb_book, self.opts)).encode('cp1252', 'replace')
text, text_sizes = self._text(pml)
chapter_index = self._index_item(r'(?s)\\C(?P<val>[0-4])="(?P<text>.+?)"', pml)
chapter_index += self._index_item(r'(?s)\\X(?P<val>[0-4])(?P<text>.+?)\\X[0-4]', pml)
chapter_index += self._index_item(r'(?s)\\x(?P<text>.+?)\\x', pml)
link_index = self._index_item(r'(?s)\\Q="(?P<text>.+?)"', pml)
images = self._images(oeb_book.manifest, pmlmlizer.image_hrefs)
metadata = [self._metadata(metadata)]
hr = [self._header_record(len(text), len(chapter_index), len(link_index), len(images))]
'''
Record order as generated by Dropbook.
1. eReader Header
2. Compressed text
3. Small font page index
4. Large font page index
5. Chapter index
6. Links index
7. Images
        8. (Extrapolation: there should be one more record type here, though it has not yet been determined what it is.)
9. Metadata
10. Sidebar records
11. Footnote records
12. Text block size record
13. "MeTaInFo\x00" word record
'''
sections = hr+text+chapter_index+link_index+images+metadata+[text_sizes]+['MeTaInFo\x00']
lengths = [len(i) if i not in images else len(i[0]) + len(i[1]) for i in sections]
pdbHeaderBuilder = PdbHeaderBuilder(IDENTITY, metadata[0].partition('\x00')[0])
pdbHeaderBuilder.build_header(lengths, out_stream)
for item in sections:
if item in images:
out_stream.write(item[0])
out_stream.write(item[1])
else:
out_stream.write(item)
def _text(self, pml):
pml_pages = []
text_sizes = ''
index = 0
while index < len(pml):
'''
Split on the space character closest to MAX_RECORD_SIZE when possible.
'''
split = pml.rfind(' ', index, MAX_RECORD_SIZE)
if split == -1:
len_end = len(pml[index:])
if len_end > MAX_RECORD_SIZE:
split = MAX_RECORD_SIZE
else:
split = len_end
if split == 0:
split = 1
pml_pages.append(zlib.compress(pml[index:index+split]))
text_sizes += struct.pack('>H', split)
index += split
return pml_pages, text_sizes
def _index_item(self, regex, pml):
index = []
for mo in re.finditer(regex, pml):
item = ''
if 'text' in mo.groupdict().keys():
item += struct.pack('>L', mo.start())
text = mo.group('text')
# Strip all PML tags from text
text = re.sub(r'\\U[0-9a-z]{4}', '', text)
text = re.sub(r'\\a\d{3}', '', text)
text = re.sub(r'\\.', '', text)
# Add appropriate spacing to denote the various levels of headings
if 'val' in mo.groupdict().keys():
text = '%s%s' % (' ' * 4 * int(mo.group('val')), text)
item += text
item += '\x00'
if item:
index.append(item)
return index
def _images(self, manifest, image_hrefs):
'''
Image format.
0-4 : 'PNG '. There must be a space after PNG.
4-36 : Image name. Must be exactly 32 bytes long. Pad with \x00 for names shorter than 32 bytes
36-58 : Unknown.
58-60 : Width.
60-62 : Height.
62-...: Raw image data in 8 bit PNG format.
'''
images = []
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
for item in manifest:
if item.media_type in OEB_RASTER_IMAGES and item.href in image_hrefs.keys():
try:
im = Image.open(cStringIO.StringIO(item.data)).convert('P')
im.thumbnail((300,300), Image.ANTIALIAS)
data = cStringIO.StringIO()
im.save(data, 'PNG')
data = data.getvalue()
header = 'PNG '
header += image_hrefs[item.href].ljust(32, '\x00')[:32]
header = header.ljust(58, '\x00')
header += struct.pack('>HH', im.size[0], im.size[1])
header = header.ljust(62, '\x00')
if len(data) + len(header) < 65505:
images.append((header, data))
except Exception as e:
                    self.log.error('Error: Could not include file %s because ' \
'%s.' % (item.href, e))
return images
def _metadata(self, metadata):
'''
Metadata takes the form:
title\x00
author\x00
copyright\x00
publisher\x00
isbn\x00
'''
title = _('Unknown')
author = _('Unknown')
copyright = ''
publisher = ''
isbn = ''
if metadata:
if len(metadata.title) >= 1:
title = metadata.title[0].value
if len(metadata.creator) >= 1:
from calibre.ebooks.metadata import authors_to_string
author = authors_to_string([x.value for x in metadata.creator])
if len(metadata.rights) >= 1:
copyright = metadata.rights[0].value
if len(metadata.publisher) >= 1:
publisher = metadata.publisher[0].value
return '%s\x00%s\x00%s\x00%s\x00%s\x00' % (title, author, copyright, publisher, isbn)
def _header_record(self, text_count, chapter_count, link_count, image_count):
'''
text_count = the number of text pages
image_count = the number of images
'''
compression = 10 # zlib compression.
non_text_offset = text_count + 1
chapter_offset = non_text_offset
link_offset = chapter_offset + chapter_count
if image_count > 0:
image_data_offset = link_offset + link_count
meta_data_offset = image_data_offset + image_count
last_data_offset = meta_data_offset + 1
else:
meta_data_offset = link_offset + link_count
last_data_offset = meta_data_offset + 1
image_data_offset = last_data_offset
if chapter_count == 0:
chapter_offset = last_data_offset
if link_count == 0:
link_offset = last_data_offset
record = ''
record += struct.pack('>H', compression) # [0:2] # Compression. Specifies compression and drm. 2 = palmdoc, 10 = zlib. 260 and 272 = DRM
record += struct.pack('>H', 0) # [2:4] # Unknown.
record += struct.pack('>H', 0) # [4:6] # Unknown.
record += struct.pack('>H', 25152) # [6:8] # 25152 is MAGIC. Somehow represents the cp1252 encoding of the text
record += struct.pack('>H', 0) # [8:10] # Number of small font pages. 0 if page index is not built.
record += struct.pack('>H', 0) # [10:12] # Number of large font pages. 0 if page index is not built.
record += struct.pack('>H', non_text_offset) # [12:14] # Non-Text record start.
record += struct.pack('>H', chapter_count) # [14:16] # Number of chapter index records.
record += struct.pack('>H', 0) # [16:18] # Number of small font page index records.
record += struct.pack('>H', 0) # [18:20] # Number of large font page index records.
record += struct.pack('>H', image_count) # [20:22] # Number of images.
record += struct.pack('>H', link_count) # [22:24] # Number of links.
record += struct.pack('>H', 1) # [24:26] # 1 if has metadata, 0 if not.
record += struct.pack('>H', 0) # [26:28] # Unknown.
record += struct.pack('>H', 0) # [28:30] # Number of Footnotes.
record += struct.pack('>H', 0) # [30:32] # Number of Sidebars.
record += struct.pack('>H', chapter_offset) # [32:34] # Chapter index offset.
record += struct.pack('>H', 2560) # [34:36] # 2560 is MAGIC.
record += struct.pack('>H', last_data_offset) # [36:38] # Small font page offset. This will be the last data offset if there are none.
record += struct.pack('>H', last_data_offset) # [38:40] # Large font page offset. This will be the last data offset if there are none.
record += struct.pack('>H', image_data_offset) # [40:42] # Image offset. This will be the last data offset if there are none.
record += struct.pack('>H', link_offset) # [42:44] # Links offset. This will be the last data offset if there are none.
record += struct.pack('>H', meta_data_offset) # [44:46] # Metadata offset. This will be the last data offset if there are none.
record += struct.pack('>H', 0) # [46:48] # Unknown.
record += struct.pack('>H', last_data_offset) # [48:50] # Footnote offset. This will be the last data offset if there are none.
record += struct.pack('>H', last_data_offset) # [50:52] # Sidebar offset. This will be the last data offset if there are none.
record += struct.pack('>H', last_data_offset) # [52:54] # Last data offset.
for i in range(54, 132, 2):
record += struct.pack('>H', 0) # [54:132]
return record
|
gpl-3.0
| 5,998,684,638,693,554,000 | 6,800,376,132,233,548,000 | 41.741036 | 158 | 0.5261 | false |
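The eReader writer above splits the PML text into records of at most MAX_RECORD_SIZE bytes (preferring to break on a space), zlib-compresses each record, and packs every uncompressed length as a big-endian unsigned short. Below is a simplified standalone sketch of that step on dummy text; it anchors the search window to the current index rather than mirroring the original loop exactly.

import struct
import zlib

MAX_RECORD_SIZE = 8192  # same arbitrary limit as the writer above

def split_and_compress(pml):
    """Simplified version of Writer._text: compressed pages plus a size table."""
    pages, size_table = [], b''
    index = 0
    while index < len(pml):
        # prefer to break on a space inside the current window
        split = pml.rfind(b' ', index, index + MAX_RECORD_SIZE)
        if split <= index:
            split = min(index + MAX_RECORD_SIZE, len(pml))
        chunk = pml[index:split]
        pages.append(zlib.compress(chunk))
        size_table += struct.pack('>H', len(chunk))  # big-endian unsigned short
        index = split
    return pages, size_table

pages, sizes = split_and_compress(b'word ' * 5000)   # ~25 KB of dummy text
print(len(pages), [len(p) for p in pages], len(sizes) // 2)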
lsqtongxin/django
|
django/utils/log.py
|
116
|
5216
|
from __future__ import unicode_literals
import logging
import logging.config # needed when logging_config doesn't start with logging.config
import sys
import warnings
from copy import copy
from django.conf import settings
from django.core import mail
from django.core.mail import get_connection
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.module_loading import import_string
from django.views.debug import ExceptionReporter
# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded by means of the NullHandler (DEBUG=False).
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'mail_admins'],
},
'py.warnings': {
'handlers': ['console'],
},
}
}
def configure_logging(logging_config, logging_settings):
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
# RemovedInNextVersionWarning is a subclass of DeprecationWarning which
# is hidden by default, hence we force the "default" behavior
warnings.simplefilter("default", RemovedInNextVersionWarning)
if logging_config:
# First find the logging configuration function ...
logging_config_func = import_string(logging_config)
logging.config.dictConfig(DEFAULT_LOGGING)
# ... then invoke it with the logging settings
if logging_settings:
logging_config_func(logging_settings)
class AdminEmailHandler(logging.Handler):
"""An exception log handler that emails log entries to site admins.
If the request is passed as the first argument to the log record,
request data will be provided in the email report.
"""
def __init__(self, include_html=False, email_backend=None):
logging.Handler.__init__(self)
self.include_html = include_html
self.email_backend = email_backend
def emit(self, record):
try:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
else 'EXTERNAL'),
record.getMessage()
)
except Exception:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
request = None
subject = self.format_subject(subject)
# Since we add a nicely formatted traceback on our own, create a copy
# of the log record without the exception data.
no_exc_record = copy(record)
no_exc_record.exc_info = None
no_exc_record.exc_text = None
if record.exc_info:
exc_info = record.exc_info
else:
exc_info = (None, record.getMessage(), None)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
message = "%s\n\n%s" % (self.format(no_exc_record), reporter.get_traceback_text())
html_message = reporter.get_traceback_html() if self.include_html else None
self.send_mail(subject, message, fail_silently=True, html_message=html_message)
def send_mail(self, subject, message, *args, **kwargs):
mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs)
def connection(self):
return get_connection(backend=self.email_backend, fail_silently=True)
def format_subject(self, subject):
"""
Escape CR and LF characters, and limit length.
RFC 2822's hard limit is 998 characters per line. So, minus "Subject: "
the actual subject must be no longer than 989 characters.
"""
formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
return formatted_subject[:989]
class CallbackFilter(logging.Filter):
"""
A logging filter that checks the return value of a given callable (which
takes the record-to-be-logged as its only parameter) to decide whether to
log a record.
"""
def __init__(self, callback):
self.callback = callback
def filter(self, record):
if self.callback(record):
return 1
return 0
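# Illustrative sketch only: attaching a CallbackFilter to a handler with an
# arbitrary predicate. The predicate name and the message prefix it checks are
# hypothetical.
#
#     def skip_suspicious_hosts(record):
#         return not record.getMessage().startswith('Invalid HTTP_HOST')
#
#     handler.addFilter(CallbackFilter(skip_suspicious_hosts))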
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return settings.DEBUG
|
bsd-3-clause
| -4,263,743,603,880,784,400 | -9,145,571,728,809,754,000 | 32.435897 | 90 | 0.624425 | false |
mchdks/python-social-auth
|
social/backends/soundcloud.py
|
83
|
2156
|
"""
Soundcloud OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/soundcloud.html
"""
from social.p3 import urlencode
from social.backends.oauth import BaseOAuth2
class SoundcloudOAuth2(BaseOAuth2):
"""Soundcloud OAuth authentication backend"""
name = 'soundcloud'
AUTHORIZATION_URL = 'https://soundcloud.com/connect'
ACCESS_TOKEN_URL = 'https://api.soundcloud.com/oauth2/token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
REDIRECT_STATE = False
EXTRA_DATA = [
('id', 'id'),
('refresh_token', 'refresh_token'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Return user details from Soundcloud account"""
fullname, first_name, last_name = self.get_user_names(
response.get('full_name')
)
return {'username': response.get('username'),
'email': response.get('email') or '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('https://api.soundcloud.com/me.json',
params={'oauth_token': access_token})
def auth_url(self):
"""Return redirect url"""
state = None
if self.STATE_PARAMETER or self.REDIRECT_STATE:
# Store state in session for further request validation. The state
# value is passed as state parameter (as specified in OAuth2 spec),
# but also added to redirect_uri, that way we can still verify the
# request if the provider doesn't implement the state parameter.
# Reuse token if any.
name = self.name + '_state'
state = self.strategy.session_get(name) or self.state_token()
self.strategy.session_set(name, state)
params = self.auth_params(state)
params.update(self.get_scope_argument())
params.update(self.auth_extra_arguments())
return self.AUTHORIZATION_URL + '?' + urlencode(params)
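# Illustrative sketch only (not part of this backend): a typical Django setup
# adds the backend to AUTHENTICATION_BACKENDS and supplies the OAuth2
# credentials through the usual SOCIAL_AUTH_<NAME>_KEY/SECRET settings; the
# values below are placeholders.
#
#     AUTHENTICATION_BACKENDS = (
#         'social.backends.soundcloud.SoundcloudOAuth2',
#         'django.contrib.auth.backends.ModelBackend',
#     )
#     SOCIAL_AUTH_SOUNDCLOUD_KEY = '<client-id>'
#     SOCIAL_AUTH_SOUNDCLOUD_SECRET = '<client-secret>'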
|
bsd-3-clause
| 8,942,156,569,665,987,000 | 2,327,774,875,514,811,400 | 38.2 | 79 | 0.607607 | false |
mcgee/ns-3
|
doc/manual/source/conf.py
|
75
|
7047
|
# -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2010, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Manual'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-manual.tex', u'ns-3 Manual',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{amssymb}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-manual', u'ns-3 Manual',
[u'ns-3 project'], 1)
]
|
gpl-2.0
| -7,263,439,851,065,173,000 | 130,291,103,733,865,730 | 31.625 | 80 | 0.704981 | false |
ardumont/pygit2
|
test/test_repository.py
|
1
|
21901
|
# -*- coding: UTF-8 -*-
#
# Copyright 2010-2014 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Repository objects."""
# Import from the future
from __future__ import absolute_import
from __future__ import unicode_literals
# Import from the Standard Library
import binascii
import unittest
import tempfile
import os
from os.path import join, realpath
import sys
# Import from pygit2
from pygit2 import GIT_OBJ_ANY, GIT_OBJ_BLOB, GIT_OBJ_COMMIT
from pygit2 import init_repository, clone_repository, discover_repository
from pygit2 import Oid, Reference, hashfile
import pygit2
from . import utils
try:
import __pypy__
except ImportError:
__pypy__ = None
HEAD_SHA = '784855caf26449a1914d2cf62d12b9374d76ae78'
PARENT_SHA = 'f5e5aa4e36ab0fe62ee1ccc6eb8f79b866863b87' # HEAD^
BLOB_HEX = 'af431f20fc541ed6d5afede3e2dc7160f6f01f16'
BLOB_RAW = binascii.unhexlify(BLOB_HEX.encode('ascii'))
BLOB_OID = Oid(raw=BLOB_RAW)
class RepositoryTest(utils.BareRepoTestCase):
def test_is_empty(self):
self.assertFalse(self.repo.is_empty)
def test_is_bare(self):
self.assertTrue(self.repo.is_bare)
def test_head(self):
head = self.repo.head
self.assertEqual(HEAD_SHA, head.target.hex)
self.assertEqual(type(head), Reference)
self.assertFalse(self.repo.head_is_unborn)
self.assertFalse(self.repo.head_is_detached)
def test_set_head(self):
        # Test setting a detached HEAD.
self.repo.set_head(Oid(hex=PARENT_SHA))
self.assertEqual(self.repo.head.target.hex, PARENT_SHA)
# And test setting a normal HEAD.
self.repo.set_head("refs/heads/master")
self.assertEqual(self.repo.head.name, "refs/heads/master")
self.assertEqual(self.repo.head.target.hex, HEAD_SHA)
def test_read(self):
self.assertRaises(TypeError, self.repo.read, 123)
self.assertRaisesWithArg(KeyError, '1' * 40, self.repo.read, '1' * 40)
ab = self.repo.read(BLOB_OID)
a = self.repo.read(BLOB_HEX)
self.assertEqual(ab, a)
self.assertEqual((GIT_OBJ_BLOB, b'a contents\n'), a)
a2 = self.repo.read('7f129fd57e31e935c6d60a0c794efe4e6927664b')
self.assertEqual((GIT_OBJ_BLOB, b'a contents 2\n'), a2)
a_hex_prefix = BLOB_HEX[:4]
a3 = self.repo.read(a_hex_prefix)
self.assertEqual((GIT_OBJ_BLOB, b'a contents\n'), a3)
def test_write(self):
data = b"hello world"
# invalid object type
self.assertRaises(ValueError, self.repo.write, GIT_OBJ_ANY, data)
oid = self.repo.write(GIT_OBJ_BLOB, data)
self.assertEqual(type(oid), Oid)
def test_contains(self):
self.assertRaises(TypeError, lambda: 123 in self.repo)
self.assertTrue(BLOB_OID in self.repo)
self.assertTrue(BLOB_HEX in self.repo)
self.assertTrue(BLOB_HEX[:10] in self.repo)
self.assertFalse('a' * 40 in self.repo)
self.assertFalse('a' * 20 in self.repo)
def test_iterable(self):
        objects = [obj for obj in self.repo]
        oid = Oid(hex=BLOB_HEX)
        self.assertTrue(oid in objects)
def test_lookup_blob(self):
self.assertRaises(TypeError, lambda: self.repo[123])
self.assertEqual(self.repo[BLOB_OID].hex, BLOB_HEX)
a = self.repo[BLOB_HEX]
self.assertEqual(b'a contents\n', a.read_raw())
self.assertEqual(BLOB_HEX, a.hex)
self.assertEqual(GIT_OBJ_BLOB, a.type)
def test_lookup_blob_prefix(self):
a = self.repo[BLOB_HEX[:5]]
self.assertEqual(b'a contents\n', a.read_raw())
self.assertEqual(BLOB_HEX, a.hex)
self.assertEqual(GIT_OBJ_BLOB, a.type)
def test_lookup_commit(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit = self.repo[commit_sha]
self.assertEqual(commit_sha, commit.hex)
self.assertEqual(GIT_OBJ_COMMIT, commit.type)
self.assertEqual(('Second test data commit.\n\n'
'This commit has some additional text.\n'),
commit.message)
def test_lookup_commit_prefix(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit_sha_prefix = commit_sha[:7]
too_short_prefix = commit_sha[:3]
commit = self.repo[commit_sha_prefix]
self.assertEqual(commit_sha, commit.hex)
self.assertEqual(GIT_OBJ_COMMIT, commit.type)
self.assertEqual(
('Second test data commit.\n\n'
'This commit has some additional text.\n'),
commit.message)
self.assertRaises(ValueError, self.repo.__getitem__, too_short_prefix)
def test_expand_id(self):
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
expanded = self.repo.expand_id(commit_sha[:7])
self.assertEqual(commit_sha, expanded.hex)
@unittest.skipIf(__pypy__ is not None, "skip refcounts checks in pypy")
def test_lookup_commit_refcount(self):
start = sys.getrefcount(self.repo)
commit_sha = '5fe808e8953c12735680c257f56600cb0de44b10'
commit = self.repo[commit_sha]
del commit
end = sys.getrefcount(self.repo)
self.assertEqual(start, end)
def test_get_path(self):
directory = realpath(self.repo.path)
expected = realpath(self.repo_path)
self.assertEqual(directory, expected)
def test_get_workdir(self):
self.assertEqual(self.repo.workdir, None)
def test_revparse_single(self):
parent = self.repo.revparse_single('HEAD^')
self.assertEqual(parent.hex, PARENT_SHA)
def test_hash(self):
data = "foobarbaz"
hashed_sha1 = pygit2.hash(data)
written_sha1 = self.repo.create_blob(data)
self.assertEqual(hashed_sha1, written_sha1)
def test_hashfile(self):
data = "bazbarfoo"
tempfile_path = tempfile.mkstemp()[1]
with open(tempfile_path, 'w') as fh:
fh.write(data)
hashed_sha1 = hashfile(tempfile_path)
os.unlink(tempfile_path)
written_sha1 = self.repo.create_blob(data)
self.assertEqual(hashed_sha1, written_sha1)
def test_conflicts_in_bare_repository(self):
def create_conflict_file(repo, branch, content):
oid = repo.create_blob(content.encode('utf-8'))
tb = repo.TreeBuilder()
tb.insert('conflict', oid, pygit2.GIT_FILEMODE_BLOB)
tree = tb.write()
sig = pygit2.Signature('Author', '[email protected]')
commit = repo.create_commit(branch.name, sig, sig,
'Conflict', tree, [branch.target])
self.assertIsNotNone(commit)
return commit
b1 = self.repo.create_branch('b1', self.repo.head.peel())
c1 = create_conflict_file(self.repo, b1, 'ASCII - abc')
b2 = self.repo.create_branch('b2', self.repo.head.peel())
c2 = create_conflict_file(self.repo, b2, 'Unicode - äüö')
index = self.repo.merge_commits(c1, c2)
self.assertIsNotNone(index.conflicts)
# ConflictCollection does not allow calling len(...) on it directly so
# we have to calculate length by iterating over its entries
self.assertEqual(sum(1 for _ in index.conflicts), 1)
(a, t, o) = index.conflicts['conflict']
diff = self.repo.merge_file_from_index(a, t, o)
self.assertEqual(diff, '''<<<<<<< conflict
ASCII - abc
=======
Unicode - äüö
>>>>>>> conflict
''')
class RepositoryTest_II(utils.RepoTestCase):
def test_is_empty(self):
self.assertFalse(self.repo.is_empty)
def test_is_bare(self):
self.assertFalse(self.repo.is_bare)
def test_get_path(self):
directory = realpath(self.repo.path)
expected = realpath(join(self.repo_path, '.git'))
self.assertEqual(directory, expected)
def test_get_workdir(self):
directory = realpath(self.repo.workdir)
expected = realpath(self.repo_path)
self.assertEqual(directory, expected)
def test_set_workdir(self):
directory = tempfile.mkdtemp()
self.repo.workdir = directory
self.assertEqual(realpath(self.repo.workdir), realpath(directory))
def test_checkout_ref(self):
ref_i18n = self.repo.lookup_reference('refs/heads/i18n')
# checkout i18n with conflicts and default strategy should
# not be possible
self.assertRaises(pygit2.GitError, self.repo.checkout, ref_i18n)
# checkout i18n with GIT_CHECKOUT_FORCE
head = self.repo.head
head = self.repo[head.target]
self.assertTrue('new' not in head.tree)
self.repo.checkout(ref_i18n, strategy=pygit2.GIT_CHECKOUT_FORCE)
head = self.repo.head
head = self.repo[head.target]
self.assertEqual(head.hex, ref_i18n.target.hex)
self.assertTrue('new' in head.tree)
self.assertTrue('bye.txt' not in self.repo.status())
def test_checkout_branch(self):
branch_i18n = self.repo.lookup_branch('i18n')
# checkout i18n with conflicts and default strategy should
# not be possible
self.assertRaises(pygit2.GitError, self.repo.checkout, branch_i18n)
# checkout i18n with GIT_CHECKOUT_FORCE
head = self.repo.head
head = self.repo[head.target]
self.assertTrue('new' not in head.tree)
self.repo.checkout(branch_i18n, strategy=pygit2.GIT_CHECKOUT_FORCE)
head = self.repo.head
head = self.repo[head.target]
self.assertEqual(head.hex, branch_i18n.target.hex)
self.assertTrue('new' in head.tree)
self.assertTrue('bye.txt' not in self.repo.status())
def test_checkout_index(self):
# some changes to working dir
with open(os.path.join(self.repo.workdir, 'hello.txt'), 'w') as f:
f.write('new content')
# checkout index
self.assertTrue('hello.txt' in self.repo.status())
self.repo.checkout(strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('hello.txt' not in self.repo.status())
def test_checkout_head(self):
# some changes to the index
with open(os.path.join(self.repo.workdir, 'bye.txt'), 'w') as f:
f.write('new content')
self.repo.index.add('bye.txt')
# checkout from index should not change anything
self.assertTrue('bye.txt' in self.repo.status())
self.repo.checkout(strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('bye.txt' in self.repo.status())
# checkout from head will reset index as well
self.repo.checkout('HEAD', strategy=pygit2.GIT_CHECKOUT_FORCE)
self.assertTrue('bye.txt' not in self.repo.status())
def test_checkout_alternative_dir(self):
ref_i18n = self.repo.lookup_reference('refs/heads/i18n')
extra_dir = os.path.join(self.repo.workdir, 'extra-dir')
os.mkdir(extra_dir)
self.assertTrue(len(os.listdir(extra_dir)) == 0)
self.repo.checkout(ref_i18n, directory=extra_dir)
self.assertFalse(len(os.listdir(extra_dir)) == 0)
def test_merge_base(self):
commit = self.repo.merge_base(
'5ebeeebb320790caf276b9fc8b24546d63316533',
'4ec4389a8068641da2d6578db0419484972284c8')
self.assertEqual(commit.hex,
'acecd5ea2924a4b900e7e149496e1f4b57976e51')
# Create a commit without any merge base to any other
sig = pygit2.Signature("me", "[email protected]")
indep = self.repo.create_commit(None, sig, sig, "a new root commit",
self.repo[commit].peel(pygit2.Tree).id, [])
self.assertEqual(None, self.repo.merge_base(indep, commit))
def test_ahead_behind(self):
ahead, behind = self.repo.ahead_behind('5ebeeebb320790caf276b9fc8b24546d63316533',
'4ec4389a8068641da2d6578db0419484972284c8')
self.assertEqual(1, ahead)
self.assertEqual(2, behind)
ahead, behind = self.repo.ahead_behind('4ec4389a8068641da2d6578db0419484972284c8',
'5ebeeebb320790caf276b9fc8b24546d63316533')
self.assertEqual(2, ahead)
self.assertEqual(1, behind)
def test_reset_hard(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_HARD)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
            # Hard reset will reset the working copy too
self.assertFalse("hola mundo\n" in lines)
self.assertFalse("bonjour le monde\n" in lines)
def test_reset_soft(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_SOFT)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
            # Soft reset will not reset the working copy
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
        # Soft reset will keep changes in the index
diff = self.repo.diff(cached=True)
self.assertRaises(KeyError, lambda: diff[0])
def test_reset_mixed(self):
ref = "5ebeeebb320790caf276b9fc8b24546d63316533"
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
self.repo.reset(
ref,
pygit2.GIT_RESET_MIXED)
self.assertEqual(self.repo.head.target.hex, ref)
with open(os.path.join(self.repo.workdir, "hello.txt")) as f:
lines = f.readlines()
            # Mixed reset will not reset the working copy
self.assertTrue("hola mundo\n" in lines)
self.assertTrue("bonjour le monde\n" in lines)
        # Mixed reset will set the index to match working copy
diff = self.repo.diff(cached=True)
self.assertTrue("hola mundo\n" in diff.patch)
self.assertTrue("bonjour le monde\n" in diff.patch)
class RepositorySignatureTest(utils.RepoTestCase):
def test_default_signature(self):
config = self.repo.config
config['user.name'] = 'Random J Hacker'
        config['user.email'] = '[email protected]'
sig = self.repo.default_signature
self.assertEqual('Random J Hacker', sig.name)
self.assertEqual('[email protected]', sig.email)
class NewRepositoryTest(utils.NoRepoTestCase):
def test_new_repo(self):
repo = init_repository(self._temp_dir, False)
oid = repo.write(GIT_OBJ_BLOB, "Test")
self.assertEqual(type(oid), Oid)
assert os.path.exists(os.path.join(self._temp_dir, '.git'))
class InitRepositoryTest(utils.NoRepoTestCase):
# under the assumption that repo.is_bare works
def test_no_arg(self):
repo = init_repository(self._temp_dir)
self.assertFalse(repo.is_bare)
def test_pos_arg_false(self):
repo = init_repository(self._temp_dir, False)
self.assertFalse(repo.is_bare)
def test_pos_arg_true(self):
repo = init_repository(self._temp_dir, True)
self.assertTrue(repo.is_bare)
def test_keyword_arg_false(self):
repo = init_repository(self._temp_dir, bare=False)
self.assertFalse(repo.is_bare)
def test_keyword_arg_true(self):
repo = init_repository(self._temp_dir, bare=True)
self.assertTrue(repo.is_bare)
class DiscoverRepositoryTest(utils.NoRepoTestCase):
def test_discover_repo(self):
repo = init_repository(self._temp_dir, False)
subdir = os.path.join(self._temp_dir, "test1", "test2")
os.makedirs(subdir)
self.assertEqual(repo.path, discover_repository(subdir))
class EmptyRepositoryTest(utils.EmptyRepoTestCase):
def test_is_empty(self):
self.assertTrue(self.repo.is_empty)
    def test_is_bare(self):
self.assertFalse(self.repo.is_bare)
def test_head(self):
self.assertTrue(self.repo.head_is_unborn)
self.assertFalse(self.repo.head_is_detached)
class CloneRepositoryTest(utils.NoRepoTestCase):
def test_clone_repository(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(repo_path, self._temp_dir)
self.assertFalse(repo.is_empty)
self.assertFalse(repo.is_bare)
def test_clone_bare_repository(self):
repo_path = "./test/data/testrepo.git/"
repo = clone_repository(repo_path, self._temp_dir, bare=True)
self.assertFalse(repo.is_empty)
self.assertTrue(repo.is_bare)
def test_clone_repository_and_remote_callbacks(self):
src_repo_relpath = "./test/data/testrepo.git/"
repo_path = os.path.join(self._temp_dir, "clone-into")
url = 'file://' + os.path.realpath(src_repo_relpath)
def create_repository(path, bare):
return init_repository(path, bare)
# here we override the name
def create_remote(repo, name, url):
return repo.remotes.create("custom_remote", url)
repo = clone_repository(url, repo_path, repository=create_repository, remote=create_remote)
self.assertFalse(repo.is_empty)
self.assertTrue('refs/remotes/custom_remote/master' in repo.listall_references())
self.assertIsNotNone(repo.remotes["custom_remote"])
def test_clone_with_credentials(self):
repo = clone_repository(
"https://bitbucket.org/libgit2/testgitrepository.git",
self._temp_dir, callbacks=pygit2.RemoteCallbacks(credentials=pygit2.UserPass("libgit2", "libgit2")))
self.assertFalse(repo.is_empty)
def test_clone_with_checkout_branch(self):
# create a test case which isolates the remote
test_repo = clone_repository('./test/data/testrepo.git',
os.path.join(self._temp_dir, 'testrepo-orig.git'),
bare=True)
test_repo.create_branch('test', test_repo[test_repo.head.target])
repo = clone_repository(test_repo.path,
os.path.join(self._temp_dir, 'testrepo.git'),
checkout_branch='test', bare=True)
self.assertEqual(repo.lookup_reference('HEAD').target, 'refs/heads/test')
# FIXME The tests below are commented because they are broken:
#
# - test_clone_push_url: Passes, but does nothing useful.
#
# - test_clone_fetch_spec: Segfaults because of a bug in libgit2 0.19,
# this has been fixed already, so wait for 0.20
#
# - test_clone_push_spec: Passes, but does nothing useful.
#
# def test_clone_push_url(self):
# repo_path = "./test/data/testrepo.git/"
# repo = clone_repository(
# repo_path, self._temp_dir, push_url="custom_push_url"
# )
# self.assertFalse(repo.is_empty)
# # FIXME: When pygit2 supports retrieving the pushurl parameter,
# # enable this test
# # self.assertEqual(repo.remotes[0].pushurl, "custom_push_url")
# def test_clone_fetch_spec(self):
# repo_path = "./test/data/testrepo.git/"
# repo = clone_repository(repo_path, self._temp_dir,
# fetch_spec="refs/heads/test")
# self.assertFalse(repo.is_empty)
# # FIXME: When pygit2 retrieve the fetchspec we passed to git clone.
# # fetchspec seems to be going through, but the Repository class is
# # not getting it.
# # self.assertEqual(repo.remotes[0].fetchspec, "refs/heads/test")
# def test_clone_push_spec(self):
# repo_path = "./test/data/testrepo.git/"
# repo = clone_repository(repo_path, self._temp_dir,
# push_spec="refs/heads/test")
# self.assertFalse(repo.is_empty)
# # FIXME: When pygit2 supports retrieving the pushspec parameter,
# # enable this test
# # not sure how to test this either... couldn't find pushspec
# # self.assertEqual(repo.remotes[0].fetchspec, "refs/heads/test")
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| -5,062,015,847,481,946,000 | 4,899,534,802,172,312,000 | 37.078261 | 112 | 0.642293 | false |
joericearchitect/site-joe-rice-architect
|
devops/deployment/ansible/ec2-inventory/ec2.py
|
12
|
63782
|
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
pass
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# AWS credentials.
self.credentials = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'hostname_variable'):
self.hostname_variable = config.get('ec2', 'hostname_variable')
else:
self.hostname_variable = None
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include RDS cluster instances?
if config.has_option('ec2', 'include_rds_clusters'):
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
else:
self.include_rds_clusters = False
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')):
if config.has_option('credentials', 'aws_access_key_id'):
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
else:
aws_access_key_id = None
if config.has_option('credentials', 'aws_secret_access_key'):
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
else:
aws_secret_access_key = None
if config.has_option('credentials', 'aws_security_token'):
aws_security_token = config.get('credentials', 'aws_security_token')
else:
aws_security_token = None
if aws_access_key_id:
self.credentials = {
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key
}
if aws_security_token:
self.credentials['security_token'] = aws_security_token
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_name = 'ansible-ec2'
aws_profile = lambda: (self.boto_profile or
os.environ.get('AWS_PROFILE') or
os.environ.get('AWS_ACCESS_KEY_ID') or
self.credentials.get('aws_access_key_id', None))
if aws_profile():
cache_name = '%s-%s' % (cache_name, aws_profile())
self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
self.cache_path_index = cache_dir + "/%s.index" % cache_name
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if config.has_option('ec2', 'expand_csv_tags'):
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
else:
self.expand_csv_tags = False
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
            pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
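    # Illustrative sketch of a minimal ec2.ini covering the options read above
    # (values are examples only, not the defaults shipped with the stock ec2.ini):
    #
    #     [ec2]
    #     regions = us-east-1,us-west-2
    #     regions_exclude =
    #     destination_variable = public_dns_name
    #     vpc_destination_variable = ip_address
    #     route53 = False
    #     all_instances = False
    #     cache_path = ~/.ansible/tmp
    #     cache_max_age = 300
    #     nested_groups = False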
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
if self.include_rds_clusters:
self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
# Pull the tags back in a second step
# AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
# reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
instance_ids = []
for reservation in reservations:
instance_ids.extend([instance.id for instance in reservation.instances])
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
tags_by_instance_id = defaultdict(dict)
for tag in tags:
tags_by_instance_id[tag.res_id][tag.name] = tag.value
for reservation in reservations:
for instance in reservation.instances:
instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
marker = None
while True:
instances = conn.get_all_dbinstances(marker=marker)
marker = instances.marker
for instance in instances:
self.add_rds_instance(instance, region)
if not marker:
break
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def include_rds_clusters_by_region(self, region):
if not HAS_BOTO3:
self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
"getting RDS clusters")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
marker, clusters = '', []
while marker is not None:
resp = client.describe_db_clusters(Marker=marker)
clusters.extend(resp["DBClusters"])
marker = resp.get('Marker', None)
account_id = boto.connect_iam().get_user().arn.split(':')[4]
c_dict = {}
for c in clusters:
# remove these datetime objects as there is no serialisation to json
# currently in place and we don't need the data yet
if 'EarliestRestorableTime' in c:
del c['EarliestRestorableTime']
if 'LatestRestorableTime' in c:
del c['LatestRestorableTime']
if self.ec2_instance_filters == {}:
matches_filter = True
else:
matches_filter = False
try:
# arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
tags = client.list_tags_for_resource(
ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
c['Tags'] = tags['TagList']
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
# get AWS tag key e.g. tag:env will be 'env'
tag_name = filter_key.split(":", 1)[1]
# Filter values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
if matches_filter:
# it matches a filter, so stop looking for further matches
break
except Exception as e:
if e.message.find('DBInstanceNotFound') >= 0:
# AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster.
# Ignore errors when trying to find tags for these
pass
# ignore empty clusters caused by AWS bug
if len(c['DBClusterMembers']) == 0:
continue
elif matches_filter:
c_dict[c['DBClusterIdentifier']] = c
self.inventory['db_clusters'] = c_dict
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
# CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
else:
hostname = self.to_safe(hostname).lower()
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
hostname = self.to_safe(hostname).lower()
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
its nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
# Redis single-node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups)
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
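# Added illustrative note (not part of the original script): with a hypothetical
# mapping self.route53_records == {'10.0.0.5': set(['db.example.com'])} and an
# instance whose private_ip_address is '10.0.0.5', this method would return
# ['db.example.com']; an instance referenced by no record yields [].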
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
if self.expand_csv_tags and ',' in v:
v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {}
for k, v in value.items():
instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif value is None:
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
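# Added illustrative sketch (not part of the original script); 'inventory'
# stands for an Ec2Inventory instance and the describe dict below is invented
# to show the flattening performed above:
#
# >>> inventory.get_host_info_dict_from_describe_dict(
# ...     {'CacheClusterId': 'test-cluster',
# ...      'CacheClusterStatus': 'available',
# ...      'Endpoint': {'Address': '10.0.0.5', 'Port': 11211}})
# {'ec2_cache_cluster_id': 'test-cluster',
#  'ec2_cache_cluster_status': 'available',
#  'ec2_endpoint_address': '10.0.0.5',
#  'ec2_endpoint_port': 11211}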
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
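# Added illustrative sketch (not part of the original script) of how push() and
# push_group() shape the inventory dict; group and host names are made up:
#
# >>> inv = {}
# >>> inventory.push(inv, 'us-east-1', 'web-01')
# >>> inv
# {'us-east-1': ['web-01']}
# >>> inventory.push_group(inv, 'regions', 'us-east-1')
# >>> inv
# {'us-east-1': ['web-01'], 'regions': {'children': ['us-east-1']}}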
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
with open(self.cache_path_cache, 'r') as cache:
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
with open(self.cache_path_index, 'r') as cache:
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
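# Added illustrative examples (not part of the original script), assuming the
# default replace_dash_in_groups=True:
#
# >>> inventory.uncammelize('CacheClusterId')
# 'cache_cluster_id'
# >>> inventory.to_safe('type_t2.micro')
# 'type_t2_micro'
# >>> inventory.to_safe('us-east-1')   # dashes survive only if replace_dash_in_groups is False
# 'us_east_1'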
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory()
|
apache-2.0
| 1,734,681,224,937,568,500 | -2,305,905,599,071,212,500 | 41.239735 | 152 | 0.594431 | false |
techtonik/readthedocs.org
|
readthedocs/vcs_support/base.py
|
34
|
4583
|
import logging
import os
import shutil
import subprocess
from collections import namedtuple
from os.path import basename
log = logging.getLogger(__name__)
class VCSVersion(object):
"""
Represents a Version (tag or branch) in a VCS.
This class should only be instantiated in BaseVCS subclasses.
It can act as a context manager to temporarily switch to this tag (eg to
build docs for this tag).
"""
def __init__(self, repository, identifier, verbose_name):
self.repository = repository
self.identifier = identifier
self.verbose_name = verbose_name
def __repr__(self):
return "<VCSVersion: %s:%s" % (self.repository.repo_url,
self.verbose_name)
class VCSProject(namedtuple("VCSProject",
"name default_branch working_dir repo_url")):
"""Transient object to encapsulate a projects stuff"""
pass
class BaseCLI(object):
"""
Helper class for CLI-heavy classes.
"""
log_tmpl = u'VCS[{name}:{ident}]: {args}'
def __call__(self, *args):
return self.run(args)
def run(self, *args):
"""
:param args: command name and arguments as separate strings. See `subprocess` docs
"""
process = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.working_dir, shell=False,
env=self.env)
try:
log.info(self.log_tmpl.format(ident=basename(self.working_dir),
name=self.name,
args=' '.join(args)))
except UnicodeDecodeError:
# >:x
pass
stdout, stderr = process.communicate()
try:
log.info(self.log_tmpl.format(ident=basename(self.working_dir),
name=self.name,
args=stdout))
except UnicodeDecodeError:
# >:x
pass
return (process.returncode, stdout, stderr)
@property
def env(self):
return os.environ.copy()
class BaseVCS(BaseCLI):
"""
Base for VCS Classes.
Built on top of the BaseCLI.
"""
supports_tags = False # Whether this VCS supports tags or not.
supports_branches = False # Whether this VCS supports branches or not.
# =========================================================================
# General methods
# =========================================================================
def __init__(self, project, version, **kwargs):
self.default_branch = project.default_branch
self.name = project.name
self.repo_url = project.repo_url
self.working_dir = project.working_dir
def check_working_dir(self):
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
def make_clean_working_dir(self):
"Ensures that the working dir exists and is empty"
shutil.rmtree(self.working_dir, ignore_errors=True)
self.check_working_dir()
def update(self):
"""
If self.working_dir is already a valid local copy of the repository,
update the repository, else create a new local copy of the repository.
"""
self.check_working_dir()
# =========================================================================
# Tag / Branch related methods
# These methods only apply if supports_tags = True and/or
# support_branches = True
# =========================================================================
@property
def tags(self):
"""
Returns a list of VCSVersion objects. See VCSVersion for more
information.
"""
raise NotImplementedError
@property
def branches(self):
"""
Returns a list of VCSVersion objects. See VCSVersion for more
information.
"""
raise NotImplementedError
@property
def commit(self):
"""
Returns a string representing the current commit.
"""
raise NotImplementedError
def checkout(self, identifier=None):
"""
Set the state to the given identifier.
If identifier is None, checkout to the latest revision.
The type and format of identifier may change from VCS to VCS, so each
backend is responsible for understanding its identifiers.
"""
self.check_working_dir()
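# Added illustrative sketch (not part of the original module): a concrete
# backend is expected to subclass BaseVCS and fill in the hooks above. The
# 'Git' class name and the git command below are hypothetical examples only:
#
# class Git(BaseVCS):
#     supports_branches = True
#
#     def update(self):
#         super(Git, self).update()
#         self.make_clean_working_dir()
#         retcode, out, err = self.run('git', 'clone', '--quiet', self.repo_url, '.')
#         if retcode != 0:
#             log.warning('git clone failed for %s', self.repo_url)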
|
mit
| -7,357,364,368,956,136,000 | 409,669,555,986,314,200 | 29.966216 | 79 | 0.538075 | false |
mybios/angle
|
src/tests/deqp_tests/generate_deqp_tests.py
|
24
|
1253
|
import os
import re
import sys
def ReadFileAsLines(filename):
"""Reads a file, removing blank lines and lines that start with #"""
file = open(filename, "r")
raw_lines = file.readlines()
file.close()
lines = []
for line in raw_lines:
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
lines.append(line)
return lines
def GetCleanTestName(testName):
replacements = { "dEQP-": "", ".*": "", ".":"_", }
cleanName = testName
for replaceKey in replacements:
cleanName = cleanName.replace(replaceKey, replacements[replaceKey])
return cleanName
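# Added illustrative example (not part of the original script): a dEQP name such
# as "dEQP-GLES2.info.version" is expected to come out as "GLES2_info_version";
# the ".*" rule strips trailing wildcards from group-level entries in the list.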
def GenerateTests(outFile, testNames):
''' Generates a GTest case for each unique dEQP test name '''
# Remove duplicate tests
testNames = list(set(testNames))
outFile.write("#include \"deqp_tests.h\"\n\n")
for test in testNames:
outFile.write("TEST(deqp, " + GetCleanTestName(test) + ")\n")
outFile.write("{\n")
outFile.write(" RunDEQPTest(\"" + test + "\", GetCurrentConfig());\n")
outFile.write("}\n\n")
def main(argv):
tests = ReadFileAsLines(argv[0])
output = open(argv[1], 'wb')
GenerateTests(output, tests)
output.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
bsd-3-clause
| 3,399,387,491,086,999,000 | -8,113,941,366,991,162,000 | 27.477273 | 81 | 0.605746 | false |
pepeportela/edx-platform
|
lms/djangoapps/verify_student/tests/test_fake_software_secure.py
|
10
|
2751
|
"""
Tests for the fake software secure response.
"""
from django.test import TestCase
from mock import patch
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
class SoftwareSecureFakeViewTest(UrlResetMixin, TestCase):
"""
Base class to test the fake software secure view.
"""
URLCONF_MODULES = ['verify_student.urls']
def setUp(self, **kwargs):
enable_software_secure_fake = kwargs.get('enable_software_secure_fake', False)
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_SOFTWARE_SECURE_FAKE': enable_software_secure_fake}):
super(SoftwareSecureFakeViewTest, self).setUp()
self.user = UserFactory.create(username="test", password="test")
self.attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
self.client.login(username="test", password="test")
class SoftwareSecureFakeViewDisabledTest(SoftwareSecureFakeViewTest):
"""
Test the fake software secure response when feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
"""
def setUp(self):
super(SoftwareSecureFakeViewDisabledTest, self).setUp(enable_software_secure_fake=False)
def test_get_method_without_enable_feature_flag(self):
"""
Test that the user gets a 404 response if the feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
"""
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 404)
class SoftwareSecureFakeViewEnabledTest(SoftwareSecureFakeViewTest):
"""
Test the fake software secure response when feature flag
'ENABLE_SOFTWARE_SECURE_FAKE' is enabled.
"""
def setUp(self):
super(SoftwareSecureFakeViewEnabledTest, self).setUp(enable_software_secure_fake=True)
def test_get_method_without_logged_in_user(self):
"""
Test that the user gets a 302 response if that user is not logged in.
"""
self.client.logout()
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 302)
def test_get_method(self):
"""
Test that GET method of fake software secure view uses the most recent
attempt for the logged-in user.
"""
response = self.client.get(
'/verify_student/software-secure-fake-response'
)
self.assertEqual(response.status_code, 200)
self.assertIn('EdX-ID', response.content)
self.assertIn('results_callback', response.content)
|
agpl-3.0
| 3,818,870,972,470,027,300 | 6,411,858,550,511,274,000 | 33.822785 | 119 | 0.685932 | false |
SomethingExplosive/android_external_chromium_org
|
third_party/tlslite/tlslite/Checker.py
|
359
|
6301
|
"""Class for post-handshake certificate checking."""
from utils.cryptomath import hashAndBase64
from X509 import X509
from X509CertChain import X509CertChain
from errors import *
class Checker:
"""This class is passed to a handshake function to check the other
party's certificate chain.
If a handshake function completes successfully, but the Checker
judges the other party's certificate chain to be missing or
inadequate, a subclass of
L{tlslite.errors.TLSAuthenticationError} will be raised.
Currently, the Checker can check either an X.509 or a cryptoID
chain (for the latter, cryptoIDlib must be installed).
"""
def __init__(self, cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
checkResumedSession=False):
"""Create a new Checker instance.
You must pass in one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
@type cryptoID: str
@param cryptoID: A cryptoID which the other party's certificate
chain must match. The cryptoIDlib module must be installed.
Mutually exclusive with all of the 'x509...' arguments.
@type protocol: str
@param protocol: A cryptoID protocol URI which the other
party's certificate chain must match. Requires the 'cryptoID'
argument.
@type x509Fingerprint: str
@param x509Fingerprint: A hex-encoded X.509 end-entity
fingerprint which the other party's end-entity certificate must
match. Mutually exclusive with the 'cryptoID' and
'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed. Mutually exclusive with the 'cryptoID' and
'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type checkResumedSession: bool
@param checkResumedSession: If resumed sessions should be
checked. This defaults to False, on the theory that if the
session was checked once, we don't need to bother
re-checking it.
"""
if cryptoID and (x509Fingerprint or x509TrustList):
raise ValueError()
if x509Fingerprint and x509TrustList:
raise ValueError()
if x509CommonName and not x509TrustList:
raise ValueError()
if protocol and not cryptoID:
raise ValueError()
if cryptoID:
import cryptoIDlib #So an ImportError is raised here if it isn't installed
if x509TrustList:
import cryptlib_py #So an ImportError is raised here if it isn't installed
self.cryptoID = cryptoID
self.protocol = protocol
self.x509Fingerprint = x509Fingerprint
self.x509TrustList = x509TrustList
self.x509CommonName = x509CommonName
self.checkResumedSession = checkResumedSession
def __call__(self, connection):
"""Check a TLSConnection.
When a Checker is passed to a handshake function, this will
be called at the end of the function.
@type connection: L{tlslite.TLSConnection.TLSConnection}
@param connection: The TLSConnection to examine.
@raise tlslite.errors.TLSAuthenticationError: If the other
party's certificate chain is missing or bad.
"""
if not self.checkResumedSession and connection.resumed:
return
if self.cryptoID or self.x509Fingerprint or self.x509TrustList:
if connection._client:
chain = connection.session.serverCertChain
else:
chain = connection.session.clientCertChain
if self.x509Fingerprint or self.x509TrustList:
if isinstance(chain, X509CertChain):
if self.x509Fingerprint:
if chain.getFingerprint() != self.x509Fingerprint:
raise TLSFingerprintError(\
"X.509 fingerprint mismatch: %s, %s" % \
(chain.getFingerprint(), self.x509Fingerprint))
else: #self.x509TrustList
if not chain.validate(self.x509TrustList):
raise TLSValidationError("X.509 validation failure")
if self.x509CommonName and \
(chain.getCommonName() != self.x509CommonName):
raise TLSAuthorizationError(\
"X.509 Common Name mismatch: %s, %s" % \
(chain.getCommonName(), self.x509CommonName))
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
elif self.cryptoID:
import cryptoIDlib.CertChain
if isinstance(chain, cryptoIDlib.CertChain.CertChain):
if chain.cryptoID != self.cryptoID:
raise TLSFingerprintError(\
"cryptoID mismatch: %s, %s" % \
(chain.cryptoID, self.cryptoID))
if self.protocol:
if not chain.checkProtocol(self.protocol):
raise TLSAuthorizationError(\
"cryptoID protocol mismatch")
if not chain.validate():
raise TLSValidationError("cryptoID validation failure")
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
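# Added illustrative usage sketch (not part of the original module). A Checker
# is normally built by the caller and handed to a handshake call; the
# fingerprint value and the handshakeClientCert() call below are examples only:
#
# checker = Checker(x509Fingerprint="0123456789abcdef0123456789abcdef01234567")
# connection.handshakeClientCert(checker=checker)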
|
bsd-3-clause
| 5,545,581,902,395,710,000 | -5,189,829,511,655,133,000 | 42.157534 | 80 | 0.607364 | false |
ninapavlich/lesleyloraine
|
lesleyloraine/apps/email/migrations/0002_auto_20160205_2337.py
|
3
|
1436
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('email', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='emailcategory',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='emailcategorysubscriptionsettings',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='emailreceipt',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='emailtemplate',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
migrations.AlterField(
model_name='usersubscriptionsettings',
name='admin_note',
field=models.TextField(help_text=b'Not publicly visible', null=True, verbose_name='admin note', blank=True),
),
]
|
mit
| -8,843,042,579,871,244,000 | 8,097,073,594,717,022,000 | 35.820513 | 120 | 0.606546 | false |
arifsetiawan/edx-platform
|
lms/djangoapps/courseware/features/conditional.py
|
102
|
4723
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, steps
from nose.tools import assert_in, assert_true # pylint: disable=no-name-in-module
from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import add_problem_to_course, answer_problem
@steps
class ConditionalSteps(object):
COURSE_NUM = 'test_course'
def setup_conditional(self, step, condition_type, condition, cond_value):
r'that a course has a Conditional conditioned on (?P<condition_type>\w+) (?P<condition>\w+)=(?P<cond_value>\w+)$'
i_am_registered_for_the_course(step, self.COURSE_NUM)
world.scenario_dict['VERTICAL'] = world.ItemFactory(
parent_location=world.scenario_dict['SECTION'].location,
category='vertical',
display_name="Test Vertical",
)
world.scenario_dict['WRAPPER'] = world.ItemFactory(
parent_location=world.scenario_dict['VERTICAL'].location,
category='wrapper',
display_name="Test Poll Wrapper"
)
if condition_type == 'problem':
world.scenario_dict['CONDITION_SOURCE'] = add_problem_to_course(self.COURSE_NUM, 'string')
elif condition_type == 'poll':
world.scenario_dict['CONDITION_SOURCE'] = world.ItemFactory(
parent_location=world.scenario_dict['WRAPPER'].location,
category='poll_question',
display_name='Conditional Poll',
data={
'question': 'Is this a good poll?',
'answers': [
{'id': 'yes', 'text': 'Yes, of course'},
{'id': 'no', 'text': 'Of course not!'}
],
}
)
else:
raise Exception("Unknown condition type: {!r}".format(condition_type))
metadata = {
'xml_attributes': {
condition: cond_value
}
}
world.scenario_dict['CONDITIONAL'] = world.ItemFactory(
parent_location=world.scenario_dict['WRAPPER'].location,
category='conditional',
display_name="Test Conditional",
metadata=metadata,
sources_list=[world.scenario_dict['CONDITION_SOURCE'].location],
)
world.ItemFactory(
parent_location=world.scenario_dict['CONDITIONAL'].location,
category='html',
display_name='Conditional Contents',
data='<html><div class="hidden-contents">Hidden Contents</div></html>'
)
def setup_problem_attempts(self, step, not_attempted=None):
r'that the conditioned problem has (?P<not_attempted>not )?been attempted$'
visit_scenario_item('CONDITION_SOURCE')
if not_attempted is None:
answer_problem(self.COURSE_NUM, 'string', True)
world.css_click("button.check")
def when_i_view_the_conditional(self, step):
r'I view the conditional$'
visit_scenario_item('CONDITIONAL')
world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Conditional]").data("initialized")')
def check_visibility(self, step, visible):
r'the conditional contents are (?P<visible>\w+)$'
world.wait_for_ajax_complete()
assert_in(visible, ('visible', 'hidden'))
if visible == 'visible':
world.wait_for_visible('.hidden-contents')
assert_true(world.css_visible('.hidden-contents'))
else:
assert_true(world.is_css_not_present('.hidden-contents'))
assert_true(
world.css_contains_text(
'.conditional-message',
'must be attempted before this will become visible.'
)
)
def answer_poll(self, step, answer):
r' I answer the conditioned poll "([^"]*)"$'
visit_scenario_item('CONDITION_SOURCE')
world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Poll]").data("initialized")')
world.wait_for_ajax_complete()
answer_text = [
poll_answer['text']
for poll_answer
in world.scenario_dict['CONDITION_SOURCE'].answers
if poll_answer['id'] == answer
][0]
text_selector = '.poll_answer .text'
poll_texts = world.retry_on_exception(
lambda: [elem.text for elem in world.css_find(text_selector)]
)
for idx, poll_text in enumerate(poll_texts):
if poll_text == answer_text:
world.css_click(text_selector, index=idx)
return
ConditionalSteps()
|
agpl-3.0
| -6,081,359,066,611,955,000 | -6,481,192,810,695,124,000 | 36.188976 | 121 | 0.580563 | false |
lucywyman/slides
|
source/source/conf.py
|
1
|
9886
|
# -*- coding: utf-8 -*-
#
# slides documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 1 21:05:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'rst2pdf.pdfbuilder',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'slides'
copyright = u'2015, l'
author = u'l'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2015.07.01'
# The full version, including alpha/beta/rc tags.
release = '2015.07.01'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['./static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
#html_style = 'styles.css'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'slidesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
# (master_doc, 'slides.tex', u'slides Documentation',
# u'l', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# (master_doc, 'slides', u'slides Documentation',
# [author], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# (master_doc, 'slides', u'slides Documentation',
# author, 'slides', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Hieroglyph Slide Configuration ------------
extensions += [
'hieroglyph',
]
slide_title = 'slides'
slide_theme = 'single-level'
slide_levels = 3
# Place custom static assets in the static directory and uncomment
# the following lines to include them
slide_theme_options = {
# 'custom_css': 'custom.css',
# 'custom_js': 'custom.js',
}
# ----------------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
mit
| 3,677,548,235,442,005,000 | 3,304,223,805,052,963,000 | 29.990596 | 79 | 0.698867 | false |
bw2/gemini
|
gemini/vep.py
|
4
|
11612
|
#!/usr/bin/env python
#############
# CSQ: Consequence|Codons|Amino_acids|Gene|hgnc|Feature|EXON|polyphen|sift|Protein_position|BIOTYPE
# missense_variant|gAg/gTg|E/V|ENSG00000188157||ENST00000379370|12/36|probably_damaging(0.932)|deleterious(0.02)|728/2045|protein_coding
# nc_transcript_variant|||ENSG00000116254|CHD5|ENST00000491020|5/6|||||
#############
from collections import defaultdict, namedtuple
import itertools
class EffectDetails(object):
def __init__(self, impact_string, severity, detail_string, counter, labels):
fields = self._prep_fields(detail_string, labels)
self.effect_severity = severity
self.effect_name = impact_string
self.anno_id = counter
fields.pop("consequence", None)
self.codon_change = fields.pop("codons", None)
self.aa_change = fields.pop("amino_acids", None)
self.ensembl_gene = fields.pop("gene", None)
self.hgnc = fields.pop("symbol", None)
self.gene = self.hgnc or self.ensembl_gene
self.transcript = fields.pop("feature", None)
self.exon = fields.pop("exon", None)
self.polyphen = fields.pop("polyphen", None)
self.sift = fields.pop("sift", None)
self.aa_length = fields.pop("protein_position", None)
self.biotype = fields.pop("biotype", None)
self.warnings = fields.pop("warning", None)
self.extra_fields = {"vep_%s" % k: v for k, v in fields.items()}
self.consequence = effect_dict[self.effect_name] if self.effect_severity is not None else self.effect_name
self.so = self.effect_name # VEP impacts are SO by default
# rules for being exonic.
# 1. the impact must be in the list of exonic impacts
# 3. must be protein_coding
self.is_exonic = 0
if self.effect_name in exonic_impacts and \
self.biotype == "protein_coding":
self.is_exonic = 1
# rules for being loss-of-function (lof).
# must be protein_coding
# must be a coding variant with HIGH impact
if self.effect_severity == "HIGH" and self.biotype == "protein_coding":
self.is_lof = 1
else:
self.is_lof = 0
# Rules for being coding
# must be protein_coding
# Exonic but not UTR's
if self.is_exonic and not (self.effect_name == "5_prime_UTR_variant" or
self.effect_name == "3_prime_UTR_variant"):
self.is_coding = 1
else:
self.is_coding = 0
# parse Polyphen predictions
if self.polyphen is not None:
self.polyphen_b = self.polyphen.split("(")
self.polyphen_pred = self.polyphen_b[0]
self.polyphen2 = self.polyphen_b[1].split(")")
self.polyphen_score = self.polyphen2[0]
else:
self.polyphen_pred = None
self.polyphen_score = None
# parse SIFT predictions
if self.sift is not None:
self.sift_b = self.sift.split("(")
self.sift_pred = self.sift_b[0]
self.sift2 = self.sift_b[1].split(")")
self.sift_score = self.sift2[0]
else:
self.sift_pred = None
self.sift_score = None
def _prep_fields(self, detail_string, labels):
"""Prepare a dictionary mapping labels to provided fields in the consequence.
"""
out = {}
for key, val in itertools.izip_longest(labels, detail_string.split("|")):
if val and val.strip():
if key is None:
out["warnings"] = val.strip()
else:
out[key.strip().lower()] = val.strip()
return out
def __str__(self):
return "\t".join([self.consequence, self.effect_severity, str(self.codon_change),
str(self.aa_change), str(self.aa_length), str(self.biotype),
str(self.ensembl_gene), str(self.gene), str(self.transcript),
str(self.exon), str(self.is_exonic), str(self.anno_id), str(self.polyphen_pred),
str(self.polyphen_score), str(self.sift_pred), str(self.sift_score),
str(self.is_coding), str(self.is_lof), str(self.so)])
def __repr__(self):
return self.__str__()
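# Added illustrative sketch (not part of the original module). Parsing one VEP
# CSQ entry, assuming the CSQ labels below match the VCF header and using
# invented values:
#
# labels = ["Consequence", "Codons", "Amino_acids", "Gene", "SYMBOL", "Feature",
#           "EXON", "PolyPhen", "SIFT", "Protein_position", "BIOTYPE"]
# detail = ("missense_variant|gAg/gTg|E/V|ENSG00000188157|AGRN|ENST00000379370|"
#           "12/36|probably_damaging(0.932)|deleterious(0.02)|728/2045|protein_coding")
# eff = EffectDetails("missense_variant", "MED", detail, 1, labels)
# # eff.gene == "AGRN", eff.is_exonic == 1, eff.polyphen_score == "0.932"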
exonic_impacts = ["stop_gained",
"stop_lost",
"frameshift_variant",
"initiator_codon_variant",
"inframe_deletion",
"inframe_insertion",
"missense_variant",
"incomplete_terminal_codon_variant",
"stop_retained_variant",
"synonymous_variant",
"coding_sequence_variant",
"5_prime_UTR_variant",
"3_prime_UTR_variant",
"transcript_ablation",
"transcript_amplification",
"feature_elongation",
"feature_truncation"]
effect_names = ["splice_acceptor_variant", "splice_donor_variant",
"stop_gained", "stop_lost",
"non_coding_exon_variant", "frameshift_variant",
"initiator_codon_variant", "inframe_deletion",
"inframe_insertion", "missense_variant",
"splice_region_variant", "incomplete_terminal_codon_variant",
"stop_retained_variant", "synonymous_variant",
"coding_sequence_variant", "mature_miRNA_variant",
"5_prime_UTR_variant", "3_prime_UTR_variant",
"intron_variant", "NMD_transcript_variant",
"nc_transcript_variant", "upstream_gene_variant",
"downstream_gene_variant", "regulatory_region_variant",
"TF_binding_site_variant", "intergenic_variant",
"regulatory_region_ablation", "regulatory_region_amplification",
"transcript_ablation", "transcript_amplification",
"TFBS_ablation", "TFBS_amplification",
"feature_elongation", "feature_truncation"]
effect_dict = defaultdict()
effect_dict = {
'splice_acceptor_variant': 'splice_acceptor', 'splice_donor_variant': 'splice_donor',
'stop_gained': 'stop_gain', 'stop_lost': 'stop_loss',
'non_coding_exon_variant': 'nc_exon', 'frameshift_variant': 'frame_shift',
'initiator_codon_variant': 'transcript_codon_change', 'inframe_deletion': 'inframe_codon_loss',
'inframe_insertion': 'inframe_codon_gain', 'missense_variant': 'non_syn_coding',
'splice_region_variant': 'splice_region', 'incomplete_terminal_codon_variant': 'incomplete_terminal_codon',
'stop_retained_variant': 'synonymous_stop', 'synonymous_variant': 'synonymous_coding',
'coding_sequence_variant': 'CDS', 'mature_miRNA_variant': 'mature_miRNA',
'5_prime_UTR_variant': 'UTR_5_prime', '3_prime_UTR_variant': 'UTR_3_prime',
'intron_variant': 'intron', 'NMD_transcript_variant': 'NMD_transcript',
'nc_transcript_variant': 'nc_transcript', 'upstream_gene_variant': 'upstream',
'downstream_gene_variant': 'downstream', 'regulatory_region_variant': 'regulatory_region',
'TF_binding_site_variant': 'TF_binding_site', 'intergenic_variant': 'intergenic',
'regulatory_region_ablation': 'regulatory_region_ablation', 'regulatory_region_amplification': 'regulatory_region_amplification',
'transcript_ablation': 'transcript_ablation', 'transcript_amplification': 'transcript_amplification',
'TFBS_ablation': 'TFBS_ablation', 'TFBS_amplification': 'TFBS_amplification',
'feature_elongation': 'feature_elongation', 'feature_truncation': 'feature_truncation'}
effect_desc = ["The variant hits the splice acceptor site (2 basepair region at 3' end of an intron)", "The variant hits the splice donor site (2 basepair region at 5'end of an intron)",
"Variant causes a STOP codon", "Variant causes stop codon to be mutated into a non-stop codon",
"Variant causes a change in the non coding exon sequence", "Insertion or deletion causes a frame shift in coding sequence",
"Variant causes atleast one base change in the first codon of a transcript", "An inframe non-syn variant that deletes bases from the coding sequence",
"An inframe non-syn variant that inserts bases in the coding sequence", "The variant causes a different amino acid in the coding sequence",
"Variant causes a change within the region of a splice site (1-3bps into an exon or 3-8bps into an intron)", "The variant hits the incomplete codon of a transcript whose end co-ordinate is not known",
"The variant causes stop codon to be mutated into another stop codon", "The variant causes no amino acid change in coding sequence",
"Variant hits coding sequence with indeterminate effect", "The variant hits a microRNA",
"Variant hits the 5 prime untranslated region", "Variant hits the 3 prime untranslated region",
"Variant hits an intron", "A variant hits a transcript that is predicted to undergo nonsense mediated decay",
"Variant hits a gene that does not code for a protein", "The variant hits upstream of a gene (5' of a gene)",
"The variant hits downstream of a gene (3' of a gene)", "Variant hits the regulatory region annotated by Ensembl(e.g promoter)",
"Variant falls in a transcription factor binding motif within an Ensembl regulatory region", "The variant is located in the intergenic region, between genes",
"SV causes ablation of a regulatory region", "SV results in an amplification of a regulatory region",
"SV causes an ablation/deletion of a transcript feature", "SV causes an amplification of a transcript feature",
"SV results in a deletion of the TFBS", "SV results in an amplification of a region containing TFBS",
"SV causes an extension of a genomic feature wrt reference", "SV causes a reduction of a genomic feature compared to reference"]
effect_priorities = ["HIGH", "HIGH",
"HIGH", "HIGH",
"LOW", "HIGH",
"HIGH", "MED",
"MED", "MED",
"MED", "LOW",
"LOW", "LOW",
"LOW", "MED",
"LOW", "LOW",
"LOW", "LOW",
"LOW", "LOW",
"LOW", "MED",
"MED", "LOW",
"MED", "MED",
"LOW", "LOW",
"MED", "MED",
"LOW", "LOW"]
effect_priority_codes = [1, 1,
1, 1,
3, 1,
1, 2,
2, 2,
2, 3,
3, 3,
3, 2,
3, 3,
3, 3,
3, 3,
3, 2,
2, 3,
2, 2,
3, 3,
2, 2,
3, 3]
effect_ids = range(1, 35)
effect_map = {}
EffectInfo = namedtuple(
'EffectInfo', ['id', 'priority', 'priority_code', 'desc'])
for i, effect_name in enumerate(effect_names):
info = EffectInfo(effect_ids[i], effect_priorities[i],
effect_priority_codes[i], effect_desc[i])
effect_map[effect_name] = info
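# --- Editorial usage sketch (not part of the original gemini module) ---
# Shows how the parallel lists above combine into a single lookup: each VEP
# consequence term maps to an EffectInfo carrying its numeric id, severity,
# rank and description, while effect_dict gives the short internal label.
# The consequence list below is a made-up example.
if __name__ == "__main__":
    example_consequences = ["intron_variant", "missense_variant", "stop_gained"]
    # Lower priority_code means more severe (1 = HIGH, 2 = MED, 3 = LOW).
    most_severe = min(example_consequences,
                      key=lambda c: effect_map[c].priority_code)
    print("%s %s" % (most_severe, effect_map[most_severe].priority))  # stop_gained HIGH
    print(effect_dict["missense_variant"])  # non_syn_coding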
|
mit
| -1,276,381,186,029,272,600 | 5,394,649,785,589,117,000 | 50.839286 | 215 | 0.574061 | false |
MartijnBraam/CouchPotatoServer
|
libs/rtorrent/err.py
|
182
|
1638
|
# Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.common import convert_version_tuple_to_str
class RTorrentVersionError(Exception):
def __init__(self, min_version, cur_version):
self.min_version = min_version
self.cur_version = cur_version
self.msg = "Minimum version required: {0}".format(
convert_version_tuple_to_str(min_version))
def __str__(self):
return(self.msg)
class MethodError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return(self.msg)
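# --- Editorial usage sketch (not part of the original rtorrent module) ---
# Illustrates how callers are expected to raise this exception; the version
# tuples are made-up, and the printed text assumes convert_version_tuple_to_str
# joins the tuple with dots.
if __name__ == "__main__":
    try:
        raise RTorrentVersionError((0, 9, 2), (0, 8, 9))
    except RTorrentVersionError as e:
        print(e)  # e.g. "Minimum version required: 0.9.2"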
|
gpl-3.0
| -4,230,582,944,486,372,400 | 7,623,512,675,236,657,000 | 39.95 | 72 | 0.729548 | false |
Panos512/invenio
|
modules/miscutil/lib/upgrades/invenio_2014_11_04_format_recjson.py
|
5
|
1452
|
## -*- mode: python; coding: utf-8; -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Add new format `recjson` to format table."""
from invenio.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
"""Upgrade recipe information."""
return "New format recjson to format table."
def do_upgrade():
"""Upgrade recipe procedure."""
if not run_sql("SELECT id FROM format WHERE code='recjson'"):
run_sql("INSERT INTO format "
"(name,code,description,content_type,visibility) "
"VALUES ('recjson','recjson', 'recjson record representation',"
"'application/json', 0)")
def estimate():
"""Upgrade recipe time estimate."""
return 1
|
gpl-2.0
| -1,004,371,898,769,867,800 | 2,959,887,956,501,866,000 | 32.767442 | 79 | 0.680441 | false |
spthaolt/VTK
|
Examples/Modelling/Python/expCos.py
|
8
|
2383
|
#!/usr/bin/env python
# This example demonstrates how to use a programmable filter and how
# to use the special vtkDataSetToDataSet::GetOutputPort() methods
import vtk
from math import *
# We create a 100 by 100 point plane to sample
plane = vtk.vtkPlaneSource()
plane.SetXResolution(100)
plane.SetYResolution(100)
# We transform the plane by a factor of 10 on X and Y
transform = vtk.vtkTransform()
transform.Scale(10, 10, 1)
transF = vtk.vtkTransformPolyDataFilter()
transF.SetInputConnection(plane.GetOutputPort())
transF.SetTransform(transform)
# Compute a damped cosine, exp(-r)*cos(10*r), and its derivative. We'll use a programmable filter
# for this. Note the unusual GetPolyDataInput() & GetOutputPort() methods.
besselF = vtk.vtkProgrammableFilter()
besselF.SetInputConnection(transF.GetOutputPort())
# The SetExecuteMethod takes a Python function as an argument
# In here is where all the processing is done.
def bessel():
input = besselF.GetPolyDataInput()
numPts = input.GetNumberOfPoints()
newPts = vtk.vtkPoints()
derivs = vtk.vtkFloatArray()
for i in range(0, numPts):
x = input.GetPoint(i)
x0, x1 = x[:2]
r = sqrt(x0*x0+x1*x1)
x2 = exp(-r)*cos(10.0*r)
deriv = -exp(-r)*(cos(10.0*r)+10.0*sin(10.0*r))
newPts.InsertPoint(i, x0, x1, x2)
derivs.InsertValue(i, deriv)
besselF.GetPolyDataOutput().CopyStructure(input)
besselF.GetPolyDataOutput().SetPoints(newPts)
besselF.GetPolyDataOutput().GetPointData().SetScalars(derivs)
besselF.SetExecuteMethod(bessel)
# We warp the plane based on the scalar values calculated above
warp = vtk.vtkWarpScalar()
warp.SetInput(besselF.GetPolyDataOutput())
warp.XYPlaneOn()
warp.SetScaleFactor(0.5)
# We create a mapper and actor as usual. In the case we adjust the
# scalar range of the mapper to match that of the computed scalars
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(warp.GetPolyDataOutput())
mapper.SetScalarRange(besselF.GetPolyDataOutput().GetScalarRange())
carpet = vtk.vtkActor()
carpet.SetMapper(mapper)
# Create the RenderWindow, Renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(carpet)
renWin.SetSize(500, 500)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.5)
iren.Initialize()
renWin.Render()
iren.Start()
|
bsd-3-clause
| 3,037,394,601,680,233,500 | 6,110,747,293,109,646,000 | 28.419753 | 74 | 0.744859 | false |
Karaage-Cluster/karaage
|
karaage/projects/utils.py
|
2
|
1729
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from karaage.machines.models import Account
from karaage.projects.models import Project
def add_user_to_project(person, project):
if not person.has_account():
Account.create(person, project)
project.group.members.add(person)
def remove_user_from_project(person, project):
project.group.members.remove(person)
def get_new_pid(institute):
""" Return a new Project ID
Keyword arguments:
    institute -- the Institute instance the project belongs to
"""
number = '0001'
prefix = 'p%s' % institute.name.replace(' ', '')[:4]
found = True
while found:
try:
Project.objects.get(pid=prefix + number)
number = str(int(number) + 1)
if len(number) == 1:
number = '000' + number
elif len(number) == 2:
number = '00' + number
elif len(number) == 3:
number = '0' + number
except Project.DoesNotExist:
found = False
return prefix + number
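# --- Editorial note (not part of the original Karaage module) ---
# get_new_pid() builds a prefix from the first four characters of the institute
# name (spaces removed) and then probes the database for the first unused
# zero-padded suffix.  For a hypothetical institute named "Melbourne" that
# already owns pMelb0001 and pMelb0002, a sketch of the expected behaviour:
#
#   get_new_pid(institute)                  # -> 'pMelb0003'
#   add_user_to_project(person, project)    # creates an Account when missing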
|
gpl-3.0
| -4,947,836,999,311,023,000 | -2,620,025,622,683,408,400 | 30.436364 | 70 | 0.658762 | false |
Omegaphora/external_chromium_org_tools_gyp
|
pylib/gyp/xcode_ninja.py
|
22
|
10034
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and a large number
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
  # Put sources_to_index in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp)
return (new_target_list, new_target_dicts, new_data)
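# --- Editorial usage sketch (not part of the original gyp module) ---
# The wrapper is normally selected through gyp's generator environment
# variables; the project name and patterns below are hypothetical, and the
# "xcode-ninja" format name is assumed to be registered by gyp's front end.
#
#   GYP_GENERATORS=xcode-ninja \
#   GYP_GENERATOR_FLAGS="xcode_ninja_target_pattern=^unit_tests$ xcode_ninja_jobs=4" \
#   gyp --depth=. my_project.gyp
#
# CreateWrapper() then emits my_project.ninja.gyp plus a sources_for_indexing
# target, and _WriteWorkspace() wraps both projects in a single .xcworkspace.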
|
bsd-3-clause
| 2,027,793,057,739,907,000 | 8,504,809,502,094,531,000 | 37.741313 | 80 | 0.669524 | false |
samklr/spark-timeseries
|
python/sparkts/test/test_timeseriesrdd.py
|
6
|
5407
|
from test_utils import PySparkTestCase
from sparkts.timeseriesrdd import *
from sparkts.timeseriesrdd import _TimeSeriesSerializer
from sparkts.datetimeindex import *
import pandas as pd
import numpy as np
from unittest import TestCase
from io import BytesIO
from pyspark.sql import SQLContext
class TimeSeriesSerializerTestCase(TestCase):
def test_times_series_serializer(self):
serializer = _TimeSeriesSerializer()
stream = BytesIO()
series = [('abc', np.array([4.0, 4.0, 5.0])), ('123', np.array([1.0, 2.0, 3.0]))]
serializer.dump_stream(iter(series), stream)
stream.seek(0)
reconstituted = list(serializer.load_stream(stream))
self.assertEquals(reconstituted[0][0], series[0][0])
self.assertEquals(reconstituted[1][0], series[1][0])
self.assertTrue((reconstituted[0][1] == series[0][1]).all())
self.assertTrue((reconstituted[1][1] == series[1][1]).all())
class TimeSeriesRDDTestCase(PySparkTestCase):
def test_time_series_rdd(self):
freq = DayFrequency(1, self.sc)
start = '2015-04-09'
dt_index = uniform(start, periods=10, freq=freq, sc=self.sc)
vecs = [np.arange(0, 10), np.arange(10, 20), np.arange(20, 30)]
rdd = self.sc.parallelize(vecs).map(lambda x: (str(x[0]), x))
tsrdd = TimeSeriesRDD(dt_index, rdd)
self.assertEquals(tsrdd.count(), 3)
contents = tsrdd.collectAsMap()
self.assertEquals(len(contents), 3)
self.assertTrue((contents["0"] == np.arange(0, 10)).all())
self.assertTrue((contents["10"] == np.arange(10, 20)).all())
self.assertTrue((contents["20"] == np.arange(20, 30)).all())
subslice = tsrdd['2015-04-10':'2015-04-15']
self.assertEquals(subslice.index(), uniform('2015-04-10', periods=6, freq=freq, sc=self.sc))
contents = subslice.collectAsMap()
self.assertEquals(len(contents), 3)
self.assertTrue((contents["0"] == np.arange(1, 7)).all())
self.assertTrue((contents["10"] == np.arange(11, 17)).all())
self.assertTrue((contents["20"] == np.arange(21, 27)).all())
def test_to_instants(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
samples = tsrdd.to_instants().collect()
target_dates = ['2015-4-9', '2015-4-10', '2015-4-11', '2015-4-12']
self.assertEquals([x[0] for x in samples], [pd.Timestamp(x) for x in target_dates])
self.assertTrue((samples[0][1] == np.arange(0, 20, 4)).all())
self.assertTrue((samples[1][1] == np.arange(1, 20, 4)).all())
self.assertTrue((samples[2][1] == np.arange(2, 20, 4)).all())
self.assertTrue((samples[3][1] == np.arange(3, 20, 4)).all())
def test_to_observations(self):
sql_ctx = SQLContext(self.sc)
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
print(dt_index._jdt_index.size())
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
obsdf = tsrdd.to_observations_dataframe(sql_ctx)
tsrdd_from_df = time_series_rdd_from_observations( \
dt_index, obsdf, 'timestamp', 'key', 'value')
ts1 = tsrdd.collect()
ts1.sort(key = lambda x: x[0])
ts2 = tsrdd_from_df.collect()
ts2.sort(key = lambda x: x[0])
self.assertTrue(all([pair[0][0] == pair[1][0] and (pair[0][1] == pair[1][1]).all() \
for pair in zip(ts1, ts2)]))
df1 = obsdf.collect()
df1.sort(key = lambda x: x.value)
df2 = tsrdd_from_df.to_observations_dataframe(sql_ctx).collect()
df2.sort(key = lambda x: x.value)
self.assertEquals(df1, df2)
def test_filter(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
filtered = tsrdd.filter(lambda x: x[0] == 'a' or x[0] == 'b')
self.assertEquals(filtered.count(), 2)
# assert it has TimeSeriesRDD functionality:
filtered['2015-04-10':'2015-04-15'].count()
def test_to_pandas_series_rdd(self):
vecs = [np.arange(x, x + 4) for x in np.arange(0, 20, 4)]
labels = ['a', 'b', 'c', 'd', 'e']
start = '2015-4-9'
dt_index = uniform(start, periods=4, freq=DayFrequency(1, self.sc), sc=self.sc)
rdd = self.sc.parallelize(zip(labels, vecs), 3)
tsrdd = TimeSeriesRDD(dt_index, rdd)
series_arr = tsrdd.to_pandas_series_rdd().collect()
pd_index = dt_index.to_pandas_index()
self.assertEquals(len(vecs), len(series_arr))
for i in xrange(len(vecs)):
self.assertEquals(series_arr[i][0], labels[i])
self.assertTrue(pd.Series(vecs[i], pd_index).equals(series_arr[i][1]))
|
apache-2.0
| 4,979,603,162,859,046,000 | -8,435,122,001,551,309,000 | 45.213675 | 100 | 0.591086 | false |
DMCsys/smartalkaudio
|
oss-survey/xmms2-0.8DrO_o/doc/tutorial/python/tut2.py
|
1
|
1951
|
#!/usr/bin/env python
# XMMS2 - X Music Multiplexer System
# Copyright (C) 2003-2006 XMMS2 Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# This file is a part of the XMMS2 client tutorial #2
# Here we will learn to retrieve results from a command
import xmmsclient
import os
import sys
"""
The first part of this program is
commented in tut1.py. See that one for
instructions
"""
xmms = xmmsclient.XMMS("tutorial2")
try:
xmms.connect(os.getenv("XMMS_PATH"))
except IOError, detail:
print "Connection failed:", detail
sys.exit(1)
"""
Now we send a command that will return
a result. Let's find out which entry
is currently playing.
Note that this program has to be run while
xmms2 is playing something, otherwise
XMMS.playback_current_id will return 0.
"""
result = xmms.playback_current_id()
"""
We are still doing sync operations, wait for the
answer and block.
"""
result.wait()
"""
Also this time we need to check for errors.
Errors can occur on all commands, but not signals
and broadcasts. We will talk about these later.
"""
if result.iserror():
print "playback current id returns error, %s" % result.get_error()
"""
Let's retrieve the value from the XMMSResult object.
You don't have to know what type of value is returned
in response to which command - simply call
XMMSResult.value()
In this case XMMS.playback_current_id will return a UINT
"""
id = result.value()
"""Print the value"""
print "Currently playing id is %d" % id
|
gpl-3.0
| 9,027,773,254,154,672,000 | 2,479,178,649,015,292,400 | 26.097222 | 69 | 0.736033 | false |
twilio/twilio-python
|
twilio/rest/preview/understand/assistant/task/sample.py
|
2
|
18621
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SampleList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid):
"""
Initialize the SampleList
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleList
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleList
"""
super(SampleList, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples'.format(**self._solution)
def stream(self, language=values.unset, limit=None, page_size=None):
"""
Streams SampleInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(language=language, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, language=values.unset, limit=None, page_size=None):
"""
Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
return list(self.stream(language=language, limit=limit, page_size=page_size, ))
def page(self, language=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of SampleInstance records from the API.
Request is executed immediately
:param unicode language: An ISO language-country string of the sample.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
data = values.of({
'Language': language,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return SamplePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SampleInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SamplePage(self._version, response, self._solution)
def create(self, language, tagged_text, source_channel=values.unset):
"""
Create the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: The created SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def get(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SampleList>'
class SamplePage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the SamplePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SamplePage
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
super(SamplePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SampleInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SamplePage>'
class SampleContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid, sid):
"""
Initialize the SampleContext
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
super(SampleContext, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, 'sid': sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the SampleInstance
:returns: The fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: The updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleContext {}>'.format(context)
class SampleInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, assistant_sid, task_sid, sid=None):
"""
Initialize the SampleInstance
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
super(SampleInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'task_sid': payload.get('task_sid'),
'language': payload.get('language'),
'assistant_sid': payload.get('assistant_sid'),
'sid': payload.get('sid'),
'tagged_text': payload.get('tagged_text'),
'url': payload.get('url'),
'source_channel': payload.get('source_channel'),
}
# Context
self._context = None
self._solution = {
'assistant_sid': assistant_sid,
'task_sid': task_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SampleContext for this SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
if self._context is None:
self._context = SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique ID of the Account that created this Sample.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date that this resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date that this resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def task_sid(self):
"""
:returns: The unique ID of the Task associated with this Sample.
:rtype: unicode
"""
return self._properties['task_sid']
@property
def language(self):
"""
:returns: An ISO language-country string of the sample.
:rtype: unicode
"""
return self._properties['language']
@property
def assistant_sid(self):
"""
:returns: The unique ID of the Assistant.
:rtype: unicode
"""
return self._properties['assistant_sid']
@property
def sid(self):
"""
:returns: A 34 character string that uniquely identifies this resource.
:rtype: unicode
"""
return self._properties['sid']
@property
def tagged_text(self):
"""
:returns: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:rtype: unicode
"""
return self._properties['tagged_text']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def source_channel(self):
"""
:returns: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:rtype: unicode
"""
return self._properties['source_channel']
def fetch(self):
"""
Fetch the SampleInstance
:returns: The fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.fetch()
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: The updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, )
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleInstance {}>'.format(context)
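# --- Editorial usage sketch (not part of the generated Twilio bindings) ---
# Account SID, auth token and resource SIDs below are placeholders; the
# traversal assumes the standard twilio.rest.Client entry point exposes this
# preview domain as client.preview.understand.
#
#   from twilio.rest import Client
#   client = Client("ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "your_auth_token")
#   samples = client.preview.understand \
#       .assistants("UAxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
#       .tasks("UDxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
#       .samples
#   sample = samples.create(language="en-US",
#                           tagged_text="order a {Size} pizza")
#   print(sample.sid, sample.language)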
|
mit
| -8,059,345,424,897,606,000 | -7,461,123,202,439,127,000 | 36.694332 | 192 | 0.626873 | false |
gaddman/ansible
|
lib/ansible/modules/network/aci/aci_config_rollback.py
|
2
|
9612
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_config_rollback
short_description: Provides rollback and rollback preview functionality (config:ImportP)
description:
- Provides rollback and rollback preview functionality for Cisco ACI fabrics.
- Config Rollbacks are done using snapshots C(aci_snapshot) with the configImportP class.
seealso:
- module: aci_config_snapshot
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(config:ImportP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
compare_export_policy:
description:
- The export policy that the C(compare_snapshot) is associated to.
compare_snapshot:
description:
- The name of the snapshot to compare with C(snapshot).
description:
description:
- The description for the Import Policy.
aliases: [ descr ]
export_policy:
description:
- The export policy that the C(snapshot) is associated to.
required: yes
fail_on_decrypt:
description:
- Determines if the APIC should fail the rollback if unable to decrypt secured data.
- The APIC defaults to C(yes) when unset.
type: bool
import_mode:
description:
- Determines how the import should be handled by the APIC.
- The APIC defaults to C(atomic) when unset.
choices: [ atomic, best-effort ]
import_policy:
description:
- The name of the Import Policy to use for config rollback.
import_type:
description:
- Determines how the current and snapshot configuration should be compared for replacement.
- The APIC defaults to C(replace) when unset.
choices: [ merge, replace ]
snapshot:
description:
- The name of the snapshot to rollback to, or the base snapshot to use for comparison.
- The C(aci_snapshot) module can be used to query the list of available snapshots.
required: yes
state:
description:
- Use C(preview) for previewing the diff between two snapshots.
- Use C(rollback) for reverting the configuration to a previous snapshot.
choices: [ preview, rollback ]
default: rollback
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
---
- name: Create a Snapshot
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: present
delegate_to: localhost
- name: Query Existing Snapshots
aci_config_snapshot:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
state: query
delegate_to: localhost
- name: Compare Snapshot Files
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
compare_export_policy: config_backup
compare_snapshot: run-2017-08-27T23-43-56
state: preview
delegate_to: localhost
- name: Rollback Configuration
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
import_policy: rollback_config
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
state: rollback
delegate_to: localhost
- name: Rollback Configuration
aci_config_rollback:
host: apic
username: admin
password: SomeSecretPassword
import_policy: rollback_config
export_policy: config_backup
snapshot: run-2017-08-28T06-24-01
description: Rollback 8-27 changes
import_mode: atomic
import_type: replace
fail_on_decrypt: yes
state: rollback
delegate_to: localhost
'''
RETURN = r'''
preview:
description: A preview between two snapshots
returned: when state is preview
type: string
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
from ansible.module_utils.urls import fetch_url
# Optional, only used for rollback preview
try:
import lxml.etree
from xmljson import cobra
XML_TO_JSON = True
except ImportError:
XML_TO_JSON = False
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
compare_export_policy=dict(type='str'),
compare_snapshot=dict(type='str'),
description=dict(type='str', aliases=['descr']),
export_policy=dict(type='str'),
fail_on_decrypt=dict(type='bool'),
import_mode=dict(type='str', choices=['atomic', 'best-effort']),
import_policy=dict(type='str'),
import_type=dict(type='str', choices=['merge', 'replace']),
snapshot=dict(type='str', required=True),
state=dict(type='str', default='rollback', choices=['preview', 'rollback']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
required_if=[
['state', 'preview', ['compare_export_policy', 'compare_snapshot']],
['state', 'rollback', ['import_policy']],
],
)
aci = ACIModule(module)
description = module.params['description']
export_policy = module.params['export_policy']
fail_on_decrypt = aci.boolean(module.params['fail_on_decrypt'])
import_mode = module.params['import_mode']
import_policy = module.params['import_policy']
import_type = module.params['import_type']
snapshot = module.params['snapshot']
state = module.params['state']
if state == 'rollback':
if snapshot.startswith('run-'):
snapshot = snapshot.replace('run-', '', 1)
if not snapshot.endswith('.tar.gz'):
snapshot += '.tar.gz'
filename = 'ce2_{0}-{1}'.format(export_policy, snapshot)
aci.construct_url(
root_class=dict(
aci_class='configImportP',
aci_rn='fabric/configimp-{0}'.format(import_policy),
module_object=import_policy,
target_filter={'name': import_policy},
),
)
aci.get_existing()
aci.payload(
aci_class='configImportP',
class_config=dict(
adminSt='triggered',
descr=description,
failOnDecryptErrors=fail_on_decrypt,
fileName=filename,
importMode=import_mode,
importType=import_type,
name=import_policy,
snapshot='yes',
),
)
aci.get_diff(aci_class='configImportP')
aci.post_config()
elif state == 'preview':
aci.url = '%(protocol)s://%(host)s/mqapi2/snapshots.diff.xml' % module.params
aci.filter_string = (
'?s1dn=uni/backupst/snapshots-[uni/fabric/configexp-%(export_policy)s]/snapshot-%(snapshot)s&'
's2dn=uni/backupst/snapshots-[uni/fabric/configexp-%(compare_export_policy)s]/snapshot-%(compare_snapshot)s'
) % module.params
# Generate rollback comparison
get_preview(aci)
aci.exit_json()
def get_preview(aci):
'''
This function is used to generate a preview between two snapshots and add the parsed results to the aci module return data.
'''
uri = aci.url + aci.filter_string
resp, info = fetch_url(aci.module, uri, headers=aci.headers, method='GET', timeout=aci.module.params['timeout'], use_proxy=aci.module.params['use_proxy'])
aci.method = 'GET'
aci.response = info['msg']
aci.status = info['status']
# Handle APIC response
if info['status'] == 200:
xml_to_json(aci, resp.read())
else:
aci.result['raw'] = resp.read()
aci.fail_json(msg="Request failed: %(code)s %(text)s (see 'raw' output)" % aci.error)
def xml_to_json(aci, response_data):
'''
This function is used to convert preview XML data into JSON.
'''
if XML_TO_JSON:
xml = lxml.etree.fromstring(to_bytes(response_data))
xmldata = cobra.data(xml)
aci.result['preview'] = xmldata
else:
aci.result['preview'] = response_data
if __name__ == "__main__":
main()
|
gpl-3.0
| -8,690,066,011,250,116,000 | 8,106,166,437,999,290,000 | 29.906752 | 158 | 0.660112 | false |
tempbottle/kbengine
|
kbe/res/scripts/common/Lib/idlelib/ScrolledList.py
|
76
|
4159
|
import re
from tkinter import *
class ScrolledList:
default = "(None)"
def __init__(self, master, **options):
# Create top frame, with scrollbar and listbox
self.master = master
self.frame = frame = Frame(master)
self.frame.pack(fill="both", expand=1)
self.vbar = vbar = Scrollbar(frame, name="vbar")
self.vbar.pack(side="right", fill="y")
self.listbox = listbox = Listbox(frame, exportselection=0,
background="white")
if options:
listbox.configure(options)
listbox.pack(expand=1, fill="both")
# Tie listbox and scrollbar together
vbar["command"] = listbox.yview
listbox["yscrollcommand"] = vbar.set
# Bind events to the list box
listbox.bind("<ButtonRelease-1>", self.click_event)
listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
listbox.bind("<ButtonPress-3>", self.popup_event)
listbox.bind("<Key-Up>", self.up_event)
listbox.bind("<Key-Down>", self.down_event)
# Mark as empty
self.clear()
def close(self):
self.frame.destroy()
def clear(self):
self.listbox.delete(0, "end")
self.empty = 1
self.listbox.insert("end", self.default)
def append(self, item):
if self.empty:
self.listbox.delete(0, "end")
self.empty = 0
self.listbox.insert("end", str(item))
def get(self, index):
return self.listbox.get(index)
def click_event(self, event):
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
self.on_select(index)
return "break"
def double_click_event(self, event):
index = self.listbox.index("active")
self.select(index)
self.on_double(index)
return "break"
menu = None
def popup_event(self, event):
if not self.menu:
self.make_menu()
menu = self.menu
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
menu.tk_popup(event.x_root, event.y_root)
def make_menu(self):
menu = Menu(self.listbox, tearoff=0)
self.menu = menu
self.fill_menu()
def up_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index - 1
else:
index = self.listbox.size() - 1
if index < 0:
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def down_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index + 1
else:
index = 0
if index >= self.listbox.size():
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def select(self, index):
self.listbox.focus_set()
self.listbox.activate(index)
self.listbox.selection_clear(0, "end")
self.listbox.selection_set(index)
self.listbox.see(index)
# Methods to override for specific actions
def fill_menu(self):
pass
def on_select(self, index):
pass
def on_double(self, index):
pass
def _scrolled_list(parent):
root = Tk()
root.title("Test ScrolledList")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
class MyScrolledList(ScrolledList):
def fill_menu(self): self.menu.add_command(label="right click")
def on_select(self, index): print("select", self.get(index))
def on_double(self, index): print("double", self.get(index))
scrolled_list = MyScrolledList(root)
for i in range(30):
scrolled_list.append("Item %02d" % i)
root.mainloop()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_scrolled_list)
|
lgpl-3.0
| 8,398,037,187,977,273,000 | 3,475,676,148,180,455,400 | 28.707143 | 77 | 0.573696 | false |
jjshoe/ansible-modules-core
|
network/eos/eos_command.py
|
5
|
5315
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: eos_command
version_added: "2.1"
author: "Peter sprygada (@privateip)"
short_description: Run arbitrary command on EOS device
description:
  - Sends an arbitrary set of commands to an EOS node and returns the results
    read from the device. The M(eos_command) module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: eos
options:
commands:
description:
- The commands to send to the remote EOS device over the
configured provider. The resulting output from the command
is returned. If the I(waitfor) argument is provided, the
        module does not return until the condition is satisfied or
        the number of retries has expired.
required: true
waitfor:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
required: false
default: null
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the waitfor
conditionals
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditional, the interval indicates how to long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- eos_command:
commands: "{{ lookup('file', 'commands.txt') }}"
- eos_command:
commands:
- show interface {{ item }}
with_items: interfaces
- eos_command:
commands:
- show version
waitfor:
- "result[0] contains 4.15.0F"
- eos_command:
commands:
- show version | json
- show interfaces | json
- show version
waitfor:
- "result[2] contains '4.15.0F'"
- "result[1].interfaces.Management1.interfaceAddress[0].primaryIp.maskLen eq 24"
- "result[0].modelName == 'vEOS'"
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
  returned: failed
type: list
sample: ['...', '...']
"""
import time
import shlex
import re
import json
INDEX_RE = re.compile(r'(\[\d+\])')
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def main():
spec = dict(
commands=dict(type='list'),
waitfor=dict(type='list'),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = get_module(argument_spec=spec,
supports_check_mode=True)
commands = module.params['commands']
retries = module.params['retries']
interval = module.params['interval']
try:
queue = set()
for entry in (module.params['waitfor'] or list()):
queue.add(Conditional(entry))
except AttributeError, exc:
module.fail_json(msg=exc.message)
result = dict(changed=False)
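# Retry loop: run the commands up to `retries` times, dropping each waitfor
# conditional from the queue once it is satisfied. The while/else branch only
# runs if the retries are exhausted with conditions still unmet.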
while retries > 0:
response = module.execute(commands)
result['stdout'] = response
for index, cmd in enumerate(commands):
if cmd.endswith('json'):
response[index] = json.loads(response[index])
for item in list(queue):
if item(response):
queue.remove(item)
if not queue:
break
time.sleep(interval)
retries -= 1
else:
failed_conditions = [item.raw for item in queue]
module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions)
result['stdout_lines'] = list(to_lines(result['stdout']))
return module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.eos import *
if __name__ == '__main__':
main()
|
gpl-3.0
| -71,169,039,613,272,060 | 6,785,745,818,443,824,000 | 27.88587 | 94 | 0.657949 | false |
jimi-c/ansible
|
lib/ansible/modules/monitoring/monit.py
|
102
|
7631
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
- Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
name:
description:
- The name of the I(monit) program/process to manage
required: true
state:
description:
- The state of service
required: true
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
timeout:
description:
- If there are pending actions for the service monitored by monit, then Ansible will check
for up to this many seconds to verify the requested action has been performed.
Ansible will sleep for five seconds between each check.
default: 300
version_added: "2.1"
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit:
name: httpd
state: started
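# Illustrative only: restart the same program, allowing up to 120 seconds
# for pending Monit actions to settle before failing (see the timeout option).
- monit:
name: httpd
state: restarted
timeout: 120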
'''
import time
import re
from ansible.module_utils.basic import AnsibleModule
def main():
arg_spec = dict(
name=dict(required=True),
timeout=dict(default=300, type='int'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
state = module.params['state']
timeout = module.params['timeout']
MONIT = module.get_bin_path('monit', True)
def monit_version():
rc, out, err = module.run_command('%s -V' % MONIT, check_rc=True)
version_line = out.split('\n')[0]
version = re.search(r"[0-9]+\.[0-9]+", version_line).group().split('.')
# Use only the major and minor versions; even if there are more components, these should be enough
return int(version[0]), int(version[1])
def is_version_higher_than_5_18():
return (MONIT_MAJOR_VERSION, MONIT_MINOR_VERSION) > (5, 18)
def parse(parts):
if is_version_higher_than_5_18():
return parse_current(parts)
else:
return parse_older_versions(parts)
def parse_older_versions(parts):
if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
return ' '.join(parts[2:]).lower()
else:
return ''
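# Monit newer than 5.18 ('summary -B') lists the columns in a different order;
# assuming a line shaped like "name Running Process", parts[0] is the name,
# parts[1] the status and parts[2] the object type.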
def parse_current(parts):
if len(parts) > 2 and parts[2].lower() == 'process' and parts[0] == name:
return ''.join(parts[1]).lower()
else:
return ''
def get_status():
"""Return the status of the process in monit, or the empty string if not present."""
rc, out, err = module.run_command('%s %s' % (MONIT, SUMMARY_COMMAND), check_rc=True)
for line in out.split('\n'):
# Sample output lines:
# Process 'name' Running
# Process 'name' Running - restart pending
parts = parse(line.split())
if parts != '':
return parts
return ''
def run_command(command):
"""Runs a monit command, and returns the new status."""
module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
return get_status()
def wait_for_monit_to_stop_pending():
"""Fails this run if there is no status or it's pending/initializing for timeout"""
timeout_time = time.time() + timeout
sleep_time = 5
running_status = get_status()
while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
if time.time() >= timeout_time:
module.fail_json(
msg='waited too long for "pending" or "initializing" status to go away ({0})'.format(
running_status
),
state=state
)
time.sleep(sleep_time)
running_status = get_status()
MONIT_MAJOR_VERSION, MONIT_MINOR_VERSION = monit_version()
SUMMARY_COMMAND = ('summary', 'summary -B')[is_version_higher_than_5_18()]
if state == 'reloaded':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command('%s reload' % MONIT)
if rc != 0:
module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
present = get_status() != ''
if not present and not state == 'present':
module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
if state == 'present':
if not present:
if module.check_mode:
module.exit_json(changed=True)
status = run_command('reload')
if status == '':
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
module.exit_json(changed=False, name=name, state=state)
wait_for_monit_to_stop_pending()
running = 'running' in get_status()
if running and state in ['started', 'monitored']:
module.exit_json(changed=False, name=name, state=state)
if running and state == 'stopped':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('stop')
if status in ['not monitored'] or 'stop pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not stopped' % name, status=status)
if running and state == 'unmonitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('unmonitor')
if status in ['not monitored'] or 'unmonitor pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not unmonitored' % name, status=status)
elif state == 'restarted':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('restart')
if status in ['initializing', 'running'] or 'restart pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not restarted' % name, status=status)
elif not running and state == 'started':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('start')
if status in ['initializing', 'running'] or 'start pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not started' % name, status=status)
elif not running and state == 'monitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('monitor')
if status not in ['not monitored']:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not monitored' % name, status=status)
module.exit_json(changed=False, name=name, state=state)
if __name__ == '__main__':
main()
|
gpl-3.0
| 5,088,576,262,938,772,000 | -7,844,931,924,997,186,000 | 34.826291 | 129 | 0.602018 | false |
jeanlinux/calibre
|
src/calibre/gui2/tag_browser/model.py
|
11
|
60438
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from future_builtins import map
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import traceback, cPickle, copy, os
from PyQt5.Qt import (QAbstractItemModel, QIcon, QFont, Qt,
QMimeData, QModelIndex, pyqtSignal, QObject)
from calibre.constants import config_dir
from calibre.gui2 import gprefs, config, error_dialog, file_icon_provider
from calibre.db.categories import Tag
from calibre.utils.config import tweaks
from calibre.utils.icu import sort_key, lower, strcmp, collation_order
from calibre.library.field_metadata import TagsIcons, category_icon_map
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.utils.formatter import EvalFormatter
TAG_SEARCH_STATES = {'clear': 0, 'mark_plus': 1, 'mark_plusplus': 2,
'mark_minus': 3, 'mark_minusminus': 4}
DRAG_IMAGE_ROLE = Qt.UserRole + 1000
_bf = None
def bf():
global _bf
if _bf is None:
_bf = QFont()
_bf.setBold(True)
_bf = (_bf)
return _bf
class TagTreeItem(object): # {{{
CATEGORY = 0
TAG = 1
ROOT = 2
def __init__(self, data=None, category_icon=None, icon_map=None,
parent=None, tooltip=None, category_key=None, temporary=False):
self.parent = parent
self.children = []
self.blank = QIcon()
self.id_set = set()
self.is_gst = False
self.boxed = False
self.icon_state_map = list(icon_map)
if self.parent is not None:
self.parent.append(self)
if data is None:
self.type = self.ROOT
else:
self.type = self.TAG if category_icon is None else self.CATEGORY
if self.type == self.CATEGORY:
self.name, self.icon = data, category_icon
self.py_name = data
self.category_key = category_key
self.temporary = temporary
self.tag = Tag(data, category=category_key,
is_editable=category_key not in
['news', 'search', 'identifiers', 'languages'],
is_searchable=category_key not in ['search'])
elif self.type == self.TAG:
self.icon_state_map[0] = data.icon
self.tag = data
self.tooltip = (tooltip + ' ') if tooltip else ''
def break_cycles(self):
del self.parent
del self.children
def __str__(self):
if self.type == self.ROOT:
return 'ROOT'
if self.type == self.CATEGORY:
return 'CATEGORY:'+str(
self.name)+':%d'%len(getattr(self,
'children', []))
return 'TAG: %s'%self.tag.name
def row(self):
if self.parent is not None:
return self.parent.children.index(self)
return 0
def append(self, child):
child.parent = self
self.children.append(child)
def data(self, role):
if role == Qt.UserRole:
return self
if self.type == self.TAG:
return self.tag_data(role)
if self.type == self.CATEGORY:
return self.category_data(role)
return None
def category_data(self, role):
if role == Qt.DisplayRole:
return (self.py_name + ' [%d]'%len(self.child_tags()))
if role == Qt.EditRole:
return (self.py_name)
if role == Qt.DecorationRole:
if self.tag.state:
return self.icon_state_map[self.tag.state]
return self.icon
if role == Qt.FontRole:
return bf()
if role == Qt.ToolTipRole and self.tooltip is not None:
return (self.tooltip)
if role == DRAG_IMAGE_ROLE:
return self.icon
return None
def tag_data(self, role):
tag = self.tag
if tag.use_sort_as_name:
name = tag.sort
tt_author = True
else:
p = self
while p.parent.type != self.ROOT:
p = p.parent
if not tag.is_hierarchical:
name = tag.original_name
else:
name = tag.name
tt_author = False
if role == Qt.DisplayRole:
count = len(self.id_set)
count = count if count > 0 else tag.count
if count == 0:
return ('%s'%(name))
else:
return ('[%d] %s'%(count, name))
if role == Qt.EditRole:
return (tag.original_name)
if role == Qt.DecorationRole:
return self.icon_state_map[tag.state]
if role == Qt.ToolTipRole:
if tt_author:
if tag.tooltip is not None:
return ('(%s) %s'%(tag.name, tag.tooltip))
else:
return (tag.name)
if tag.tooltip:
return (self.tooltip + tag.tooltip)
else:
return (self.tooltip)
if role == DRAG_IMAGE_ROLE:
return self.icon_state_map[0]
return None
def toggle(self, set_to=None):
'''
set_to: None => advance the state, otherwise a value from TAG_SEARCH_STATES
'''
if set_to is None:
while True:
self.tag.state = (self.tag.state + 1)%5
if self.tag.state == TAG_SEARCH_STATES['mark_plus'] or \
self.tag.state == TAG_SEARCH_STATES['mark_minus']:
if self.tag.is_searchable:
break
elif self.tag.state == TAG_SEARCH_STATES['mark_plusplus'] or\
self.tag.state == TAG_SEARCH_STATES['mark_minusminus']:
if self.tag.is_searchable and len(self.children) and \
self.tag.is_hierarchical == '5state':
break
else:
break
else:
self.tag.state = set_to
def all_children(self):
res = []
def recurse(nodes, res):
for t in nodes:
res.append(t)
recurse(t.children, res)
recurse(self.children, res)
return res
def child_tags(self):
res = []
def recurse(nodes, res, depth):
if depth > 100:
return
for t in nodes:
if t.type != TagTreeItem.CATEGORY:
res.append(t)
recurse(t.children, res, depth+1)
recurse(self.children, res, 1)
return res
# }}}
class TagsModel(QAbstractItemModel): # {{{
search_item_renamed = pyqtSignal()
tag_item_renamed = pyqtSignal()
refresh_required = pyqtSignal()
restriction_error = pyqtSignal()
drag_drop_finished = pyqtSignal(object)
user_categories_edited = pyqtSignal(object, object)
def __init__(self, parent):
QAbstractItemModel.__init__(self, parent)
self.node_map = {}
self.category_nodes = []
iconmap = {}
for key in category_icon_map:
iconmap[key] = QIcon(I(category_icon_map[key]))
self.category_icon_map = TagsIcons(iconmap)
self.category_custom_icons = dict()
for k, v in gprefs['tags_browser_category_icons'].iteritems():
icon = QIcon(os.path.join(config_dir, 'tb_icons', v))
if len(icon.availableSizes()) > 0:
self.category_custom_icons[k] = icon
self.categories_with_ratings = ['authors', 'series', 'publisher', 'tags']
self.icon_state_map = [None, QIcon(I('plus.png')), QIcon(I('plusplus.png')),
QIcon(I('minus.png')), QIcon(I('minusminus.png'))]
self.hidden_categories = set()
self.search_restriction = None
self.filter_categories_by = None
self.collapse_model = 'disable'
self.row_map = []
self.root_item = self.create_node(icon_map=self.icon_state_map)
self.db = None
self._build_in_progress = False
self.reread_collapse_model({}, rebuild=False)
@property
def gui_parent(self):
return QObject.parent(self)
def set_custom_category_icon(self, key, path):
d = gprefs['tags_browser_category_icons']
if path:
d[key] = path
self.category_custom_icons[key] = QIcon(os.path.join(config_dir,
'tb_icons', path))
else:
if key in d:
path = os.path.join(config_dir, 'tb_icons', d[key])
try:
os.remove(path)
except:
pass
del d[key]
del self.category_custom_icons[key]
gprefs['tags_browser_category_icons'] = d
def reread_collapse_model(self, state_map, rebuild=True):
if gprefs['tags_browser_collapse_at'] == 0:
self.collapse_model = 'disable'
else:
self.collapse_model = gprefs['tags_browser_partition_method']
if rebuild:
self.rebuild_node_tree(state_map)
def set_database(self, db):
self.beginResetModel()
hidden_cats = db.new_api.pref('tag_browser_hidden_categories', None)
# migrate from config to db prefs
if hidden_cats is None:
hidden_cats = config['tag_browser_hidden_categories']
self.hidden_categories = set()
# strip out any non-existent field keys
for cat in hidden_cats:
if cat in db.field_metadata:
self.hidden_categories.add(cat)
db.new_api.set_pref('tag_browser_hidden_categories', list(self.hidden_categories))
self.db = db
self._run_rebuild()
self.endResetModel()
def rebuild_node_tree(self, state_map={}):
if self._build_in_progress:
print ('Tag Browser build already in progress')
traceback.print_stack()
return
# traceback.print_stack()
# print ()
self._build_in_progress = True
self.beginResetModel()
self._run_rebuild(state_map=state_map)
self.endResetModel()
self._build_in_progress = False
def _run_rebuild(self, state_map={}):
for node in self.node_map.itervalues():
node.break_cycles()
del node # Clear reference to node in the current frame
self.node_map.clear()
self.category_nodes = []
self.root_item = self.create_node(icon_map=self.icon_state_map)
self._rebuild_node_tree(state_map=state_map)
def _rebuild_node_tree(self, state_map):
# Note that _get_category_nodes can indirectly change the
# user_categories dict.
data = self._get_category_nodes(config['sort_tags_by'])
gst = self.db.prefs.get('grouped_search_terms', {})
last_category_node = None
category_node_map = {}
self.category_node_tree = {}
for i, key in enumerate(self.row_map):
if self.hidden_categories:
if key in self.hidden_categories:
continue
found = False
for cat in self.hidden_categories:
if cat.startswith('@') and key.startswith(cat + '.'):
found = True
if found:
continue
is_gst = False
if key.startswith('@') and key[1:] in gst:
tt = _(u'The grouped search term name is "{0}"').format(key)
is_gst = True
elif key == 'news':
tt = ''
else:
cust_desc = ''
fm = self.db.field_metadata[key]
if fm['is_custom']:
cust_desc = fm['display'].get('description', '')
if cust_desc:
cust_desc = '\n' + _('Description:') + ' ' + cust_desc
tt = _(u'The lookup/search name is "{0}"{1}').format(key, cust_desc)
if self.category_custom_icons.get(key, None) is None:
self.category_custom_icons[key] = (
self.category_icon_map['gst'] if is_gst else
self.category_icon_map.get(key, self.category_icon_map['custom:']))
if key.startswith('@'):
path_parts = [p for p in key.split('.')]
path = ''
last_category_node = self.root_item
tree_root = self.category_node_tree
for i,p in enumerate(path_parts):
path += p
if path not in category_node_map:
node = self.create_node(parent=last_category_node,
data=p[1:] if i == 0 else p,
category_icon=self.category_custom_icons[key],
tooltip=tt if path == key else path,
category_key=path,
icon_map=self.icon_state_map)
last_category_node = node
category_node_map[path] = node
self.category_nodes.append(node)
node.can_be_edited = (not is_gst) and (i == (len(path_parts)-1))
node.is_gst = is_gst
if not is_gst:
node.tag.is_hierarchical = '5state'
tree_root[p] = {}
tree_root = tree_root[p]
else:
last_category_node = category_node_map[path]
tree_root = tree_root[p]
path += '.'
else:
node = self.create_node(parent=self.root_item,
data=self.categories[key],
category_icon=self.category_custom_icons[key],
tooltip=tt, category_key=key,
icon_map=self.icon_state_map)
node.is_gst = False
category_node_map[key] = node
last_category_node = node
self.category_nodes.append(node)
self._create_node_tree(data, state_map)
def _create_node_tree(self, data, state_map):
sort_by = config['sort_tags_by']
eval_formatter = EvalFormatter()
if data is None:
print ('_create_node_tree: no data!')
traceback.print_stack()
return
collapse = gprefs['tags_browser_collapse_at']
collapse_model = self.collapse_model
if collapse == 0:
collapse_model = 'disable'
elif collapse_model != 'disable':
if sort_by == 'name':
collapse_template = tweaks['categories_collapsed_name_template']
elif sort_by == 'rating':
collapse_model = 'partition'
collapse_template = tweaks['categories_collapsed_rating_template']
else:
collapse_model = 'partition'
collapse_template = tweaks['categories_collapsed_popularity_template']
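# Splits a hierarchical name such as 'History.Europe.Medieval' into its
# components; names that do not round-trip cleanly (empty parts, stray dots)
# are kept whole as a single component.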
def get_name_components(name):
components = [t.strip() for t in name.split('.') if t.strip()]
if len(components) == 0 or '.'.join(components) != name:
components = [name]
return components
def process_one_node(category, collapse_model, state_map): # {{{
collapse_letter = None
category_node = category
key = category_node.category_key
is_gst = category_node.is_gst
if key not in data:
return
if key in gprefs['tag_browser_dont_collapse']:
collapse_model = 'disable'
cat_len = len(data[key])
if cat_len <= 0:
return
category_child_map = {}
fm = self.db.field_metadata[key]
clear_rating = True if key not in self.categories_with_ratings and \
not fm['is_custom'] and \
not fm['kind'] == 'user' \
else False
in_uc = fm['kind'] == 'user' and not is_gst
tt = key if in_uc else None
if collapse_model == 'first letter':
# Build a list of 'equal' first letters by noticing changes
# in ICU's 'ordinal' for the first letter. In this case, the
# first letter can actually be more than one letter long.
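# Illustrative example: in a locale where 'Ch' collates as a single letter,
# collation_order() reports a length of 2 and the group label becomes 'Ch'
# rather than plain 'C'.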
cl_list = [None] * len(data[key])
last_ordnum = 0
last_c = ' '
for idx,tag in enumerate(data[key]):
if not tag.sort:
c = ' '
else:
c = icu_upper(tag.sort)
ordnum, ordlen = collation_order(c)
if last_ordnum != ordnum:
last_c = c[0:ordlen]
last_ordnum = ordnum
cl_list[idx] = last_c
top_level_component = 'z' + data[key][0].original_name
last_idx = -collapse
category_is_hierarchical = not (
key in ['authors', 'publisher', 'news', 'formats', 'rating'] or
key not in self.db.prefs.get('categories_using_hierarchy', []) or
config['sort_tags_by'] != 'name')
is_formats = key == 'formats'
if is_formats:
fip = file_icon_provider().icon_from_ext
for idx,tag in enumerate(data[key]):
components = None
if clear_rating:
tag.avg_rating = None
tag.state = state_map.get((tag.name, tag.category), 0)
if collapse_model != 'disable' and cat_len > collapse:
if collapse_model == 'partition':
# Only partition at the top level. This means that we must
# not do a break until the outermost component changes.
if idx >= last_idx + collapse and \
not tag.original_name.startswith(top_level_component+'.'):
if cat_len > idx + collapse:
last = idx + collapse - 1
else:
last = cat_len - 1
if category_is_hierarchical:
ct = copy.copy(data[key][last])
components = get_name_components(ct.original_name)
ct.sort = ct.name = components[0]
d = {'last': ct}
# Do the first node after the last node so that
# the components array contains the right values
# to be used later
ct2 = copy.copy(tag)
components = get_name_components(ct2.original_name)
ct2.sort = ct2.name = components[0]
d['first'] = ct2
else:
d = {'first': tag}
d['last'] = data[key][last]
name = eval_formatter.safe_format(collapse_template,
d, '##TAG_VIEW##', None)
if name.startswith('##TAG_VIEW##'):
# Formatter threw an exception. Don't create subnode
node_parent = sub_cat = category
else:
sub_cat = self.create_node(parent=category, data=name,
tooltip=None, temporary=True,
category_icon=category_node.icon,
category_key=category_node.category_key,
icon_map=self.icon_state_map)
sub_cat.tag.is_searchable = False
sub_cat.is_gst = is_gst
node_parent = sub_cat
last_idx = idx # remember where we last partitioned
else:
node_parent = sub_cat
else: # by 'first letter'
cl = cl_list[idx]
if cl != collapse_letter:
collapse_letter = cl
sub_cat = self.create_node(parent=category,
data=collapse_letter,
category_icon=category_node.icon,
tooltip=None, temporary=True,
category_key=category_node.category_key,
icon_map=self.icon_state_map)
sub_cat.is_gst = is_gst
node_parent = sub_cat
else:
node_parent = category
# category display order is important here. The following works
# only if all the non-user categories are displayed before the
# user categories
if category_is_hierarchical or tag.is_hierarchical:
components = get_name_components(tag.original_name)
else:
components = [tag.original_name]
if (not tag.is_hierarchical) and (in_uc or
(fm['is_custom'] and fm['display'].get('is_names', False)) or
not category_is_hierarchical or len(components) == 1):
if is_formats:
try:
tag.icon = fip(tag.name.replace('ORIGINAL_', ''))
except Exception:
tag.icon = self.category_custom_icons[key]
else:
tag.icon = self.category_custom_icons[key]
n = self.create_node(parent=node_parent, data=tag, tooltip=tt,
icon_map=self.icon_state_map)
if tag.id_set is not None:
n.id_set |= tag.id_set
category_child_map[tag.name, tag.category] = n
else:
for i,comp in enumerate(components):
if i == 0:
child_map = category_child_map
top_level_component = comp
else:
child_map = dict([((t.tag.name, t.tag.category), t)
for t in node_parent.children
if t.type != TagTreeItem.CATEGORY])
if (comp,tag.category) in child_map:
node_parent = child_map[(comp,tag.category)]
node_parent.tag.is_hierarchical = \
'5state' if tag.category != 'search' else '3state'
else:
if i < len(components)-1:
t = copy.copy(tag)
t.original_name = '.'.join(components[:i+1])
t.count = 0
if key != 'search':
# This 'manufactured' intermediate node can
# be searched, but cannot be edited.
t.is_editable = False
else:
t.is_searchable = t.is_editable = False
else:
t = tag
if not in_uc:
t.original_name = t.name
t.is_hierarchical = \
'5state' if t.category != 'search' else '3state'
t.name = comp
t.icon = self.category_custom_icons[key]
node_parent = self.create_node(parent=node_parent, data=t,
tooltip=tt, icon_map=self.icon_state_map)
child_map[(comp,tag.category)] = node_parent
# This id_set must not be None
node_parent.id_set |= tag.id_set
return
# }}}
for category in self.category_nodes:
process_one_node(category, collapse_model,
state_map.get(category.category_key, {}))
def get_category_editor_data(self, category):
for cat in self.root_item.children:
if cat.category_key == category:
return [(t.tag.id, t.tag.original_name, t.tag.count)
for t in cat.child_tags() if t.tag.count > 0]
def is_in_user_category(self, index):
if not index.isValid():
return False
p = self.get_node(index)
while p.type != TagTreeItem.CATEGORY:
p = p.parent
return p.tag.category.startswith('@')
# Drag'n Drop {{{
def mimeTypes(self):
return ["application/calibre+from_library",
'application/calibre+from_tag_browser']
def mimeData(self, indexes):
data = []
for idx in indexes:
if idx.isValid():
# get some useful serializable data
node = self.get_node(idx)
path = self.path_for_index(idx)
if node.type == TagTreeItem.CATEGORY:
d = (node.type, node.py_name, node.category_key)
else:
t = node.tag
p = node
while p.type != TagTreeItem.CATEGORY:
p = p.parent
d = (node.type, p.category_key, p.is_gst, t.original_name,
t.category, path)
data.append(d)
else:
data.append(None)
raw = bytearray(cPickle.dumps(data, -1))
ans = QMimeData()
ans.setData('application/calibre+from_tag_browser', raw)
return ans
def dropMimeData(self, md, action, row, column, parent):
fmts = set([unicode(x) for x in md.formats()])
if not fmts.intersection(set(self.mimeTypes())):
return False
if "application/calibre+from_library" in fmts:
if action != Qt.CopyAction:
return False
return self.do_drop_from_library(md, action, row, column, parent)
elif 'application/calibre+from_tag_browser' in fmts:
return self.do_drop_from_tag_browser(md, action, row, column, parent)
def do_drop_from_tag_browser(self, md, action, row, column, parent):
if not parent.isValid():
return False
dest = self.get_node(parent)
if dest.type != TagTreeItem.CATEGORY:
return False
if not md.hasFormat('application/calibre+from_tag_browser'):
return False
data = str(md.data('application/calibre+from_tag_browser'))
src = cPickle.loads(data)
for s in src:
if s[0] != TagTreeItem.TAG:
return False
return self.move_or_copy_item_to_user_category(src, dest, action)
def move_or_copy_item_to_user_category(self, src, dest, action):
'''
src is a list of tuples representing items to copy. The tuple is
(type, containing category key, whether that key is a grouped search term,
full name, category key, path to node)
The type must be TagTreeItem.TAG
dest is the TagTreeItem node to receive the items
action is Qt.CopyAction or Qt.MoveAction
'''
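# Illustrative shape of one src entry (values invented for clarity):
# (TagTreeItem.TAG, '@mycategory', False, 'History.Europe', 'tags', [3, 0, 5])
# i.e. the tag 'History.Europe' from the 'tags' category, dragged out of the
# user category '@mycategory', located at tree path [3, 0, 5].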
def process_source_node(user_cats, src_parent, src_parent_is_gst,
is_uc, dest_key, idx):
'''
Copy/move an item and all its children to the destination
'''
copied = False
src_name = idx.tag.original_name
src_cat = idx.tag.category
# delete the item if the source is a user category and action is move
if is_uc and not src_parent_is_gst and src_parent in user_cats and \
action == Qt.MoveAction:
new_cat = []
for tup in user_cats[src_parent]:
if src_name == tup[0] and src_cat == tup[1]:
continue
new_cat.append(list(tup))
user_cats[src_parent] = new_cat
else:
copied = True
# Now add the item to the destination user category
add_it = True
if not is_uc and src_cat == 'news':
src_cat = 'tags'
for tup in user_cats[dest_key]:
if src_name == tup[0] and src_cat == tup[1]:
add_it = False
if add_it:
user_cats[dest_key].append([src_name, src_cat, 0])
for c in idx.children:
copied = process_source_node(user_cats, src_parent, src_parent_is_gst,
is_uc, dest_key, c)
return copied
user_cats = self.db.prefs.get('user_categories', {})
path = None
for s in src:
src_parent, src_parent_is_gst = s[1:3]
path = s[5]
if src_parent.startswith('@'):
is_uc = True
src_parent = src_parent[1:]
else:
is_uc = False
dest_key = dest.category_key[1:]
if dest_key not in user_cats:
continue
idx = self.index_for_path(path)
if idx.isValid():
process_source_node(user_cats, src_parent, src_parent_is_gst,
is_uc, dest_key,
self.get_node(idx))
self.db.new_api.set_pref('user_categories', user_cats)
self.refresh_required.emit()
return True
def do_drop_from_library(self, md, action, row, column, parent):
idx = parent
if idx.isValid():
node = self.data(idx, Qt.UserRole)
if node.type == TagTreeItem.TAG:
fm = self.db.metadata_for_field(node.tag.category)
if node.tag.category in \
('tags', 'series', 'authors', 'rating', 'publisher', 'languages') or \
(fm['is_custom'] and (
fm['datatype'] in ['text', 'rating', 'series',
'enumeration'] or (
fm['datatype'] == 'composite' and
fm['display'].get('make_category', False)))):
mime = 'application/calibre+from_library'
ids = list(map(int, str(md.data(mime)).split()))
self.handle_drop(node, ids)
return True
elif node.type == TagTreeItem.CATEGORY:
fm_dest = self.db.metadata_for_field(node.category_key)
if fm_dest['kind'] == 'user':
fm_src = self.db.metadata_for_field(md.column_name)
if md.column_name in ['authors', 'publisher', 'series'] or \
(fm_src['is_custom'] and (
(fm_src['datatype'] in ['series', 'text', 'enumeration'] and
not fm_src['is_multiple']))or
(fm_src['datatype'] == 'composite' and
fm_src['display'].get('make_category', False))):
mime = 'application/calibre+from_library'
ids = list(map(int, str(md.data(mime)).split()))
self.handle_user_category_drop(node, ids, md.column_name)
return True
return False
def handle_user_category_drop(self, on_node, ids, column):
categories = self.db.prefs.get('user_categories', {})
cat_contents = categories.get(on_node.category_key[1:], None)
if cat_contents is None:
return
cat_contents = set([(v, c) for v,c,ign in cat_contents])
fm_src = self.db.metadata_for_field(column)
label = fm_src['label']
for id in ids:
if not fm_src['is_custom']:
if label == 'authors':
value = self.db.authors(id, index_is_id=True)
value = [v.replace('|', ',') for v in value.split(',')]
elif label == 'publisher':
value = self.db.publisher(id, index_is_id=True)
elif label == 'series':
value = self.db.series(id, index_is_id=True)
else:
if fm_src['datatype'] != 'composite':
value = self.db.get_custom(id, label=label, index_is_id=True)
else:
value = self.db.get_property(id, loc=fm_src['rec_index'],
index_is_id=True)
if value:
if not isinstance(value, list):
value = [value]
cat_contents |= set([(v, column) for v in value])
categories[on_node.category_key[1:]] = [[v, c, 0] for v,c in cat_contents]
self.db.new_api.set_pref('user_categories', categories)
self.refresh_required.emit()
def handle_drop(self, on_node, ids):
# print 'Dropped ids:', ids, on_node.tag
key = on_node.tag.category
if (key == 'authors' and len(ids) >= 5):
if not confirm('<p>'+_('Changing the authors for several books can '
'take a while. Are you sure?') +
'</p>', 'tag_browser_drop_authors', self.gui_parent):
return
elif len(ids) > 15:
if not confirm('<p>'+_('Changing the metadata for that many books '
'can take a while. Are you sure?') +
'</p>', 'tag_browser_many_changes', self.gui_parent):
return
fm = self.db.metadata_for_field(key)
is_multiple = fm['is_multiple']
val = on_node.tag.original_name
for id in ids:
mi = self.db.get_metadata(id, index_is_id=True)
# Prepare to ignore the author, unless it is changed. Title is
# always ignored -- see the call to set_metadata
set_authors = False
# Author_sort cannot change explicitly. Changing the author might
# change it.
mi.author_sort = None # Never will change by itself.
if key == 'authors':
mi.authors = [val]
set_authors=True
elif fm['datatype'] == 'rating':
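# A rating tag's name is a run of star characters, so twice its length is the
# stored rating value in half-star units (e.g. three stars -> 6).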
mi.set(key, len(val) * 2)
elif fm['is_custom'] and fm['datatype'] == 'series':
mi.set(key, val, extra=1.0)
elif is_multiple:
new_val = mi.get(key, [])
if val in new_val:
# Fortunately, only one field can change, so the continue
# won't break anything
continue
new_val.append(val)
mi.set(key, new_val)
else:
mi.set(key, val)
self.db.set_metadata(id, mi, set_title=False,
set_authors=set_authors, commit=False)
self.db.commit()
self.drag_drop_finished.emit(ids)
# }}}
def get_in_vl(self):
return self.db.data.get_base_restriction() or self.db.data.get_search_restriction()
def get_book_ids_to_use(self):
if self.db.data.get_base_restriction() or self.db.data.get_search_restriction():
return self.db.search('', return_matches=True, sort_results=False)
return None
def _get_category_nodes(self, sort):
'''
Called by __init__. Do not directly call this method.
'''
self.row_map = []
self.categories = {}
# Get the categories
try:
data = self.db.new_api.get_categories(sort=sort,
icon_map=self.category_icon_map,
book_ids=self.get_book_ids_to_use(),
first_letter_sort=self.collapse_model == 'first letter')
except:
import traceback
traceback.print_exc()
data = self.db.new_api.get_categories(sort=sort, icon_map=self.category_icon_map,
first_letter_sort=self.collapse_model == 'first letter')
self.restriction_error.emit()
# Reconstruct the user categories, putting them into metadata
self.db.field_metadata.remove_dynamic_categories()
tb_cats = self.db.field_metadata
for user_cat in sorted(self.db.prefs.get('user_categories', {}).keys(),
key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
while True:
try:
tb_cats.add_user_category(label=cat_name, name=user_cat)
dot = cat_name.rfind('.')
if dot < 0:
break
cat_name = cat_name[:dot]
except ValueError:
break
for cat in sorted(self.db.prefs.get('grouped_search_terms', {}).keys(),
key=sort_key):
if (u'@' + cat) in data:
try:
tb_cats.add_user_category(label=u'@' + cat, name=cat)
except ValueError:
traceback.print_exc()
self.db.new_api.refresh_search_locations()
if len(self.db.saved_search_names()):
tb_cats.add_search_category(label='search', name=_('Searches'))
if self.filter_categories_by:
for category in data.keys():
data[category] = [t for t in data[category]
if lower(t.name).find(self.filter_categories_by) >= 0]
tb_categories = self.db.field_metadata
order = tweaks['tag_browser_category_order']
defvalue = order.get('*', 100)
tb_keys = sorted(tb_categories.keys(), key=lambda x: order.get(x, defvalue))
for category in tb_keys:
if category in data: # The search category can come and go
self.row_map.append(category)
self.categories[category] = tb_categories[category]['name']
return data
def set_categories_filter(self, txt):
if txt:
self.filter_categories_by = icu_lower(txt)
else:
self.filter_categories_by = None
def get_categories_filter(self):
return self.filter_categories_by
def refresh(self, data=None):
'''
Here to trap usages of refresh in the old architecture. Can eventually
be removed.
'''
print ('TagsModel: refresh called!')
traceback.print_stack()
return False
def create_node(self, *args, **kwargs):
node = TagTreeItem(*args, **kwargs)
self.node_map[id(node)] = node
return node
def get_node(self, idx):
ans = self.node_map.get(idx.internalId(), self.root_item)
return ans
def createIndex(self, row, column, internal_pointer=None):
idx = QAbstractItemModel.createIndex(self, row, column,
id(internal_pointer))
return idx
def index_for_category(self, name):
for row, category in enumerate(self.category_nodes):
if category.category_key == name:
return self.index(row, 0, QModelIndex())
def columnCount(self, parent):
return 1
def data(self, index, role):
if not index.isValid():
return None
item = self.get_node(index)
return item.data(role)
def setData(self, index, value, role=Qt.EditRole):
if not index.isValid():
return False
# set up to reposition at the same item. We can do this except if
# working with the last item and that item is deleted, in which case
# we position at the parent label
val = unicode(value or '').strip()
if not val:
error_dialog(self.gui_parent, _('Item is blank'),
_('An item cannot be set to nothing. Delete it instead.')).exec_()
return False
item = self.get_node(index)
if item.type == TagTreeItem.CATEGORY and item.category_key.startswith('@'):
if val.find('.') >= 0:
error_dialog(self.gui_parent, _('Rename user category'),
_('You cannot use periods in the name when '
'renaming user categories'), show=True)
return False
user_cats = self.db.prefs.get('user_categories', {})
user_cat_keys_lower = [icu_lower(k) for k in user_cats]
ckey = item.category_key[1:]
ckey_lower = icu_lower(ckey)
dotpos = ckey.rfind('.')
if dotpos < 0:
nkey = val
else:
nkey = ckey[:dotpos+1] + val
nkey_lower = icu_lower(nkey)
if ckey == nkey:
return True
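# The rename must cascade: any user category whose key begins with the old
# key followed by '.' (i.e. a sub-category) is renamed as well so the
# hierarchy stays intact.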
for c in sorted(user_cats.keys(), key=sort_key):
if icu_lower(c).startswith(ckey_lower):
if len(c) == len(ckey):
if strcmp(ckey, nkey) != 0 and \
nkey_lower in user_cat_keys_lower:
error_dialog(self.gui_parent, _('Rename user category'),
_('The name %s is already used')%nkey, show=True)
return False
user_cats[nkey] = user_cats[ckey]
del user_cats[ckey]
elif c[len(ckey)] == '.':
rest = c[len(ckey):]
if strcmp(ckey, nkey) != 0 and \
icu_lower(nkey + rest) in user_cat_keys_lower:
error_dialog(self.gui_parent, _('Rename user category'),
_('The name %s is already used')%(nkey+rest), show=True)
return False
user_cats[nkey + rest] = user_cats[ckey + rest]
del user_cats[ckey + rest]
self.user_categories_edited.emit(user_cats, nkey) # Does a refresh
return True
key = item.tag.category
name = item.tag.original_name
# make certain we know about the item's category
if key not in self.db.field_metadata:
return False
if key == 'authors':
if val.find('&') >= 0:
error_dialog(self.gui_parent, _('Invalid author name'),
_('Author names cannot contain & characters.')).exec_()
return False
if key == 'search':
if val in self.db.saved_search_names():
error_dialog(self.gui_parent, _('Duplicate search name'),
_('The saved search name %s is already used.')%val).exec_()
return False
self.db.saved_search_rename(unicode(item.data(role) or ''), val)
item.tag.name = val
self.search_item_renamed.emit() # Does a refresh
else:
restrict_to_book_ids=self.get_book_ids_to_use() if item.use_vl else None
self.db.new_api.rename_items(key, {item.tag.id: val},
restrict_to_book_ids=restrict_to_book_ids)
self.tag_item_renamed.emit()
item.tag.name = val
item.tag.state = TAG_SEARCH_STATES['clear']
if not restrict_to_book_ids:
self.rename_item_in_all_user_categories(name, key, val)
self.refresh_required.emit()
return True
def rename_item_in_all_user_categories(self, item_name, item_category, new_name):
'''
Search all user categories for items named item_name with category
item_category and rename them to new_name. The caller must arrange to
redisplay the tree as appropriate.
'''
user_cats = self.db.prefs.get('user_categories', {})
for k in user_cats.keys():
new_contents = []
for tup in user_cats[k]:
if tup[0] == item_name and tup[1] == item_category:
new_contents.append([new_name, item_category, 0])
else:
new_contents.append(tup)
user_cats[k] = new_contents
self.db.new_api.set_pref('user_categories', user_cats)
def delete_item_from_all_user_categories(self, item_name, item_category):
'''
Search all user categories for items named item_name with category
item_category and delete them. The caller must arrange to redisplay the
tree as appropriate.
'''
user_cats = self.db.prefs.get('user_categories', {})
for cat in user_cats.keys():
self.delete_item_from_user_category(cat, item_name, item_category,
user_categories=user_cats)
self.db.new_api.set_pref('user_categories', user_cats)
def delete_item_from_user_category(self, category, item_name, item_category,
user_categories=None):
if user_categories is not None:
user_cats = user_categories
else:
user_cats = self.db.prefs.get('user_categories', {})
new_contents = []
for tup in user_cats[category]:
if tup[0] != item_name or tup[1] != item_category:
new_contents.append(tup)
user_cats[category] = new_contents
if user_categories is None:
self.db.new_api.set_pref('user_categories', user_cats)
def headerData(self, *args):
return None
def flags(self, index, *args):
ans = Qt.ItemIsEnabled|Qt.ItemIsEditable
if index.isValid():
node = self.data(index, Qt.UserRole)
if node.type == TagTreeItem.TAG:
if node.tag.is_editable:
ans |= Qt.ItemIsDragEnabled
fm = self.db.metadata_for_field(node.tag.category)
if node.tag.category in \
('tags', 'series', 'authors', 'rating', 'publisher', 'languages') or \
(fm['is_custom'] and
fm['datatype'] in ['text', 'rating', 'series', 'enumeration']):
ans |= Qt.ItemIsDropEnabled
else:
ans |= Qt.ItemIsDropEnabled
return ans
def supportedDropActions(self):
return Qt.CopyAction|Qt.MoveAction
def path_for_index(self, index):
ans = []
while index.isValid():
ans.append(index.row())
index = self.parent(index)
ans.reverse()
return ans
def index_for_path(self, path):
parent = QModelIndex()
for idx,v in enumerate(path):
tparent = self.index(v, 0, parent)
if not tparent.isValid():
if v > 0 and idx == len(path) - 1:
# Probably the last item went away. Use the one before it
tparent = self.index(v-1, 0, parent)
if not tparent.isValid():
# Not valid. Use the last valid index
break
else:
# There isn't one before it. Use the last valid index
break
parent = tparent
return parent
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
parent_item = self.root_item
else:
parent_item = self.get_node(parent)
try:
child_item = parent_item.children[row]
except IndexError:
return QModelIndex()
ans = self.createIndex(row, column, child_item)
return ans
def parent(self, index):
if not index.isValid():
return QModelIndex()
child_item = self.get_node(index)
parent_item = getattr(child_item, 'parent', None)
if parent_item is self.root_item or parent_item is None:
return QModelIndex()
ans = self.createIndex(parent_item.row(), 0, parent_item)
if not ans.isValid():
return QModelIndex()
return ans
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parent_item = self.root_item
else:
parent_item = self.get_node(parent)
return len(parent_item.children)
def reset_all_states(self, except_=None):
update_list = []
def process_tag(tag_item):
tag = tag_item.tag
if tag is except_:
tag_index = self.createIndex(tag_item.row(), 0, tag_item)
self.dataChanged.emit(tag_index, tag_index)
elif tag.state != 0 or tag in update_list:
tag_index = self.createIndex(tag_item.row(), 0, tag_item)
tag.state = 0
update_list.append(tag)
self.dataChanged.emit(tag_index, tag_index)
for t in tag_item.children:
process_tag(t)
for t in self.root_item.children:
process_tag(t)
def clear_state(self):
self.reset_all_states()
def toggle(self, index, exclusive, set_to=None):
'''
exclusive: clear all states before applying this one
set_to: None => advance the state, otherwise a value from TAG_SEARCH_STATES
'''
if not index.isValid():
return False
item = self.get_node(index)
item.toggle(set_to=set_to)
if exclusive:
self.reset_all_states(except_=item.tag)
self.dataChanged.emit(index, index)
return True
def tokens(self):
ans = []
# Tags can be in the news and the tags categories. However, because of
# the desire to use two different icons (tags and news), the nodes are
# not shared, which can lead to the possibility of searching twice for
# the same tag. The tags_seen set helps us prevent that
tags_seen = set()
# Tag nodes are in their own category and possibly in user categories.
# They will be 'checked' in both places, but we want to put the node
# into the search string only once. The nodes_seen set helps us do that
nodes_seen = set()
node_searches = {TAG_SEARCH_STATES['mark_plus'] : 'true',
TAG_SEARCH_STATES['mark_plusplus'] : '.true',
TAG_SEARCH_STATES['mark_minus'] : 'false',
TAG_SEARCH_STATES['mark_minusminus'] : '.false'}
for node in self.category_nodes:
if node.tag.state:
if node.category_key == "news":
if node_searches[node.tag.state] == 'true':
ans.append('tags:"=' + _('News') + '"')
else:
ans.append('( not tags:"=' + _('News') + '")')
else:
ans.append('%s:%s'%(node.category_key, node_searches[node.tag.state]))
key = node.category_key
for tag_item in node.all_children():
if tag_item.type == TagTreeItem.CATEGORY:
if self.collapse_model == 'first letter' and \
tag_item.temporary and not key.startswith('@') \
and tag_item.tag.state:
k = 'author_sort' if key == 'authors' else key
letters_seen = {}
for subnode in tag_item.children:
if subnode.tag.sort:
letters_seen[subnode.tag.sort[0]] = True
if letters_seen:
charclass = ''.join(letters_seen)
if k == 'author_sort':
expr = r'%s:"~(^[%s])|(&\s*[%s])"'%(k, charclass, charclass)
elif k == 'series':
expr = r'series_sort:"~^[%s]"'%(charclass)
else:
expr = r'%s:"~^[%s]"'%(k, charclass)
else:
expr = r'%s:false'%(k)
if node_searches[tag_item.tag.state] == 'true':
ans.append(expr)
else:
ans.append('(not ' + expr + ')')
continue
tag = tag_item.tag
if tag.state != TAG_SEARCH_STATES['clear']:
if tag.state == TAG_SEARCH_STATES['mark_minus'] or \
tag.state == TAG_SEARCH_STATES['mark_minusminus']:
prefix = ' not '
else:
prefix = ''
if node.is_gst:
category = key
else:
category = tag.category if key != 'news' else 'tag'
add_colon = False
if self.db.field_metadata[tag.category]['is_csp']:
add_colon = True
if tag.name and tag.name[0] == u'\u2605': # char is a star. Assume rating
ans.append('%s%s:%s'%(prefix, category, len(tag.name)))
else:
name = tag.original_name
use_prefix = tag.state in [TAG_SEARCH_STATES['mark_plusplus'],
TAG_SEARCH_STATES['mark_minusminus']]
if category == 'tags':
if name in tags_seen:
continue
tags_seen.add(name)
if tag in nodes_seen:
continue
nodes_seen.add(tag)
n = name.replace(r'"', r'\"')
if name.startswith('.'):
n = '.' + n
ans.append('%s%s:"=%s%s%s"'%(prefix, category,
'.' if use_prefix else '', n,
':' if add_colon else ''))
return ans
def find_item_node(self, key, txt, start_path, equals_match=False):
'''
Search for an item (a node) in the tags browser list that matches both
the key (exact case-insensitive match) and txt (not equals_match =>
case-insensitive contains match; equals_match => case_insensitive
equal match). Returns the path to the node. Note that paths are to a
location (second item, fourth item, 25th item), not to a node. If
start_path is None, the search starts with the topmost node. If the tree
is changed subsequent to calling this method, the path can easily refer
to a different node or no node at all.
'''
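# Illustrative usage (hypothetical values): find_item_node('tags', 'hist', None)
# returns the path of the first node in the 'tags' category whose name
# contains 'hist', ignoring case.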
if not txt:
return None
txt = lower(txt) if not equals_match else txt
self.path_found = None
if start_path is None:
start_path = []
def process_tag(depth, tag_index, tag_item, start_path):
path = self.path_for_index(tag_index)
if depth < len(start_path) and path[depth] <= start_path[depth]:
return False
tag = tag_item.tag
if tag is None:
return False
name = tag.original_name
if (equals_match and strcmp(name, txt) == 0) or \
(not equals_match and lower(name).find(txt) >= 0):
self.path_found = path
return True
for i,c in enumerate(tag_item.children):
if process_tag(depth+1, self.createIndex(i, 0, c), c, start_path):
return True
return False
def process_level(depth, category_index, start_path):
path = self.path_for_index(category_index)
if depth < len(start_path):
if path[depth] < start_path[depth]:
return False
if path[depth] > start_path[depth]:
start_path = path
my_key = self.get_node(category_index).category_key
for j in xrange(self.rowCount(category_index)):
tag_index = self.index(j, 0, category_index)
tag_item = self.get_node(tag_index)
if tag_item.type == TagTreeItem.CATEGORY:
if process_level(depth+1, tag_index, start_path):
return True
elif not key or strcmp(key, my_key) == 0:
if process_tag(depth+1, tag_index, tag_item, start_path):
return True
return False
for i in xrange(self.rowCount(QModelIndex())):
if process_level(0, self.index(i, 0, QModelIndex()), start_path):
break
return self.path_found
def find_category_node(self, key, parent=QModelIndex()):
'''
Search for a category node (a top-level node) in the tags browser list
that matches the key (exact case-insensitive match). Returns the path to
the node. Paths are as in find_item_node.
'''
if not key:
return None
for i in xrange(self.rowCount(parent)):
idx = self.index(i, 0, parent)
node = self.get_node(idx)
if node.type == TagTreeItem.CATEGORY:
ckey = node.category_key
if strcmp(ckey, key) == 0:
return self.path_for_index(idx)
if len(node.children):
v = self.find_category_node(key, idx)
if v is not None:
return v
return None
def set_boxed(self, idx):
tag_item = self.get_node(idx)
tag_item.boxed = True
self.dataChanged.emit(idx, idx)
def clear_boxed(self):
'''
Clear all boxes around items.
'''
def process_tag(tag_index, tag_item):
if tag_item.boxed:
tag_item.boxed = False
self.dataChanged.emit(tag_index, tag_index)
for i,c in enumerate(tag_item.children):
process_tag(self.index(i, 0, tag_index), c)
def process_level(category_index):
for j in xrange(self.rowCount(category_index)):
tag_index = self.index(j, 0, category_index)
tag_item = self.get_node(tag_index)
if tag_item.boxed:
tag_item.boxed = False
self.dataChanged.emit(tag_index, tag_index)
if tag_item.type == TagTreeItem.CATEGORY:
process_level(tag_index)
else:
process_tag(tag_index, tag_item)
for i in xrange(self.rowCount(QModelIndex())):
process_level(self.index(i, 0, QModelIndex()))
# }}}
|
gpl-3.0
| 137,031,868,009,757,070 | 5,087,486,525,077,089,000 | 41.264336 | 96 | 0.491661 | false |
qgis/QGIS
|
python/plugins/db_manager/db_plugins/oracle/plugin.py
|
29
|
22869
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Médéric RIBREUX
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias <[email protected]> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <[email protected]> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
# this will disable the dbplugin if the connector raises an ImportError
from .connector import OracleDBConnector
from qgis.PyQt.QtCore import Qt, QCoreApplication
from qgis.PyQt.QtGui import QIcon, QKeySequence
from qgis.PyQt.QtWidgets import QAction, QApplication, QMessageBox
from qgis.core import QgsApplication, QgsVectorLayer, NULL, QgsSettings
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, \
Database, Schema, Table, VectorTable, TableField, TableConstraint, \
TableIndex, TableTrigger
from qgis.core import QgsCredentials
def classFactory():
return OracleDBPlugin
class OracleDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QgsApplication.getThemeIcon("/mIconOracle.svg")
@classmethod
def typeName(self):
return 'oracle'
@classmethod
def typeNameString(self):
return QCoreApplication.translate('db_manager', 'Oracle Spatial')
@classmethod
def providerName(self):
return 'oracle'
@classmethod
def connectionSettingsKey(self):
return '/Oracle/connections'
def connectToUri(self, uri):
self.db = self.databasesFactory(self, uri)
if self.db:
return True
return False
def databasesFactory(self, connection, uri):
return ORDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QgsSettings()
settings.beginGroup(u"/{0}/{1}".format(
self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(
self.tr('There is no defined database connection "{0}".'.format(
conn_name)))
from qgis.core import QgsDataSourceUri
uri = QgsDataSourceUri()
settingsList = ["host", "port", "database", "username", "password"]
host, port, database, username, password = [
settings.value(x, "", type=str) for x in settingsList]
# get all of the connection options
useEstimatedMetadata = settings.value(
"estimatedMetadata", False, type=bool)
uri.setParam('userTablesOnly', str(
settings.value("userTablesOnly", False, type=bool)))
uri.setParam('geometryColumnsOnly', str(
settings.value("geometryColumnsOnly", False, type=bool)))
uri.setParam('allowGeometrylessTables', str(
settings.value("allowGeometrylessTables", False, type=bool)))
uri.setParam('onlyExistingTypes', str(
settings.value("onlyExistingTypes", False, type=bool)))
uri.setParam('includeGeoAttributes', str(
settings.value("includeGeoAttributes", False, type=bool)))
settings.endGroup()
uri.setConnection(host, port, database, username, password)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
err = u""
try:
return self.connectToUri(uri)
except ConnectionError as e:
err = str(e)
# ask for valid credentials
max_attempts = 3
for i in range(max_attempts):
(ok, username, password) = QgsCredentials.instance().get(
uri.connectionInfo(False), username, password, err)
if not ok:
return False
uri.setConnection(host, port, database, username, password)
try:
self.connectToUri(uri)
except ConnectionError as e:
if i == max_attempts - 1: # failed the last attempt
raise e
err = str(e)
continue
QgsCredentials.instance().put(
uri.connectionInfo(False), username, password)
return True
return False
class ORDatabase(Database):
def __init__(self, connection, uri):
self.connName = connection.connectionName()
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return OracleDBConnector(uri, self.connName)
def dataTablesFactory(self, row, db, schema=None):
return ORTable(row, db, schema)
def vectorTablesFactory(self, row, db, schema=None):
return ORVectorTable(row, db, schema)
def info(self):
from .info_model import ORDatabaseInfo
return ORDatabaseInfo(self)
def schemasFactory(self, row, db):
return ORSchema(row, db)
def columnUniqueValuesModel(self, col, table, limit=10):
l = u""
if limit:
l = u"WHERE ROWNUM < {:d}".format(limit)
con = self.database().connector
# Prevent geometry column show
tableName = table.replace(u'"', u"").split(u".")
# split('.') never returns an empty list; when no schema prefix is given,
# normalise to the [schema, table] pair expected by isGeometryColumn()
if len(tableName) == 1:
tableName = [None, tableName[0]]
colName = col.replace(u'"', u"").split(u".")[-1]
if con.isGeometryColumn(tableName, colName):
return None
query = u"SELECT DISTINCT {} FROM {} {}".format(col, table, l)
return self.sqlResultModel(query, self)
def sqlResultModel(self, sql, parent):
from .data_model import ORSqlResultModel
return ORSqlResultModel(self, sql, parent)
def sqlResultModelAsync(self, sql, parent):
from .data_model import ORSqlResultModelAsync
return ORSqlResultModelAsync(self, sql, parent)
def toSqlLayer(self, sql, geomCol, uniqueCol,
layerName=u"QueryLayer", layerType=None,
avoidSelectById=False, filter=""):
uri = self.uri()
con = self.database().connector
if uniqueCol is not None:
uniqueCol = uniqueCol.strip('"').replace('""', '"')
uri.setDataSource(u"", u"({}\n)".format(
sql), geomCol, filter, uniqueCol)
if avoidSelectById:
uri.disableSelectAtId(True)
provider = self.dbplugin().providerName()
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
# handling undetermined geometry type
if not vlayer.isValid():
wkbType, srid = con.getTableMainGeomType(
u"({}\n)".format(sql), geomCol)
uri.setWkbType(wkbType)
if srid:
uri.setSrid(str(srid))
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
return vlayer
def registerDatabaseActions(self, mainWindow):
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Re-connect"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Database"), self.reconnectActionSlot)
if self.schemas():
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Create Schema…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.createSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Delete (Empty) Schema…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.deleteSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "Delete Selected Item"), self)
mainWindow.registerAction(action, None, self.deleteActionSlot)
action.setShortcuts(QKeySequence.Delete)
action = QAction(QgsApplication.getThemeIcon("/mActionCreateTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Create Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.createTableActionSlot)
action = QAction(QgsApplication.getThemeIcon("/mActionEditTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Edit Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.editTableActionSlot)
action = QAction(QgsApplication.getThemeIcon("/mActionDeleteTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Delete Table/View…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.deleteTableActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Empty Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.emptyTableActionSlot)
def supportsComment(self):
return False
class ORSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
# self.oid, self.name, self.owner, self.perms, self.comment = row
self.name = row[0]
class ORTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, self.owner, isView = row
self.estimatedRowCount = None
self.objectType = None
self.isView = False
self.isMaterializedView = False
if isView == 1:
self.isView = True
self.creationDate = None
self.modificationDate = None
def getDates(self):
"""Grab the creation/modification dates of the table"""
self.creationDate, self.modificationDate = (
self.database().connector.getTableDates((self.schemaName(),
self.name)))
def refreshRowEstimation(self):
"""Use ALL_ALL_TABLE to get an estimation of rows"""
if self.isView:
self.estimatedRowCount = 0
self.estimatedRowCount = (
self.database().connector.getTableRowEstimation(
(self.schemaName(), self.name)))
def getType(self):
"""Grab the type of object for the table"""
self.objectType = self.database().connector.getTableType(
(self.schemaName(), self.name))
def getComment(self):
"""Grab the general comment of the table/view"""
self.comment = self.database().connector.getTableComment(
(self.schemaName(), self.name), self.objectType)
def getDefinition(self):
return self.database().connector.getDefinition(
(self.schemaName(), self.name), self.objectType)
def getMViewInfo(self):
if self.objectType == u"MATERIALIZED VIEW":
return self.database().connector.getMViewInfo(
(self.schemaName(), self.name))
else:
return None
def runAction(self, action):
action = str(action)
if action.startswith("rows/"):
if action == "rows/recount":
self.refreshRowCount()
return True
elif action.startswith("index/"):
parts = action.split('/')
index_name = parts[1]
index_action = parts[2]
msg = QApplication.translate(
"DBManagerPlugin",
"Do you want to {} index {}?".format(
index_action, index_name))
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(
None,
QApplication.translate(
"DBManagerPlugin", "Table Index"),
msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if index_action == "rebuild":
self.aboutToChange.emit()
self.database().connector.rebuildTableIndex(
(self.schemaName(), self.name), index_name)
self.refreshIndexes()
return True
elif action.startswith(u"mview/"):
if action == "mview/refresh":
self.aboutToChange.emit()
self.database().connector.refreshMView(
(self.schemaName(), self.name))
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return ORTableField(row, table)
def tableConstraintsFactory(self, row, table):
return ORTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return ORTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return ORTableTrigger(row, table)
def info(self):
from .info_model import ORTableInfo
return ORTableInfo(self)
def tableDataModel(self, parent):
from .data_model import ORTableDataModel
return ORTableDataModel(self, parent)
def getValidQgisUniqueFields(self, onlyOne=False):
""" list of fields valid to load the table as layer in QGIS canvas.
QGIS automatically search for a valid unique field, so it's
needed only for queries and views.
"""
ret = []
# add the pk
pkcols = [x for x in self.fields() if x.primaryKey]
if len(pkcols) == 1:
ret.append(pkcols[0])
# then add integer fields with an unique index
indexes = self.indexes()
if indexes is not None:
for idx in indexes:
if idx.isUnique and len(idx.columns) == 1:
fld = idx.fields()[idx.columns[0]]
if (fld.dataType == u"NUMBER" and not fld.modifier and fld.notNull and fld not in ret):
ret.append(fld)
# and finally append the other suitable fields
for fld in self.fields():
if (fld.dataType == u"NUMBER" and not fld.modifier and fld.notNull and fld not in ret):
ret.append(fld)
if onlyOne:
return ret[0] if len(ret) > 0 else None
return ret
def uri(self):
uri = self.database().uri()
schema = self.schemaName() if self.schemaName() else ''
geomCol = self.geomColumn if self.type in [
Table.VectorType, Table.RasterType] else ""
uniqueCol = self.getValidQgisUniqueFields(
True) if self.isView else None
uri.setDataSource(schema, self.name, geomCol if geomCol else None,
None, uniqueCol.name if uniqueCol else "")
# Handle geographic table
if geomCol:
uri.setWkbType(self.wkbType)
uri.setSrid(str(self.srid))
return uri
class ORVectorTable(ORTable, VectorTable):
def __init__(self, row, db, schema=None):
ORTable.__init__(self, row[0:3], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.wkbType, self.geomDim, \
self.srid = row[-7:-2]
def info(self):
from .info_model import ORVectorTableInfo
return ORVectorTableInfo(self)
def runAction(self, action):
if action.startswith("extent/"):
if action == "extent/update":
self.aboutToChange.emit()
self.updateExtent()
return True
if ORTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
def canUpdateMetadata(self):
return self.database().connector.canUpdateMetadata((self.schemaName(),
self.name))
def updateExtent(self):
self.database().connector.updateMetadata(
(self.schemaName(), self.name),
self.geomColumn, extent=self.extent)
self.refreshTableEstimatedExtent()
self.refresh()
def hasSpatialIndex(self, geom_column=None):
geom_column = geom_column if geom_column else self.geomColumn
for idx in self.indexes():
if geom_column == idx.column:
return True
return False
class ORTableField(TableField):
def __init__(self, row, table):
""" build fields information from query and find primary key """
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, \
self.modifier, self.notNull, self.hasDefault, \
self.default, typeStr, self.comment = row
self.primaryKey = False
self.num = int(self.num)
if self.charMaxLen == NULL:
self.charMaxLen = None
else:
self.charMaxLen = int(self.charMaxLen)
if self.modifier == NULL:
self.modifier = None
else:
self.modifier = int(self.modifier)
# the row carries Oracle's NULLABLE flag ('Y'/'N'); invert it into a NOT NULL boolean
if self.notNull.upper() == u"Y":
self.notNull = False
else:
self.notNull = True
if self.comment == NULL:
self.comment = u""
# find out whether fields are part of primary key
for con in self.table().constraints():
if con.type == ORTableConstraint.TypePrimaryKey and self.name == con.column:
self.primaryKey = True
break
def type2String(self):
if (u"TIMESTAMP" in self.dataType or self.dataType in [u"DATE", u"SDO_GEOMETRY", u"BINARY_FLOAT", u"BINARY_DOUBLE"]):
return u"{}".format(self.dataType)
if self.charMaxLen in [None, -1]:
return u"{}".format(self.dataType)
elif self.modifier in [None, -1, 0]:
return u"{}({})".format(self.dataType, self.charMaxLen)
return u"{}({},{})".format(self.dataType, self.charMaxLen,
self.modifier)
def update(self, new_name, new_type_str=None, new_not_null=None,
new_default_str=None):
self.table().aboutToChange.emit()
if self.name == new_name:
new_name = None
if self.type2String() == new_type_str:
new_type_str = None
if self.notNull == new_not_null:
new_not_null = None
if self.default2String() == new_default_str:
new_default_str = None
ret = self.table().database().connector.updateTableColumn(
(self.table().schemaName(), self.table().name),
self.name, new_name, new_type_str,
new_not_null, new_default_str)
# When changing a field, refresh also constraints and
# indexes.
if ret is not False:
self.table().refreshFields()
self.table().refreshConstraints()
self.table().refreshIndexes()
return ret
class ORTableConstraint(TableConstraint):
TypeCheck, TypeForeignKey, TypePrimaryKey, \
TypeUnique, TypeUnknown = list(range(5))
types = {"c": TypeCheck, "r": TypeForeignKey,
"p": TypePrimaryKey, "u": TypeUnique}
def __init__(self, row, table):
""" build constraints info from query """
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.column, self.validated, \
self.generated, self.status = row[0:6]
constr_type_str = constr_type_str.lower()
if constr_type_str in ORTableConstraint.types:
self.type = ORTableConstraint.types[constr_type_str]
else:
self.type = ORTableConstraint.TypeUnknown
if row[6] == NULL:
self.checkSource = u""
else:
self.checkSource = row[6]
if row[8] == NULL:
self.foreignTable = u""
else:
self.foreignTable = row[8]
if row[7] == NULL:
self.foreignOnDelete = u""
else:
self.foreignOnDelete = row[7]
if row[9] == NULL:
self.foreignKey = u""
else:
self.foreignKey = row[9]
def type2String(self):
if self.type == ORTableConstraint.TypeCheck:
return QApplication.translate("DBManagerPlugin", "Check")
if self.type == ORTableConstraint.TypePrimaryKey:
return QApplication.translate("DBManagerPlugin", "Primary key")
if self.type == ORTableConstraint.TypeForeignKey:
return QApplication.translate("DBManagerPlugin", "Foreign key")
if self.type == ORTableConstraint.TypeUnique:
return QApplication.translate("DBManagerPlugin", "Unique")
return QApplication.translate("DBManagerPlugin", 'Unknown')
def fields(self):
""" Hack to make edit dialog box work """
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, self.column, self.indexType, self.status, \
self.analyzed, self.compression, self.isUnique = row
def fields(self):
""" Hack to make edit dialog box work """
self.table().refreshFields()
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.event, self.type, self.enabled = row
|
gpl-2.0
| 8,814,159,737,422,689,000 | -8,999,449,192,474,476,000 | 34.107527 | 125 | 0.579261 | false |
mindnervestech/mnrp
|
addons/crm_claim/crm_claim.py
|
333
|
10079
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
class crm_claim_stage(osv.osv):
""" Model for claim stages. This models the main stages of a claim
management flow. Main CRM objects (leads, opportunities, project
issues, ...) will now use only stages, instead of state and stages.
Stages are for example used to display the kanban view of records.
"""
_name = "crm.claim.stage"
_description = "Claim stages"
_rec_name = 'name'
_order = "sequence"
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
'section_ids':fields.many2many('crm.case.section', 'section_claim_stage_rel', 'stage_id', 'section_id', string='Sections',
help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
'case_default': fields.boolean('Common to All Teams',
help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
}
_defaults = {
'sequence': lambda *args: 1,
}
class crm_claim(osv.osv):
""" Crm claim
"""
_name = "crm.claim"
_description = "Claim"
_order = "priority,date desc"
_inherit = ['mail.thread']
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
return self.pool.get('crm.lead')._resolve_section_id_from_context(cr, uid, context=context) or False
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
section_id = self._get_default_section_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], section_id, [('sequence', '=', '1')], context=context)
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Claim Subject', required=True),
'active': fields.boolean('Active'),
'action_next': fields.char('Next Action'),
'date_action_next': fields.datetime('Next Action Date'),
'description': fields.text('Description'),
'resolution': fields.text('Resolution'),
'create_date': fields.datetime('Creation Date', readonly=True),
'write_date': fields.datetime('Update Date', readonly=True),
'date_deadline': fields.date('Deadline'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Claim Date', select=True),
'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.claim')]"),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'type_action': fields.selection([('correction','Corrective Action'),('prevention','Preventive Action')], 'Action Type'),
'user_id': fields.many2one('res.users', 'Responsible', track_visibility='always'),
'user_fault': fields.char('Trouble Responsible'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help="Responsible sales team."\
" Define Responsible user and Email account for"\
" mail gateway."),
'company_id': fields.many2one('res.company', 'Company'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'email_cc': fields.text('Watchers Emails', size=252, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'email_from': fields.char('Email', size=128, help="Destination email for email gateway."),
'partner_phone': fields.char('Phone'),
'stage_id': fields.many2one('crm.claim.stage', 'Stage', track_visibility='onchange',
domain="['|', ('section_ids', '=', section_id), ('case_default', '=', True)]"),
'cause': fields.text('Root Cause'),
}
_defaults = {
'user_id': lambda s, cr, uid, c: uid,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
'date': fields.datetime.now,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.case', context=c),
'priority': '1',
'active': lambda *a: 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c)
}
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameters of the stage search, taken from the claim:
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for claim in cases:
if claim.section_id:
section_ids.append(claim.section_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * len(section_ids)
for section_id in section_ids:
search_domain.append(('section_ids', '=', section_id))
search_domain.append(('case_default', '=', True))
# AND with the domain in parameter
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('crm.claim.stage').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
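# Hedged example (not in the original module; the ids are made up): for a claim
# whose team id is 3 and a section_id argument of 1, the OR-domain assembled above
# would look like
#   ['|', '|', ('section_ids', '=', 1), ('section_ids', '=', 3), ('case_default', '=', True)]
# i.e. stages linked to either team, or stages flagged as common to all teams,
# further restricted by whatever extra domain the caller passed in.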
def onchange_partner_id(self, cr, uid, ids, partner_id, email=False, context=None):
"""This function returns value of partner address based on partner
:param email: ignored
"""
if not partner_id:
return {'value': {'email_from': False, 'partner_phone': False}}
address = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
return {'value': {'email_from': address.email, 'partner_phone': address.phone}}
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('section_id') and not context.get('default_section_id'):
context['default_section_id'] = vals.get('section_id')
# context: no_log, because subtype already handle this
return super(crm_claim, self).create(cr, uid, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
claim = self.browse(cr, uid, id, context=context)
default = dict(default or {},
stage_id = self._get_default_stage_id(cr, uid, context=context),
name = _('%s (copy)') % claim.name)
return super(crm_claim, self).copy(cr, uid, id, default, context=context)
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
}
if msg.get('priority'):
defaults['priority'] = msg.get('priority')
defaults.update(custom_values)
return super(crm_claim, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
class res_partner(osv.osv):
_inherit = 'res.partner'
def _claim_count(self, cr, uid, ids, field_name, arg, context=None):
Claim = self.pool['crm.claim']
return {
partner_id: Claim.search_count(cr,uid, [('partner_id', '=', partner_id)], context=context)
for partner_id in ids
}
_columns = {
'claim_count': fields.function(_claim_count, string='# Claims', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -6,834,811,936,620,871,000 | 1,176,509,359,179,875,300 | 47.690821 | 238 | 0.594603 | false |
JianfengXu/crosswalk-test-suite
|
stability/stability-lowresource-android-tests/lowresource/TestApp.py
|
3
|
6765
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<[email protected]>
import sys
import commands
import subprocess
reload(sys)
sys.setdefaultencoding('utf-8')
ADB_CMD = "adb"
def doCMD(cmd):
# No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
class TestApp():
device = ""
location = ""
pkgname = ""
activname = ""
def __init__(self, device, location, pkgname, activname):
self.device = device
self.location = location
self.pkgname = pkgname
self.activname = activname
def install(self):
action_status = False
if self.location.endswith(".apk"):
if not self.isInstalled():
cmd = "%s -s %s install %s" % (ADB_CMD, self.device, self.location)
(return_code, output) = doCMD(cmd)
if self.isInstalled():
action_status = True
else:
print "-->> %s fail to install." % self.location
else:
print "-->> %s has been installed." % self.pkgname
else:
print "-->> Invalid apk location: %s " % self.location
return action_status
def uninstall(self):
action_status = False
if self.isInstalled():
cmd = "%s -s %s uninstall %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isInstalled():
action_status = True
else:
print "-->> %s fail to uninstall." % self.pkgname
else:
print "-->> %s has not been installed." % self.pkgname
return action_status
def launch(self):
action_status = False
if not self.isRunning():
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
if self.isRunning():
action_status = True
else:
print "-->> %s fail to launch." % self.pkgname
else:
print "-->> %s has been launched." % self.pkgname
return action_status
def switch(self):
action_status = False
# If in Activity, switch to background, otherwise switch to front
if self.isActivity():
# Switch to Home
# keycode
# 3 --> "KEYCODE_HOME"
cmd = "%s -s %s shell input keyevent 3" % (ADB_CMD, self.device)
(return_code, output) = doCMD(cmd)
if not self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to background." % self.pkgname
else:
cmd = "%s -s %s shell am start -n %s/.%s" % (ADB_CMD, self.device, self.pkgname, self.activname)
(return_code, output) = doCMD(cmd)
if self.isActivity():
action_status = True
else:
print "-->> %s fail to switch to front." % self.pkgname
return action_status
def stop(self):
action_status = False
if self.isRunning():
cmd = "%s -s %s shell am force-stop %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
if not self.isRunning():
action_status = True
else:
print "-->> %s fail to stop." % self.pkgname
else:
print "-->> %s has been stoped." % self.pkgname
return action_status
def isInstalled(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell pm list packages |grep %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
for line in output:
if self.pkgname in line:
action_status = True
break
return action_status
def isRunning(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell ps |grep %s" % (ADB_CMD, self.device, self.pkgname)
(return_code, output) = doCMD(cmd)
for line in output:
if self.pkgname in line:
action_status = True
break
return action_status
def isActivity(self):
action_status = False
if not self.pkgname == "":
cmd = "%s -s %s shell dumpsys activity |grep \"%s\"" % (ADB_CMD, self.device, "Recent #0")
(return_code, output) = doCMD(cmd)
for line in output:
if self.pkgname in line:
action_status = True
break
return action_status
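# Minimal usage sketch (assumed, not part of this helper module): the test runners
# that import this file are expected to drive it roughly like this, where the
# device serial, apk path, package and activity names are all hypothetical.
#
#   app = TestApp("emulator-5554",
#                 "/tmp/lowresource.apk",
#                 "org.xwalk.lowresource",
#                 "LowResourceActivity")
#   if app.install() and app.launch():
#       app.switch()    # push to background and back
#       app.stop()
#       app.uninstall()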
|
bsd-3-clause
| -61,660,268,473,978,810 | 6,793,312,372,683,431,000 | 36.375691 | 108 | 0.57694 | false |
calfonso/ansible
|
contrib/inventory/scaleway.py
|
20
|
7196
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Scaleway
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to the Scaleway API.
It requires the requests library (pip install requests, https://pypi.python.org/pypi/requests).
Before using this script you may want to modify the scaleway.ini config file.
This script generates an Ansible hosts file with these host groups:
<hostname>: Defines host itself with Scaleway's hostname as group name.
<tag>: Contains all hosts which has "<tag>" as tag.
<region>: Contains all hosts which are in the "<region>" region.
all: Contains all hosts defined in Scaleway.
'''
# (c) 2017, Paul B. <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import os
import requests
import six
from six.moves import configparser
import sys
import time
import traceback
try:
import json
except ImportError:
import simplejson as json
EMPTY_GROUP = {
'children': [],
'hosts': []
}
class ScalewayAPI:
REGIONS = ['par1', 'ams1']
def __init__(self, auth_token, region):
self.session = requests.session()
self.session.headers.update({
'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0])
})
self.session.headers.update({
'X-Auth-Token': auth_token.encode('latin1')
})
self.base_url = 'https://cp-%s.scaleway.com' % (region)
def servers(self):
raw = self.session.get('/'.join([self.base_url, 'servers']))
try:
response = raw.json()
return self.get_resource('servers', response, raw)
except ValueError:
return []
def get_resource(self, resource, response, raw):
raw.raise_for_status()
if resource in response:
return response[resource]
else:
raise ValueError(
"Resource %s not found in Scaleway API response" % (resource))
def env_or_param(env_key, param=None, fallback=None):
env_value = os.environ.get(env_key)
if (param, env_value) == (None, None):
return fallback
elif env_value is not None:
return env_value
else:
return param
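# Behaviour sketch (hypothetical values): with SCALEWAY_REGION=ams1 exported,
#   env_or_param('SCALEWAY_REGION', fallback='par1')  -> 'ams1'
# with the variable unset and no param,
#   env_or_param('SCALEWAY_REGION', fallback='par1')  -> 'par1'
# an explicit param is only used when the environment variable is absent.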
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat(
'/'.join([dpath, 'scaleway_ansible_inventory.json']))
except OSError:
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
else:
maxage = 60
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
def generate_inv_from_api(config):
try:
inventory['all'] = copy.deepcopy(EMPTY_GROUP)
if config.has_option('auth', 'api_token'):
auth_token = config.get('auth', 'api_token')
auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token)
if auth_token is None:
sys.stderr.write('ERROR: missing authentication token for Scaleway API')
sys.exit(1)
if config.has_option('compute', 'regions'):
regions = config.get('compute', 'regions')
if regions == 'all':
regions = ScalewayAPI.REGIONS
else:
regions = map(str.strip, regions.split(','))
else:
regions = [
env_or_param('SCALEWAY_REGION', fallback='par1')
]
for region in regions:
api = ScalewayAPI(auth_token, region)
for server in api.servers():
hostname = server['hostname']
if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'):
ip = server['public_ip']['address']
else:
ip = server['private_ip']
for server_tag in server['tags']:
if server_tag not in inventory:
inventory[server_tag] = copy.deepcopy(EMPTY_GROUP)
inventory[server_tag]['children'].append(hostname)
if region not in inventory:
inventory[region] = copy.deepcopy(EMPTY_GROUP)
inventory[region]['children'].append(hostname)
inventory['all']['children'].append(hostname)
inventory[hostname] = []
inventory[hostname].append(ip)
return inventory
except Exception:
# Return empty hosts output
traceback.print_exc()
return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
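# A hedged sketch of the inventory shape built above (hostnames, tags and
# addresses are made up):
#
#   {
#     "all":   {"children": ["web-1", "db-1"], "hosts": []},
#     "par1":  {"children": ["web-1", "db-1"], "hosts": []},
#     "web":   {"children": ["web-1"], "hosts": []},
#     "web-1": ["10.1.2.3"],
#     "db-1":  ["10.1.2.4"]
#   }
#
# On API errors the minimal stub returned just above is emitted instead.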
def get_inventory(config):
''' Reads the inventory from cache or Scaleway api '''
if cache_available(config):
inv = get_cache('scaleway_ansible_inventory.json', config)
else:
inv = generate_inv_from_api(config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
# Read config
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
if cache_available(config):
inventory = get_cache('scaleway_ansible_inventory.json', config)
else:
inventory = get_inventory(config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()
|
gpl-3.0
| -5,259,771,312,776,887,000 | 1,223,426,715,690,203,100 | 30.017241 | 146 | 0.602557 | false |
kirca/odoo
|
addons/account_voucher/report/__init__.py
|
378
|
1083
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_voucher_sales_receipt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 5,978,724,421,702,086,000 | 3,594,949,668,723,332,000 | 44.125 | 78 | 0.619575 | false |
lucidbard/NewsBlur
|
vendor/oauth2client/anyjson.py
|
302
|
1044
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = '[email protected] (Joe Gregorio)'
try: # pragma: no cover
# Should work for Python2.6 and higher.
import json as simplejson
except ImportError: # pragma: no cover
try:
import simplejson
except ImportError:
# Try to import from django, should work on App Engine
from django.utils import simplejson
|
mit
| -8,405,104,482,442,905,000 | -4,553,047,208,306,440,000 | 31.625 | 74 | 0.744253 | false |
yi719/python-driver
|
cassandra/murmur3.py
|
15
|
2387
|
from six.moves import range
import struct
def body_and_tail(data):
l = len(data)
nblocks = l // 16
tail = l % 16
if nblocks:
return struct.unpack_from('qq' * nblocks, data), struct.unpack_from('b' * tail, data, -tail), l
else:
return tuple(), struct.unpack_from('b' * tail, data, -tail), l
def rotl64(x, r):
# note: not a general-purpose function because it leaves the high-order bits intact
# suitable for this use case without wasting cycles
mask = 2 ** r - 1
rotated = (x << r) | ((x >> 64 - r) & mask)
return rotated
def fmix(k):
# keep only the 31 bits a 64-bit value would have left after >> 33 (Python ints are unbounded)
k ^= (k >> 33) & 0x7fffffff
k *= 0xff51afd7ed558ccd
k ^= (k >> 33) & 0x7fffffff
k *= 0xc4ceb9fe1a85ec53
k ^= (k >> 33) & 0x7fffffff
return k
INT64_MAX = int(2 ** 63 - 1)
INT64_MIN = -INT64_MAX - 1
INT64_OVF_OFFSET = INT64_MAX + 1
INT64_OVF_DIV = 2 * INT64_OVF_OFFSET
def truncate_int64(x):
if not INT64_MIN <= x <= INT64_MAX:
x = (x + INT64_OVF_OFFSET) % INT64_OVF_DIV - INT64_OVF_OFFSET
return x
def _murmur3(data):
h1 = h2 = 0
c1 = -8663945395140668459 # 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
body, tail, total_len = body_and_tail(data)
# body
for i in range(0, len(body), 2):
k1 = body[i]
k2 = body[i + 1]
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
h1 = rotl64(h1, 27)
h1 += h2
h1 = h1 * 5 + 0x52dce729
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
h2 = rotl64(h2, 31)
h2 += h1
h2 = h2 * 5 + 0x38495ab5
# tail
k1 = k2 = 0
len_tail = len(tail)
if len_tail > 8:
for i in range(len_tail - 1, 7, -1):
k2 ^= tail[i] << (i - 8) * 8
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
if len_tail:
for i in range(min(7, len_tail - 1), -1, -1):
k1 ^= tail[i] << i * 8
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= total_len
h2 ^= total_len
h1 += h2
h2 += h1
h1 = fmix(h1)
h2 = fmix(h2)
h1 += h2
return truncate_int64(h1)
try:
from cassandra.cmurmur3 import murmur3
except ImportError:
murmur3 = _murmur3
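# Rough usage sketch (not taken from the driver's docs): the token for a partition
# key is computed from its serialized bytes, e.g.
#   token = murmur3(b'some_partition_key')   # a signed 64-bit integer
# The C extension is preferred when importable; _murmur3 above is the pure-Python
# fallback intended to give the same results.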
|
apache-2.0
| -1,819,792,852,146,128,400 | -2,584,573,663,110,634,500 | 20.123894 | 103 | 0.518224 | false |
littlstar/chromium.src
|
chrome/common/extensions/docs/server2/github_file_system_provider.py
|
121
|
1611
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from caching_file_system import CachingFileSystem
from empty_dir_file_system import EmptyDirFileSystem
from github_file_system import GithubFileSystem as OldGithubFileSystem
from new_github_file_system import GithubFileSystem as NewGithubFileSystem
class GithubFileSystemProvider(object):
'''Provides GithubFileSystems bound to an owner/repo pair.
'''
def __init__(self, object_store_creator):
self._object_store_creator = object_store_creator
def Create(self, owner, repo):
'''Creates a GithubFileSystem. For legacy reasons this is hacked
such that the apps samples repo returns the old GithubFileSystem.
|owner| is the owner of the GitHub account, e.g. 'GoogleChrome'.
|repo| is the repository name, e.g. 'devtools-docs'.
'''
if owner == 'GoogleChrome' and repo == 'chrome-app-samples':
# NOTE: The old GitHub file system implementation doesn't support being
# wrapped by a CachingFileSystem. It's also too slow to run on the dev
# server, since every app API page would need to read from it.
return OldGithubFileSystem.CreateChromeAppsSamples(
self._object_store_creator)
return CachingFileSystem(
NewGithubFileSystem.Create(owner, repo, self._object_store_creator),
self._object_store_creator)
@staticmethod
def ForEmpty():
class EmptyImpl(object):
def Create(self, owner, repo):
return EmptyDirFileSystem()
return EmptyImpl()
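# Hedged sketch of how a caller might use this provider (names follow the
# docstring above; the object_store_creator comes from the server's own setup):
#
#   provider = GithubFileSystemProvider(object_store_creator)
#   fs = provider.Create('GoogleChrome', 'devtools-docs')            # caching, new-style FS
#   apps_fs = provider.Create('GoogleChrome', 'chrome-app-samples')  # legacy path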
|
bsd-3-clause
| -6,315,373,219,129,747,000 | -1,099,013,170,411,539,100 | 39.275 | 77 | 0.732464 | false |
errordeveloper/fe-devel
|
Native/ThirdParty/Private/Python/js_beautifier.py
|
4
|
22785
|
import sys
import os
import exceptions
import glob
fileTypes = ['.js','.kl','.html']
controls = ['case', 'default', 'do', 'else','for', 'if','while','throw', 'switch', 'catch']
keywords = ['break', 'continue', 'finally', 'return',
'try', 'var', 'with', 'delete', 'new', 'typeof', 'instanceof', '#include']
functions = ['function', 'operator']
curly = ['{', '}']
brace = ['(', ')']
bracket = ['[', ']']
allbrackets = []
allbrackets.extend(curly)
allbrackets.extend(brace)
allbrackets.extend(bracket)
quotes = ['"', "'"]
whitespace = [' ', '\n']
comment = ['//', '/*', '*/']
semicolon = [';']
comma = [',','.']
unaoperators = ['++', '--', '>>', '<<']
binoperators = ['===', '!==', '<<=', '>>=', '+=', '-=', '/=', '*=', '%=',
'||', '&&', '>=', '<=', '==', '!=', '^=', '&=', '|=',
'+', '-', '/', '*', '%', '>', '<', ':', '?', '&', '^', '=', '!']
operators = []
operators.extend(unaoperators)
operators.extend(binoperators)
splitters = []
splitters.extend(comment)
splitters.extend(comma)
splitters.extend(semicolon)
splitters.extend(allbrackets)
splitters.extend(quotes)
splitters.extend(whitespace)
splitters.extend(operators)
TYPE_CONTROL = 0
TYPE_KEYWORD = 1
TYPE_FUNCTION = 2
TYPE_CURLY = 4
TYPE_BRACE = 8
TYPE_BRACKET = 16
TYPE_ALL_BRACKETS = TYPE_CURLY | TYPE_BRACE | TYPE_BRACKET
TYPE_QUOTE = 32
TYPE_WHITESPACE = 64
TYPE_COMMENT = 128
TYPE_NO_CODE = TYPE_WHITESPACE | TYPE_COMMENT
TYPE_SEMICOLON = 256
TYPE_COMMA = 512
TYPE_BINOPERATOR = 1024
TYPE_UNAOPERATOR = 2048
TYPE_OPERATOR = TYPE_BINOPERATOR | TYPE_UNAOPERATOR
TYPE_IDENTIFIER = 4096
class token():
string = ''
type = ''
index = -1
def __init__(self,string,type = TYPE_IDENTIFIER,index = 0):
self.string = string
self.type = type
self.index = index
def isTypeOf(self,type):
# test this token's type against a TYPE_* bit mask (e.g. TYPE_NO_CODE)
return (self.type & type) != 0
def tokenize(content):
# first some basic formatting
content = content.replace('\t',' ')
# get all of the words
words = []
while len(content) > 0:
minSplitIndex = len(content)
minSplitter = ''
for i in range(len(splitters)):
split = content.partition(splitters[i])
if len(split[1]) > 0:
if len(split[0]) < minSplitIndex:
minSplitIndex = len(split[0])
minSplitter = splitters[i]
if minSplitIndex == len(content):
words.append(content)
content = ''
else:
split = content.partition(minSplitter)
if len(split[0]) > 0:
words.append(split[0])
words.append(split[1])
content = split[2]
# parse the words to tokens
tokens = []
for word in words:
tokenIdentified = False
if not tokenIdentified:
for i in range(len(controls)):
if(word == controls[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_CONTROL,i))
break
if not tokenIdentified:
for i in range(len(keywords)):
if(word == keywords[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_KEYWORD,i))
break
if not tokenIdentified:
for i in range(len(functions)):
if(word == functions[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_FUNCTION,i))
break
if not tokenIdentified:
for i in range(len(curly)):
if(word == curly[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_CURLY,i))
break
if not tokenIdentified:
for i in range(len(brace)):
if(word == brace[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BRACE,i))
break
if not tokenIdentified:
for i in range(len(bracket)):
if(word == bracket[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BRACKET,i))
break
if not tokenIdentified:
for i in range(len(quotes)):
if(word == quotes[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_QUOTE,i))
break
if not tokenIdentified:
for i in range(len(whitespace)):
if(word == whitespace[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_WHITESPACE,i))
break
if not tokenIdentified:
for i in range(len(comment)):
if(word == comment[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_COMMENT,i))
break
if not tokenIdentified:
for i in range(len(semicolon)):
if(word == semicolon[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_SEMICOLON,i))
break
if not tokenIdentified:
for i in range(len(comma)):
if(word == comma[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_COMMA,i))
break
if not tokenIdentified:
for i in range(len(binoperators)):
if(word == binoperators[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BINOPERATOR,i))
break
if not tokenIdentified:
for i in range(len(unaoperators)):
if(word == unaoperators[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_UNAOPERATOR,i))
break
if not tokenIdentified:
tokenIdentified = True
tokens.append(token(word,TYPE_IDENTIFIER,0))
# now since we know the tokens, let's simplify some of them
# simplify the comment tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(lastToken.index == 0):
if(tokens[i].type == TYPE_WHITESPACE and tokens[i].index == 1):
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
else:
lastToken.string += tokens[i].string;
elif(lastToken.index == 1):
lastToken.string += tokens[i].string;
if(tokens[i].type == TYPE_COMMENT and tokens[i].index == 2):
newTokens.append(lastToken)
lastToken = False
elif(tokens[i].type == TYPE_COMMENT):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# simplify the string tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(tokens[i].type == TYPE_QUOTE):
if(tokens[i].index == lastToken.index):
lastToken.string += "'"
newTokens.append(lastToken)
lastToken = False
else:
lastToken.string += '"'
else:
lastToken.string += tokens[i].string
elif(tokens[i].type == TYPE_QUOTE):
lastToken = tokens[i]
lastToken.string = "'" # prefer singles
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# simplify the numeric tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)-1):
if(lastToken):
if(tokens[i].type == TYPE_IDENTIFIER):
if(tokens[i].string == 'e' and lastToken.string.find('e') == -1):
lastToken.string += tokens[i].string;
else:
try:
intvalue = int(tokens[i].string[0:1])
lastToken.string += tokens[i].string;
except Exception:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 1 and lastToken.string.endswith('e')):
lastToken.string += tokens[i].string;
elif(tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '-' and tokens[i+1].type == TYPE_IDENTIFIER):
try:
intvalue = int(tokens[i+1].string[0:1])
lastToken.string += tokens[i].string;
except Exception:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
else:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_IDENTIFIER):
try:
intvalue = int(tokens[i].string[0:1])
lastToken = tokens[i]
except Exception:
newTokens.append(tokens[i])
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
newTokens.append(tokens[len(tokens)-1])
tokens = newTokens
# simplify the regex tokens into single tokens
newTokens = []
startIndex = -1
endIndex = -1
string = ''
i = 0
while(i < len(tokens)):
if(startIndex > -1):
tkn = tokens[i];
if(not string.endswith("\\") and (
(tkn.type == TYPE_SEMICOLON) or
(tkn.type == TYPE_BRACE and tkn.index == 1) or
(tkn.type == TYPE_WHITESPACE and tkn.index == 0)
)):
if(endIndex > -1):
string = ''
for j in range(startIndex,endIndex+1):
string += tokens[j].string
newTokens.append(token(string))
i = endIndex
else:
i = startIndex
newTokens.append(tokens[i])
startIndex = -1
endIndex = -1
string = ''
elif(tkn.type == TYPE_BINOPERATOR and tkn.string == '/'):
endIndex = i
string += tkn.string
else:
string += tkn.string
elif(i > 0 and tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '/'):
# check if the previous is not an identifier, not an operator
j = i-1
prev = tokens[j]
while(prev.type == TYPE_WHITESPACE and j > 0):
j -= 1
prev = tokens[j]
if((prev.type == TYPE_BINOPERATOR and prev.string == '=') or
(prev.type == TYPE_BRACE and prev.index == 0) or
(prev.type == TYPE_COMMA and prev.index == 0)):
startIndex = i
string = tokens[i].string
else:
newTokens.append(tokens[i])
else:
newTokens.append(tokens[i])
i+=1
tokens = newTokens
# now let's simplify the whitespace tokens into single ones
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(lastToken.index == 0):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
lastToken = False
elif(lastToken.index == 1):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
if(len(lastToken.string) < 2):
lastToken.string += tokens[i].string
else:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_WHITESPACE):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# now let's switch curly and newline tokens
for i in range(len(tokens)-1):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 0):
if(i < len(tokens)-2):
if(tokens[i+2].type == TYPE_WHITESPACE):
tokens.remove(tokens[i+2])
if(i == 0 or tokens[i-1].type != TYPE_COMMENT):
tmp = tokens[i]
tokens[i] = tokens[i+1]
tokens[i+1] = tmp
elif(tokens[i].type == TYPE_CURLY and tokens[i].index == 0):
if(tokens[i+1].type != TYPE_WHITESPACE and not(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 1)):
tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
elif(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 1):
if(tokens[i].type != TYPE_WHITESPACE and not(tokens[i].type == TYPE_CURLY and tokens[i+1].index == 0)):
tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
if(i == len(tokens)-2):
break
# track curly/brace nesting depth across the remaining tokens
curlyCount = 0
braceCount = 0
for i in range(len(tokens)-1):
if(tokens[i].type == TYPE_CURLY):
if(tokens[i].index == 0):
curlyCount += 1
else:
curlyCount -= 1
elif(tokens[i].type == TYPE_BRACE):
if(tokens[i].index == 0):
braceCount += 1
else:
braceCount -= 1
#elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 0):
# if(braceCount <= curlyCount):
# tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
return tokens
def stringify(tokens, extension = 'js'):
lines = []
line = []
# loop over all tokens and put them in lines
for i in range(len(tokens)):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
lines.append(line)
if(len(tokens[i].string) > 1):
lines.append([token('',TYPE_WHITESPACE)])
line = []
continue
line.append(tokens[i])
if(len(line)>0):
lines.append(line)
strings = []
tabs = ''
globalCurlyCount = 0
globalBraceCount = 0
globalBracketCount = 0
globalQuoteCount = 0
entryQuote = 0
history = []
for j in range(len(lines)):
line = lines[j]
curlyCount = 0
braceCount = 0
bracketCount = 0
string = ''
# check if we have a single control line without curly
prevLine = False
if(j > 0):
k = j-1
while(k >= 0):
if(len(lines[k]) > 0 and (len(lines[k]) > 1 or lines[k][0].type != TYPE_WHITESPACE)):
prevLine = lines[k]
break
k -= 1
for i in range(len(line)):
if(line[i].type == TYPE_CURLY):
if(line[i].index == 0):
globalCurlyCount += 1
curlyCount += 1
else:
if(curlyCount == 0):
string = string[2:100000]
globalCurlyCount -= 1
curlyCount -= 1
if(line[i].type == TYPE_BRACE):
if(line[i].index == 0):
globalBraceCount += 1
braceCount += 1
else:
if(braceCount == 0):
string = string[2:100000]
globalBraceCount -= 1
braceCount -= 1
if(line[i].type == TYPE_BRACKET):
if(line[i].index == 0):
globalBracketCount += 1
bracketCount += 1
else:
if(bracketCount == 0):
string = string[2:100000]
globalBracketCount -= 1
bracketCount -= 1
tabCount = curlyCount + braceCount + bracketCount
tabBefore = True
if(prevLine):
if(prevLine[0].type == TYPE_CONTROL and prevLine[0].string != 'case' and prevLine[0].string != 'default'):
lastToken = prevLine[len(prevLine)-1]
if(lastToken.type != TYPE_CURLY or lastToken.index > 0):
string += ' ';
elif(prevLine[len(prevLine)-1].type == TYPE_BINOPERATOR and tabCount <= 0):
tabBefore = False
string += ' ';
if(tabCount < 0 and tabBefore):
for i in range(abs(tabCount)):
tabs = tabs[2:10000]
string += tabs
if(len(line)>1):
firstToken = line[0]
lastToken = line[len(line)-1]
if(firstToken.index == 1 and (firstToken.type == TYPE_CURLY or firstToken.type == TYPE_BRACE or firstToken.type == TYPE_BRACKET) and
lastToken.index == 0 and (lastToken.type == TYPE_CURLY or lastToken.type == TYPE_BRACE or lastToken.type == TYPE_BRACKET)):
string = string[2:10000]
elif(len(line) == 1 and line[0].type == TYPE_CURLY and line[0].index == 0):
string = string[2:10000]
if(tabCount < 0 and not tabBefore):
for i in range(abs(tabCount)):
tabs = tabs[2:10000]
if(tabCount > 0):
for i in range(tabCount):
tabs += ' '
for i in range(0,len(line)):
if(line[i].type == TYPE_BRACE or line[i].type == TYPE_CURLY or line[i].type == TYPE_BRACKET):
if(line[i].index == 0):
history.append(line[i].string)
else:
if(line[i].type == TYPE_CURLY):
if(len(history) > 2 and history[len(history)-1] == 'case'):
tabs = tabs[2:10000]
string = string[2:10000]
history.pop()
if(len(history) > 0):
history.pop()
if(line[i].type == TYPE_COMMENT):
string += line[i].string.strip()
continue
if(line[i].type == TYPE_CURLY):
if(line[i].index == 0 and not string.endswith(' ') and not string.endswith('[') and not string.endswith('(')):
string += ' '+line[i].string
continue
if(line[i].type == TYPE_FUNCTION):
if(line[i+1].type != TYPE_BRACE and (line[i].string == 'function' or extension == 'kl')):
string += line[i].string+' '
continue
if(line[i].type == TYPE_BINOPERATOR):
if(line[i].string == '-'):
if(i==0):
string += line[i].string
continue
if(line[i-1].type != TYPE_IDENTIFIER and line[i-1].index == 0):
string += line[i].string
continue
if(not string.endswith(' ')):
if line[i].string == ":" :
if(len(history) > 0):
if(history[len(history)-1] == '?'):
string += ' '
history.pop()
elif line[i].string == "?":
history.append('?')
string += ' '
elif line[i].string == "!":
if(not string.endswith('(')):
string += ' '
else:
string += ' '
string += line[i].string
if(i < len(line)-1 and line[i].string != '!'):
string += ' '
continue
if(line[i].type == TYPE_COMMA and line[i].index == 0 and i < len(line)-1):
string += line[i].string+' '
continue
if(line[i].type == TYPE_CONTROL):
if(line[i].string == 'case' or line[i].string == 'default'):
if(len(history)>0 and history[len(history)-1] == 'case'):
string = string[2:10000]
else:
history.append('case')
tabs += ' '
if(i < len(line)-1 and (line[i+1].type == TYPE_BRACE or line[i+1].type == TYPE_CONTROL or line[i+1].type == TYPE_COMMENT or line[i+1].type == TYPE_IDENTIFIER)):
string += line[i].string+' '
else:
string += line[i].string
continue
if(line[i].type == TYPE_KEYWORD and (line[i].string == "var" or line[i].string == "#include")):
string += line[i].string+' '
continue
if(line[i].type == TYPE_KEYWORD and line[i].string == "return" and i < len(line)-1 and line[i+1].type != TYPE_SEMICOLON):
string += line[i].string+' '
continue
if(line[i].type == TYPE_IDENTIFIER and len(string) > 0 and not string.endswith(' ') and not string.endswith('.') and not string.endswith('(') and not string.endswith('[') and not string.endswith('{') and not string.endswith('!')):
if(string.endswith('-') and not string[0:len(string)-1].endswith(' ')):
string += line[i].string
else:
string += ' '+line[i].string
continue
if(line[i].type == TYPE_SEMICOLON and i < len(line)-1 and line[i+1].type != TYPE_WHITESPACE):
string += line[i].string + ' '
continue
string += line[i].string
if(len(string.strip())==0):
strings.append('')
else:
strings.append(string)
# now reindent the tabs, based on smallest indent possible
counts = []
for string in strings:
count = 0
while(string[count*2:count*2+1] == ' '):
count += 1
counts.append(count)
def reindent(strings,counts,index):
if(strings[index] == ''):
return
count = counts[index]
while(counts[index+1] == count or strings[index+1] == ''):
index += 1
if(index == len(counts)-1):
return
if(counts[index+1] > count+1):
highIndex = index+1
lowIndex = index+1
# we found a 2 tabbing or higher
# now let's check if the next lower one is also my count
while(counts[lowIndex] >= counts[highIndex] or strings[lowIndex] == ''):
lowIndex += 1
if(lowIndex == len(counts)-1):
break
if(counts[lowIndex] <= count):
# fantastic, we can lower the tabs
diff = count - counts[highIndex] + 1
for i in range(highIndex,lowIndex):
counts[i] += diff
for i in range(len(counts)-1):
reindent(strings,counts,i)
for i in range(len(counts)):
count = 0
while(strings[i][count:count+1] == ' '):
count += 1
newCount = counts[i] * 2
strings[i] = strings[i][(count-newCount):100000]
return '\n'.join(strings)
def parseJSFile(fileName):
# get the content
content = open(fileName).read()
tokens = tokenize(content)
string = stringify(tokens)
if(not string.endswith('\n')):
string += '\n'
open(fileName,'w').write(string)
def parseHTMLFile(fileName):
# get the content
lines = open(fileName).read().replace('\t',' ').replace('\r\n','\n').replace('\r','\n').split('\n')
prejscontent = []
jscontent = []
postjscontent = []
insideJS = 0
for line in lines:
stripped = line.lower().strip()
if(insideJS == 0):
if(stripped.startswith('<')):
stripped = stripped[1:10000].strip()
if(stripped.startswith('script') and stripped.find('src')==-1):
insideJS = 1
prejscontent.append(line)
elif(insideJS == 1):
if(stripped.startswith('<')):
insideJS = 2
postjscontent.append(line)
else:
jscontent.append(line)
else:
postjscontent.append(line)
tokens = tokenize('\n'.join(jscontent))
string = stringify(tokens)
string = '\n'.join(prejscontent) + '\n' + string + '\n' + '\n'.join(postjscontent)
open(fileName,'w').write(string)
def main():
if(not sys.argv or len(sys.argv) == 0):
raise(Exception("No files specified!"))
arguments = []
for arg in sys.argv:
arguments.append(arg)
if(len(arguments) <= 1):
print("Run the tool with all paths to beautify!")
return
files = []
for arg in arguments:
if(arg.find('*') != -1):
matched = glob.glob(arg)
for match in matched:
arguments.append(match)
continue
for ft in fileTypes:
if(arg.lower().endswith(ft)):
if(os.path.exists(arg)):
files.append(arg)
break
else:
raise(Exception("The file '"+arg+' does not exist!'))
# parse each file
for i in range(len(files)):
extension = files[i].lower().rpartition('.')[2]
if(extension == 'js' or extension == 'kl'):
parseJSFile(files[i])
elif(extension == 'html' or extension == 'htm'):
parseHTMLFile(files[i])
else:
raise(Exception("Unsupported file format '"+extension+"'!"))
print(str(i+1)+" of "+str(len(files))+" : beautified '"+files[i]+"' successfully.")
if __name__ == '__main__':
main()
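# Invocation sketch (paths are examples only): the tool is run directly on the
# files or globs to beautify in place, e.g.
#   python js_beautifier.py Web/*.js Native/*.kl docs/index.html
# Each matched .js/.kl file is re-tokenized and rewritten; .html files only have
# their inline <script> blocks rewritten.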
|
agpl-3.0
| 679,771,519,249,903,600 | 1,406,144,598,341,197,600 | 30.691238 | 236 | 0.560588 | false |
krishna-pandey-git/django
|
django/contrib/gis/shortcuts.py
|
388
|
1209
|
import zipfile
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
def compress_kml(kml):
"Returns compressed KMZ from the given KML string."
kmz = BytesIO()
zf = zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED)
zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
zf.close()
kmz.seek(0)
return kmz.read()
def render_to_kml(*args, **kwargs):
"Renders the response as KML (using the correct MIME type)."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
"""
Compresses the KML content and returns as KMZ (using the correct
MIME type).
"""
return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)),
content_type='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
"Renders the response using the MIME type for plain text."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='text/plain')
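# Hedged view-level sketch (template name and queryset are made up):
#
#   def places_kml(request):
#       places = Place.objects.all()
#       return render_to_kml('gis/kml/placemarks.kml', {'places': places})
#
# render_to_kmz works the same way but returns the compressed KMZ payload instead.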
|
bsd-3-clause
| 5,319,238,194,549,684,000 | -5,049,935,490,697,879,000 | 27.116279 | 79 | 0.694789 | false |
boompieman/iim_project
|
project_python2/lib/python2.7/site-packages/pip/_vendor/retrying.py
|
934
|
9972
|
## Copyright 2013-2014 Ray Holder
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import random
from pip._vendor import six
import sys
import time
import traceback
# sys.maxint / 2, since Python 3.2 doesn't have a sys.maxint...
MAX_WAIT = 1073741823
def retry(*dargs, **dkw):
"""
Decorator function that instantiates the Retrying object
@param *dargs: positional arguments passed to Retrying object
@param **dkw: keyword arguments passed to the Retrying object
"""
# support both @retry and @retry() as valid syntax
if len(dargs) == 1 and callable(dargs[0]):
def wrap_simple(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying().call(f, *args, **kw)
return wrapped_f
return wrap_simple(dargs[0])
else:
def wrap(f):
@six.wraps(f)
def wrapped_f(*args, **kw):
return Retrying(*dargs, **dkw).call(f, *args, **kw)
return wrapped_f
return wrap
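# Usage sketch (illustrative only): both decorator forms handled by the branch above
# are valid, e.g.
#   @retry
#   def might_fail(): ...
# or, with explicit policy keywords passed through to Retrying():
#   @retry(stop_max_attempt_number=3, wait_fixed=2000)
#   def might_fail(): ...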
class Retrying(object):
def __init__(self,
stop=None, wait=None,
stop_max_attempt_number=None,
stop_max_delay=None,
wait_fixed=None,
wait_random_min=None, wait_random_max=None,
wait_incrementing_start=None, wait_incrementing_increment=None,
wait_exponential_multiplier=None, wait_exponential_max=None,
retry_on_exception=None,
retry_on_result=None,
wrap_exception=False,
stop_func=None,
wait_func=None,
wait_jitter_max=None):
self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number
self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay
self._wait_fixed = 1000 if wait_fixed is None else wait_fixed
self._wait_random_min = 0 if wait_random_min is None else wait_random_min
self._wait_random_max = 1000 if wait_random_max is None else wait_random_max
self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start
self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment
self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier
self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max
self._wait_jitter_max = 0 if wait_jitter_max is None else wait_jitter_max
# TODO add chaining of stop behaviors
# stop behavior
stop_funcs = []
if stop_max_attempt_number is not None:
stop_funcs.append(self.stop_after_attempt)
if stop_max_delay is not None:
stop_funcs.append(self.stop_after_delay)
if stop_func is not None:
self.stop = stop_func
elif stop is None:
self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs)
else:
self.stop = getattr(self, stop)
# TODO add chaining of wait behaviors
# wait behavior
wait_funcs = [lambda *args, **kwargs: 0]
if wait_fixed is not None:
wait_funcs.append(self.fixed_sleep)
if wait_random_min is not None or wait_random_max is not None:
wait_funcs.append(self.random_sleep)
if wait_incrementing_start is not None or wait_incrementing_increment is not None:
wait_funcs.append(self.incrementing_sleep)
if wait_exponential_multiplier is not None or wait_exponential_max is not None:
wait_funcs.append(self.exponential_sleep)
if wait_func is not None:
self.wait = wait_func
elif wait is None:
self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs)
else:
self.wait = getattr(self, wait)
# retry on exception filter
if retry_on_exception is None:
self._retry_on_exception = self.always_reject
else:
self._retry_on_exception = retry_on_exception
# TODO simplify retrying by Exception types
# retry on result filter
if retry_on_result is None:
self._retry_on_result = self.never_reject
else:
self._retry_on_result = retry_on_result
self._wrap_exception = wrap_exception
def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Stop after the previous attempt >= stop_max_attempt_number."""
return previous_attempt_number >= self._stop_max_attempt_number
def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Stop after the time from the first attempt >= stop_max_delay."""
return delay_since_first_attempt_ms >= self._stop_max_delay
def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Don't sleep at all before retrying."""
return 0
def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Sleep a fixed amount of time between each retry."""
return self._wait_fixed
def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""Sleep a random amount of time between wait_random_min and wait_random_max"""
return random.randint(self._wait_random_min, self._wait_random_max)
def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
"""
Sleep an incremental amount of time after each attempt, starting at
wait_incrementing_start and incrementing by wait_incrementing_increment
"""
result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1))
if result < 0:
result = 0
return result
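# Worked example (illustrative): with wait_incrementing_start=500 and
# wait_incrementing_increment=100, attempts 1, 2 and 3 sleep 500, 600 and 700 ms
# respectively, since result = start + increment * (previous_attempt_number - 1).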
def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
exp = 2 ** previous_attempt_number
result = self._wait_exponential_multiplier * exp
if result > self._wait_exponential_max:
result = self._wait_exponential_max
if result < 0:
result = 0
return result
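# Worked example (illustrative): with wait_exponential_multiplier=1000, attempt 1
# yields 1000 * 2**1 = 2000 ms, attempt 2 yields 4000 ms and attempt 3 yields 8000 ms,
# with the result capped at wait_exponential_max (MAX_WAIT by default).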
def never_reject(self, result):
return False
def always_reject(self, result):
return True
def should_reject(self, attempt):
reject = False
if attempt.has_exception:
reject |= self._retry_on_exception(attempt.value[1])
else:
reject |= self._retry_on_result(attempt.value)
return reject
def call(self, fn, *args, **kwargs):
start_time = int(round(time.time() * 1000))
attempt_number = 1
while True:
try:
attempt = Attempt(fn(*args, **kwargs), attempt_number, False)
except:
tb = sys.exc_info()
attempt = Attempt(tb, attempt_number, True)
if not self.should_reject(attempt):
return attempt.get(self._wrap_exception)
delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time
if self.stop(attempt_number, delay_since_first_attempt_ms):
if not self._wrap_exception and attempt.has_exception:
# get() on an attempt with an exception should cause it to be raised, but raise just in case
raise attempt.get()
else:
raise RetryError(attempt)
else:
sleep = self.wait(attempt_number, delay_since_first_attempt_ms)
if self._wait_jitter_max:
jitter = random.random() * self._wait_jitter_max
sleep = sleep + max(0, jitter)
time.sleep(sleep / 1000.0)
attempt_number += 1
class Attempt(object):
"""
An Attempt encapsulates a call to a target function that may end as a
normal return value from the function or an Exception depending on what
occurred during the execution.
"""
def __init__(self, value, attempt_number, has_exception):
self.value = value
self.attempt_number = attempt_number
self.has_exception = has_exception
def get(self, wrap_exception=False):
"""
Return the return value of this Attempt instance or raise an Exception.
If wrap_exception is true, this Attempt is wrapped inside of a
RetryError before being raised.
"""
if self.has_exception:
if wrap_exception:
raise RetryError(self)
else:
six.reraise(self.value[0], self.value[1], self.value[2])
else:
return self.value
def __repr__(self):
if self.has_exception:
return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2])))
else:
return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value)
class RetryError(Exception):
"""
A RetryError encapsulates the last Attempt instance right before giving up.
"""
def __init__(self, last_attempt):
self.last_attempt = last_attempt
def __str__(self):
return "RetryError[{0}]".format(self.last_attempt)
|
gpl-3.0
| 2,918,564,008,114,284,500 | -7,941,044,414,888,572,000 | 36.348315 | 120 | 0.617329 | false |
aasiutin/electrum
|
gui/qt/qrtextedit.py
|
12
|
1913
|
from electrum.i18n import _
from electrum.plugins import run_hook
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from util import ButtonsTextEdit, MessageBoxMixin
class ShowQRTextEdit(ButtonsTextEdit):
def __init__(self, text=None):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(1)
self.addButton(":icons/qrcode.png", self.qr_show, _("Show as QR code"))
run_hook('show_text_edit', self)
def qr_show(self):
from qrcodewidget import QRDialog
try:
s = str(self.toPlainText())
except:
s = unicode(self.toPlainText())
QRDialog(s).exec_()
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Show as QR code"), self.qr_show)
m.exec_(e.globalPos())
class ScanQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text=""):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(0)
self.addButton(":icons/file.png", self.file_input, _("Read file"))
self.addButton(":icons/qrcode.png", self.qr_input, _("Read QR code"))
run_hook('scan_text_edit', self)
def file_input(self):
fileName = unicode(QFileDialog.getOpenFileName(self, 'select file'))
if not fileName:
return
with open(fileName, "r") as f:
data = f.read()
self.setText(data)
def qr_input(self):
from electrum import qrscanner, get_config
try:
data = qrscanner.scan_qr(get_config())
except BaseException as e:
self.show_error(str(e))
return ""
if type(data) != str:
return
self.setText(data)
return data
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.qr_input)
m.exec_(e.globalPos())
|
mit
| 7,725,288,937,173,203,000 | -6,603,789,530,263,143,000 | 28.890625 | 79 | 0.597491 | false |
ahupowerdns/pdns
|
regression-tests.api/test_Servers.py
|
13
|
1661
|
from test_helper import ApiTestCase, is_auth, is_recursor
class Servers(ApiTestCase):
def test_list_servers(self):
r = self.session.get(self.url("/api/v1/servers"))
self.assert_success_json(r)
lst = r.json()
self.assertEquals(len(lst), 1) # only localhost allowed in there
data = lst[0]
for k in ('id', 'daemon_type', 'url'):
self.assertIn(k, data)
self.assertEquals(data['id'], 'localhost')
def test_servers_localhost(self):
r = self.session.get(self.url("/api/v1/servers/localhost"))
self.assert_success_json(r)
data = r.json()
for k in ('id', 'type', 'version', 'daemon_type', 'url', 'zones_url', 'config_url'):
self.assertIn(k, data)
self.assertEquals(data['id'], 'localhost')
self.assertEquals(data['type'], 'Server')
# or 'recursor' for recursors
if is_auth():
daemon_type = 'authoritative'
elif is_recursor():
daemon_type = 'recursor'
else:
raise RuntimeError('Unknown daemon type')
self.assertEquals(data['daemon_type'], daemon_type)
def test_read_config(self):
r = self.session.get(self.url("/api/v1/servers/localhost/config"))
self.assert_success_json(r)
data = dict([(r['name'], r['value']) for r in r.json()])
self.assertIn('daemon', data)
def test_read_statistics(self):
r = self.session.get(self.url("/api/v1/servers/localhost/statistics"))
self.assert_success_json(r)
data = dict([(r['name'], r['value']) for r in r.json()])
self.assertIn('uptime', data)
|
gpl-2.0
| -6,937,035,477,299,148,000 | 3,492,863,495,419,209,700 | 37.627907 | 92 | 0.583986 | false |
Lujeni/ansible
|
lib/ansible/modules/cloud/vmware/vmware_guest.py
|
1
|
150199
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest
short_description: Manages virtual machines in vCenter
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
modify various virtual machine components like network, disk, customization etc.,
rename a virtual machine and remove a virtual machine with associated components.
version_added: '2.2'
author:
- Loic Blot (@nerzhul) <[email protected]>
- Philippe Dellaert (@pdellaert) <[email protected]>
- Abhijeet Kasurde (@Akasurde) <[email protected]>
requirements:
- python >= 2.6
- PyVmomi
notes:
- Please make sure that the user used for vmware_guest has the correct level of privileges.
- For example, following is the list of minimum privileges required by users to create virtual machines.
- " DataStore > Allocate Space"
- " Virtual Machine > Configuration > Add New Disk"
- " Virtual Machine > Configuration > Add or Remove Device"
- " Virtual Machine > Inventory > Create New"
- " Network > Assign Network"
- " Resource > Assign Virtual Machine to Resource Pool"
- "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
- Tested on vSphere 5.5, 6.0, 6.5 and 6.7
- Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller
- Uses SysPrep for Windows VM (depends on 'guest_id' parameter match 'win') with PyVmomi
- "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
options:
state:
description:
- Specify the state the virtual machine should be in.
- 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
configuration conforms to task arguments.'
- 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
is removed with its associated components.'
- 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
and virtual machine does not exist, then virtual machine is deployed with given parameters.'
- 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
then the specified virtual machine is powered on.'
- 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
then the specified virtual machine is powered off.'
- 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
- 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
- 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
- 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
default: present
choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
- 'If multiple virtual machines with same name exists, then C(folder) is required parameter to
identify uniqueness of the virtual machine.'
- This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
and virtual machine does not exist.
- This parameter is case sensitive.
required: yes
name_match:
description:
- If multiple virtual machines matching the name, use the first or last found.
default: 'first'
choices: [ first, last ]
uuid:
description:
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
- This is required if C(name) is not supplied.
- If virtual machine does not exist, then this parameter is ignored.
- Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
template:
description:
- Template or existing virtual machine used to create new virtual machine.
- If this value is not set, virtual machine is created without using a template.
- If the virtual machine already exists, this parameter will be ignored.
- This parameter is case sensitive.
- You can also specify template or VM UUID for identifying source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as UUID value.
- From version 2.8 onwards, absolute path to virtual machine or template can be used.
aliases: [ 'template_src' ]
is_template:
description:
- Flag the instance as a template.
- This will mark the given virtual machine as template.
default: 'no'
type: bool
version_added: '2.3'
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
- This parameter is required, while deploying new virtual machine. version_added 2.5.
- 'If multiple machines are found with same name, this parameter is used to identify
uniqueness of the virtual machine. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
hardware:
description:
- Manage virtual machine's hardware attributes.
- All parameters case sensitive.
- 'Valid attributes are:'
- ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
- ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
version_added: 2.5'
- ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
- ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
- ' - C(num_cpus) (integer): Number of CPUs.'
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket.'
- " C(num_cpus) must be a multiple of C(num_cpu_cores_per_socket).
For example to create a VM with 2 sockets of 4 cores, specify C(num_cpus): 8 and C(num_cpu_cores_per_socket): 4"
- ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
- " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
will always be equal to the virtual machine's memory size. version_added: 2.5"
- ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
version_added: 2.5.'
- ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
version_added: 2.5'
- ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
machine. Unit is MB. C(memory_reservation) is alias to this. version_added: 2.5'
- ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
version_added: 2.5'
- ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
Unit is MHz. version_added: 2.5'
- ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
If value specified as C(latest), version is set to the most current virtual hardware supported on the host.
C(latest) is added in version 2.10.
Please check VMware documentation for correct virtual machine hardware version.
Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
version then no action is taken. version_added: 2.6'
- ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
Allowed values are "bios" and "efi". version_added: 2.7'
- ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10.
(Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)'
guest_id:
description:
- Set the guest ID.
- This parameter is case sensitive.
- 'Examples:'
- " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
- " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
- " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
- This field is required when creating a virtual machine, not required when creating from the template.
- >
Valid values are referenced here:
U(https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
version_added: '2.3'
disk:
description:
- A list of disks to add.
- This parameter is case sensitive.
- Shrinking disks is not supported.
- Removing existing disks of the virtual machine is not supported.
- 'Valid attributes are:'
- ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
- ' Default: C(None) thick disk, no eagerzero.'
- ' - C(datastore) (string): The name of datastore which will be used for the disk. If C(autoselect_datastore) is set to True,
then will select the less used datastore whose name contains this "disk.datastore" string.'
- ' - C(filename) (string): Existing disk image to be used. Filename must already exist on the datastore.'
- ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.'
- ' - C(autoselect_datastore) (bool): select the less used datastore. "disk.datastore" and "disk.autoselect_datastore"
will not be used if C(datastore) is specified outside this C(disk) configuration.'
- ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
- ' - Available options are :'
- ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
- ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
- ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
cdrom:
description:
- A CD-ROM configuration for the virtual machine.
- Or a list of CD-ROMs configuration for the virtual machine. Added in version 2.9.
- 'Parameters C(controller_type), C(controller_number), C(unit_number), C(state) are added for a list of CD-ROMs
configuration support.'
- 'Valid attributes are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM
will be disconnected but present.'
- ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso).
Required if type is set C(iso).'
- ' - C(controller_type) (string): Default value is C(ide). Only C(ide) controller type for CD-ROM is supported for
now, will add SATA controller type in the future.'
- ' - C(controller_number) (int): For C(ide) controller, valid value is 0 or 1.'
- ' - C(unit_number) (int): For CD-ROM device attach to C(ide) controller, valid value is 0 or 1.
C(controller_number) and C(unit_number) are mandatory attributes.'
- ' - C(state) (string): Valid value is C(present) or C(absent). Default is C(present). If set to C(absent), then
the specified CD-ROM will be removed. For C(ide) controller, hot-add or hot-remove CD-ROM is not supported.'
version_added: '2.5'
resource_pool:
description:
- Use the given resource pool for virtual machine operation.
- This parameter is case sensitive.
- Resource pool should be child of the selected host parent.
version_added: '2.3'
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the virtual machine.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
- "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
default: 'no'
type: bool
wait_for_ip_address_timeout:
description:
- Define a timeout (in seconds) for the wait_for_ip_address parameter.
default: '300'
type: int
version_added: '2.10'
wait_for_customization_timeout:
description:
- Define a timeout (in seconds) for the wait_for_customization parameter.
- Be careful when setting this value since the time guest customization took may differ among guest OSes.
default: '3600'
type: int
version_added: '2.10'
wait_for_customization:
description:
- Wait until vCenter detects all guest customizations as successfully completed.
- When enabled, the VM will automatically be powered on.
- "If vCenter does not detect guest customization start or succeed, failed events after time
C(wait_for_customization_timeout) parameter specified, warning message will be printed and task result is fail."
default: 'no'
type: bool
version_added: '2.8'
state_change_timeout:
description:
- If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
snapshot_src:
description:
- Name of the existing snapshot to use to create a clone of a virtual machine.
- This parameter is case sensitive.
- While creating linked clone using C(linked_clone) parameter, this parameter is required.
version_added: '2.4'
linked_clone:
description:
- Whether to create a linked clone from the snapshot specified.
- If specified, then C(snapshot_src) is required parameter.
default: 'no'
type: bool
version_added: '2.4'
force:
description:
- Ignore warnings and complete the actions.
- This parameter is useful while removing a virtual machine which is in powered on state.
- 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
This is specifically the case for removing a powered on the virtual machine when C(state) is set to C(absent).'
default: 'no'
type: bool
delete_from_inventory:
description:
- Whether to delete Virtual machine from inventory or delete from disk.
default: False
type: bool
version_added: '2.10'
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
cluster:
description:
- The cluster name where the virtual machine will run.
- This is a required parameter, if C(esxi_hostname) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
version_added: '2.3'
esxi_hostname:
description:
- The ESXi hostname where the virtual machine will run.
- This is a required parameter, if C(cluster) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
annotation:
description:
- A note or annotation to include in the virtual machine.
version_added: '2.3'
customvalues:
description:
- Define a list of custom values to set on virtual machine.
- A custom value object takes two fields C(key) and C(value).
- Incorrect key and values will be ignored.
version_added: '2.3'
networks:
description:
- A list of networks (in the order of the NICs).
- Removing NICs is not allowed, while reconfiguring the virtual machine.
- All parameters and VMware object names are case sensitive.
- 'One of the below parameters is required per entry:'
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
- ' - C(vlan) (integer): VLAN number for this interface.'
- 'Optional parameters per entry (used for virtual hardware):'
- ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
- ' - C(mac) (string): Customize MAC address.'
- ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
This value is required if multiple distributed portgroups exists with the same name. version_added 2.7'
- ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on. version_added: 2.5'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
- ' - C(ip) (string): Static IP address (implies C(type: static)).'
- ' - C(netmask) (string): Static netmask required for C(ip).'
- ' - C(gateway) (string): Static gateway.'
- ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
- ' - C(domain) (string): Domain name for this network interface (Windows).'
- ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
- ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
version_added: '2.3'
customization:
description:
- Parameters for OS customization when cloning from the template or the virtual machine, or apply to the existing virtual machine directly.
- Not all operating systems are supported for customization with respective vCenter version,
please check VMware documentation for respective OS customization.
- For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
- All parameters and VMware object names are case sensitive.
- Linux based OSes requires Perl package to be installed for OS customizations.
- 'Common parameters (Linux/Windows):'
- ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly.
If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8'
- ' - C(dns_servers) (list): List of DNS servers to configure.'
- ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
- ' - C(domain) (string): DNS domain name to use.'
- ' - C(hostname) (string): Computer hostname (default: shorted C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
and minus, rest of the characters are dropped as per RFC 952.'
- 'Parameters related to Linux customization:'
- ' - C(timezone) (string): Timezone (See List of supported time zones for different vSphere versions in Linux/Unix
systems (2145518) U(https://kb.vmware.com/s/article/2145518)). version_added: 2.9'
- ' - C(hwclockUTC) (bool): Specifies whether the hardware clock is in UTC or local time.
True when the hardware clock is in UTC, False when the hardware clock is in local time. version_added: 2.9'
- 'Parameters related to Windows customization:'
- ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
- ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
- ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(fullname) (string): Server owner name (default: Administrator).'
- ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
- ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
- ' - C(orgname) (string): Organisation name (default: ACME).'
- ' - C(password) (string): Local administrator password.'
- ' - C(productid) (string): Product ID.'
- ' - C(runonce) (list): List of commands to run at first user logon.'
- ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
version_added: '2.3'
vapp_properties:
description:
- A list of vApp properties
- 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)'
- 'Basic attributes are:'
- ' - C(id) (string): Property id - required.'
- ' - C(value) (string): Property value.'
- ' - C(type) (string): Value type, string type by default.'
- ' - C(operation): C(remove): This attribute is required only when removing properties.'
version_added: '2.6'
customization_spec:
description:
- Unique name identifying the requested customization specification.
- This parameter is case sensitive.
- If set, then overrides C(customization) parameter values.
version_added: '2.6'
datastore:
description:
- Specify datastore or datastore cluster to provision virtual machine.
- 'This parameter takes precedence over "disk.datastore" parameter.'
- 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed
from the template.'
- Please see example for more usage.
version_added: '2.7'
convert:
description:
- Specify convert disk type while cloning template or virtual machine.
choices: [ thin, thick, eagerzeroedthick ]
version_added: '2.8'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create a virtual machine on given ESXi hostname
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /DC1/vm/
name: test_vm_0001
state: poweredon
guest_id: centos64Guest
# This is hostname of particular ESXi server on which user wants VM to be deployed
esxi_hostname: "{{ esxi_hostname }}"
disk:
- size_gb: 10
type: thin
datastore: datastore1
hardware:
memory_mb: 512
num_cpus: 4
scsi: paravirtual
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
ip: 10.10.10.100
netmask: 255.255.255.0
device_type: vmxnet3
wait_for_ip_address: yes
wait_for_ip_address_timeout: 600
delegate_to: localhost
register: deploy_vm
- name: Create a virtual machine from a template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: template_el7
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
# Add another disk from an existing VMDK
- filename: "[datastore1] testvms/testvm_2_1/testvm_2_1.vmdk"
hardware:
memory_mb: 512
num_cpus: 6
num_cpu_cores_per_socket: 3
scsi: paravirtual
memory_reservation_lock: True
mem_limit: 8096
mem_reservation: 4096
cpu_limit: 8096
cpu_reservation: 4096
max_connections: 5
hotadd_cpu: True
hotremove_cpu: True
hotadd_memory: False
version: 12 # Hardware version of virtual machine
boot_firmware: "efi"
cdrom:
type: iso
iso_path: "[datastore1] livecd.iso"
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Clone a virtual machine from Windows template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: aa:bb:dd:aa:00:14
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
- vlan: 1234
type: dhcp
customization:
autologon: yes
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Clone a virtual machine from Linux template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
state: present
folder: /DC1/vm
template: "{{ template }}"
name: "{{ vm_name }}"
cluster: DC1_C1
networks:
- name: VM Network
ip: 192.168.10.11
netmask: 255.255.255.0
wait_for_ip_address: True
customization:
domain: "{{ guest_domain }}"
dns_servers:
- 8.9.9.9
- 7.8.8.9
dns_suffix:
- example.com
- example2.com
delegate_to: localhost
- name: Rename a virtual machine (requires the virtual machine's uuid)
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
name: new_name
state: present
delegate_to: localhost
- name: Remove a virtual machine by uuid
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: absent
delegate_to: localhost
- name: Remove a virtual machine from inventory
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
delete_from_inventory: True
state: absent
delegate_to: localhost
- name: Manipulate vApp properties
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
state: present
vapp_properties:
- id: remoteIP
category: Backup
label: Backup server IP
type: str
value: 10.10.10.1
- id: old_property
operation: remove
delegate_to: localhost
- name: Set powerstate of a virtual machine to poweroff by using UUID
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: poweredoff
delegate_to: localhost
- name: Deploy a virtual machine in a datastore different from the datastore of the template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: "{{ vm_name }}"
state: present
template: "{{ template_name }}"
# Here datastore can be different which holds template
datastore: "{{ virtual_machine_datastore }}"
hardware:
memory_mb: 512
num_cpus: 2
scsi: paravirtual
delegate_to: localhost
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import re
import time
import string
HAS_PYVMOMI = False
try:
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
pass
from random import randint
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.network import is_mac
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
compile_folder_path_for_object, serialize_spec,
vmware_argument_spec, set_vm_power_state, PyVmomi,
find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip,
wait_for_task, TaskError, quote_obj_name)
def list_or_dict(value):
if isinstance(value, list) or isinstance(value, dict):
return value
else:
raise ValueError("'%s' is not valid, valid type is 'list' or 'dict'." % value)
class PyVmomiDeviceHelper(object):
""" This class is a helper to create easily VMware Objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
self.scsi_device_type = {
'lsilogic': vim.vm.device.VirtualLsiLogicController,
'paravirtual': vim.vm.device.ParaVirtualSCSIController,
'buslogic': vim.vm.device.VirtualBusLogicController,
'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
}
def create_scsi_controller(self, scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
scsi_ctl.device = scsi_device()
scsi_ctl.device.busNumber = 0
# While creating a new SCSI controller, temporary key value
# should be unique negative integers
scsi_ctl.device.key = -randint(1000, 9999)
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
def is_scsi_controller(self, device):
return isinstance(device, tuple(self.scsi_device_type.values()))
@staticmethod
def create_ide_controller(bus_number=0):
ide_ctl = vim.vm.device.VirtualDeviceSpec()
ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_ctl.device = vim.vm.device.VirtualIDEController()
ide_ctl.device.deviceInfo = vim.Description()
# While creating a new IDE controller, temporary key value
# should be unique negative integers
ide_ctl.device.key = -randint(200, 299)
ide_ctl.device.busNumber = bus_number
return ide_ctl
@staticmethod
def create_cdrom(ide_device, cdrom_type, iso_path=None, unit_number=0):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cdrom_spec.device = vim.vm.device.VirtualCdrom()
cdrom_spec.device.controllerKey = ide_device.key
cdrom_spec.device.key = -randint(3000, 3999)
cdrom_spec.device.unitNumber = unit_number
cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_spec.device.connectable.allowGuestControl = True
cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
if cdrom_type in ["none", "client"]:
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_type == "iso":
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
return cdrom_spec
@staticmethod
def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
if cdrom_type == "none":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
not cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
elif cdrom_type == "client":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
elif cdrom_type == "iso":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
cdrom_device.backing.fileName == iso_path and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
@staticmethod
def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None):
# Updating an existing CD-ROM
if cdrom_spec["type"] in ["client", "none"]:
cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_spec["type"] == "iso" and iso_path is not None:
cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_device.connectable.allowGuestControl = True
cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none")
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_device.connectable.connected = (cdrom_spec["type"] != "none")
def remove_cdrom(self, cdrom_device):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
cdrom_spec.device = cdrom_device
return cdrom_spec
def create_scsi_disk(self, scsi_ctl, disk_index=None):
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.controllerKey = scsi_ctl.device.key
if self.next_disk_unit_number == 7:
raise AssertionError()
if disk_index == 7:
raise AssertionError()
"""
Configure disk unit number.
"""
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
# unit number 7 is reserved to SCSI controller, increase next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
return diskspec
def get_device(self, device_type, name):
nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
vmxnet2=vim.vm.device.VirtualVmxnet2(),
vmxnet3=vim.vm.device.VirtualVmxnet3(),
e1000=vim.vm.device.VirtualE1000(),
e1000e=vim.vm.device.VirtualE1000e(),
sriov=vim.vm.device.VirtualSriovEthernetCard(),
)
if device_type in nic_dict:
return nic_dict[device_type]
else:
self.module.fail_json(msg='Invalid device_type "%s"'
' for network "%s"' % (device_type, name))
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
nic.device = self.get_device(device_type, device_infos['name'])
nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
nic.device.connectable.connected = True
if 'mac' in device_infos and is_mac(device_infos['mac']):
nic.device.addressType = 'manual'
nic.device.macAddress = device_infos['mac']
else:
nic.device.addressType = 'generated'
return nic
def integer_value(self, input_value, name):
"""
Function to return int value for given input, else return error
Args:
input_value: Input value to retrieve int value from
name: Name of the Input value (used to build error message)
Returns: (int) if integer value can be obtained, otherwise will send an error message.
"""
if isinstance(input_value, int):
return input_value
elif isinstance(input_value, str) and input_value.isdigit():
return int(input_value)
else:
self.module.fail_json(msg='"%s" attribute should be an'
' integer value.' % name)
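# Illustrative behaviour (not part of the module): integer_value(4, 'hardware.num_cpus')
# and integer_value('4', 'hardware.num_cpus') both return 4, while a value such as
# '4 vCPU' triggers module.fail_json() with the attribute name in the message.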
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
self.parent_datacenters = {}
def find_obj(self, content, types, name, confine_to_datacenter=True):
""" Wrapper around find_obj to set datacenter context """
result = find_obj(content, types, name)
if result and confine_to_datacenter:
if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
result = None
objects = self.get_all_objs(content, types, confine_to_datacenter=True)
for obj in objects:
if name is None or to_text(obj.name) == to_text(name):
return obj
return result
def get_all_objs(self, content, types, confine_to_datacenter=True):
""" Wrapper around get_all_objs to set datacenter context """
objects = get_all_objs(content, types)
if confine_to_datacenter:
if hasattr(objects, 'items'):
# resource pools come back as a dictionary
# make a copy
for k, v in tuple(objects.items()):
parent_dc = self.get_parent_datacenter(k)
if parent_dc.name != self.dc_name:
del objects[k]
else:
# everything else should be a list
objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
return objects
def get_network(self, network):
network = quote_obj_name(network)
if network not in self.networks:
self.networks[network] = self.find_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
def get_parent_datacenter(self, obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
if obj in self.parent_datacenters:
return self.parent_datacenters[obj]
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
self.parent_datacenters[obj] = datacenter
return datacenter
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.device_helper = PyVmomiDeviceHelper(self.module)
self.configspec = None
self.relospec = None
self.change_detected = False # a change was detected and needs to be applied through reconfiguration
self.change_applied = False # a change was applied meaning at least one task succeeded
self.customspec = None
self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm, delete_from_inventory=False):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
if vm.summary.runtime.powerState.lower() == 'poweredon':
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
"please use 'force' parameter to remove or poweroff VM "
"and try removing VM again." % vm.name)
# Delete VM from Inventory
if delete_from_inventory:
try:
vm.UnregisterVM()
except (vim.fault.TaskInProgress,
vmodl.RuntimeFault) as e:
return {'changed': self.change_applied, 'failed': True, 'msg': e.msg, 'op': 'UnregisterVM'}
self.change_applied = True
return {'changed': self.change_applied, 'failed': False}
# Delete VM from Disk
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
else:
return {'changed': self.change_applied, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.params['template']:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if self.params['guest_id'] and \
(vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_resource_alloc_info(self, vm_obj):
"""
Function to configure resource allocation information about virtual machine
:param vm_obj: VM object in case of reconfigure, None in case of deploy
:return: None
"""
rai_change_detected = False
memory_allocation = vim.ResourceAllocationInfo()
cpu_allocation = vim.ResourceAllocationInfo()
if 'hardware' in self.params:
if 'mem_limit' in self.params['hardware']:
mem_limit = None
try:
mem_limit = int(self.params['hardware'].get('mem_limit'))
except ValueError:
self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
memory_allocation.limit = mem_limit
if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
rai_change_detected = True
if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']:
mem_reservation = self.params['hardware'].get('mem_reservation')
if mem_reservation is None:
mem_reservation = self.params['hardware'].get('memory_reservation')
try:
mem_reservation = int(mem_reservation)
except ValueError:
self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.")
memory_allocation.reservation = mem_reservation
if vm_obj is None or \
memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
rai_change_detected = True
if 'cpu_limit' in self.params['hardware']:
cpu_limit = None
try:
cpu_limit = int(self.params['hardware'].get('cpu_limit'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
cpu_allocation.limit = cpu_limit
if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
rai_change_detected = True
if 'cpu_reservation' in self.params['hardware']:
cpu_reservation = None
try:
cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
cpu_allocation.reservation = cpu_reservation
if vm_obj is None or \
cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
rai_change_detected = True
if rai_change_detected:
self.configspec.memoryAllocation = memory_allocation
self.configspec.cpuAllocation = cpu_allocation
self.change_detected = True
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
try:
num_cpus = int(self.params['hardware']['num_cpus'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
# check VM power state and cpu hot-add/hot-remove state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
"cpuHotRemove is not enabled")
if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
"cpuHotAdd is not enabled")
if 'num_cpu_cores_per_socket' in self.params['hardware']:
try:
num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
"should be an integer value.")
if num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
"of hardware.num_cpu_cores_per_socket")
self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
self.change_detected = True
self.configspec.numCPUs = num_cpus
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
# num_cpu is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
try:
memory_mb = int(self.params['hardware']['memory_mb'])
except ValueError:
self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
" Please refer the documentation and provide"
" correct value.")
# check VM power state and memory hotadd state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
"operation is not supported")
elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="memoryHotAdd is not enabled")
self.configspec.memoryMB = memory_mb
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
if 'hotadd_memory' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']):
self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
self.change_detected = True
if 'hotadd_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']):
self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
self.change_detected = True
if 'hotremove_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']):
self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
self.change_detected = True
if 'memory_reservation_lock' in self.params['hardware']:
self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
self.change_detected = True
if 'boot_firmware' in self.params['hardware']:
# boot firmware re-config can cause boot issue
if vm_obj is not None:
return
boot_firmware = self.params['hardware']['boot_firmware'].lower()
if boot_firmware not in ('bios', 'efi'):
self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
" Need one of ['bios', 'efi']." % boot_firmware)
self.configspec.firmware = boot_firmware
self.change_detected = True
def sanitize_cdrom_params(self):
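        """
        Sanitize user provided CD-ROM params and group them by controller type and number
        Returns: A dict with 'ide' and 'sata' keys, each holding a list of
                 {'num': controller_number, 'cdrom': [cdrom_spec, ...]} entries, else fails
        """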
# cdroms {'ide': [{num: 0, cdrom: []}, {}], 'sata': [{num: 0, cdrom: []}, {}, ...]}
cdroms = {'ide': [], 'sata': []}
expected_cdrom_spec = self.params.get('cdrom')
if expected_cdrom_spec:
for cdrom_spec in expected_cdrom_spec:
cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower()
if cdrom_spec['controller_type'] not in ['ide', 'sata']:
self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'."
% cdrom_spec['controller_type'])
cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower()
if cdrom_spec['state'] not in ['present', 'absent']:
self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'."
% cdrom_spec['state'])
if cdrom_spec['state'] == 'present':
if 'type' in cdrom_spec and cdrom_spec.get('type') not in ['none', 'client', 'iso']:
self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'."
% cdrom_spec.get('type'))
if cdrom_spec.get('type') == 'iso' and not cdrom_spec.get('iso_path'):
self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
if cdrom_spec['controller_type'] == 'ide' and \
(cdrom_spec.get('controller_number') not in [0, 1] or cdrom_spec.get('unit_number') not in [0, 1]):
self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid"
" values are 0 or 1 for IDE controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
if cdrom_spec['controller_type'] == 'sata' and \
(cdrom_spec.get('controller_number') not in range(0, 4) or cdrom_spec.get('unit_number') not in range(0, 30)):
self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s,"
" valid controller_number value is 0-3, valid unit_number is 0-29"
" for SATA controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
ctl_exist = False
for exist_spec in cdroms.get(cdrom_spec['controller_type']):
if exist_spec['num'] == cdrom_spec['controller_number']:
ctl_exist = True
exist_spec['cdrom'].append(cdrom_spec)
break
if not ctl_exist:
cdroms.get(cdrom_spec['controller_type']).append({'num': cdrom_spec['controller_number'], 'cdrom': [cdrom_spec]})
return cdroms
def configure_cdrom(self, vm_obj):
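        """
        Function to configure the VM CD-ROM devices from the 'cdrom' parameter,
        which may be given either as a dict (legacy form) or as a list of specs
        Args:
            vm_obj: virtual machine object
        """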
# Configure the VM CD-ROM
if self.params.get('cdrom'):
if vm_obj and vm_obj.config.template:
# Changing CD-ROM settings on a template is not supported
return
if isinstance(self.params.get('cdrom'), dict):
self.configure_cdrom_dict(vm_obj)
elif isinstance(self.params.get('cdrom'), list):
self.configure_cdrom_list(vm_obj)
def configure_cdrom_dict(self, vm_obj):
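        """
        Function to configure a single CD-ROM from the dict form of the 'cdrom' parameter,
        creating an IDE controller and CD-ROM device if needed or editing the existing one
        Args:
            vm_obj: virtual machine object
        """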
if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']:
self.module.fail_json(msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'.")
if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'):
self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
cdrom_spec = None
cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
iso_path = self.params["cdrom"].get("iso_path")
if len(cdrom_devices) == 0:
# Creating new CD-ROM
ide_devices = self.get_vm_ide_devices(vm=vm_obj)
if len(ide_devices) == 0:
# Creating new IDE device
ide_ctl = self.device_helper.create_ide_controller()
ide_device = ide_ctl.device
self.change_detected = True
self.configspec.deviceChange.append(ide_ctl)
else:
ide_device = ide_devices[0]
if len(ide_device.device) > 3:
self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4"
" IDE devices of which none are a cdrom")
cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=self.params["cdrom"]["type"],
iso_path=iso_path)
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0],
cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path)
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_devices[0]
if cdrom_spec:
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
def configure_cdrom_list(self, vm_obj):
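        """
        Function to configure CD-ROM devices from the list form of the 'cdrom' parameter,
        adding, editing or removing CD-ROMs on IDE controllers (SATA is not supported yet)
        Args:
            vm_obj: virtual machine object
        """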
configured_cdroms = self.sanitize_cdrom_params()
cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
# configure IDE CD-ROMs
if configured_cdroms['ide']:
ide_devices = self.get_vm_ide_devices(vm=vm_obj)
for expected_cdrom_spec in configured_cdroms['ide']:
ide_device = None
for device in ide_devices:
if device.busNumber == expected_cdrom_spec['num']:
ide_device = device
break
                # if no matching IDE controller was found, or none exists yet, create one
if not ide_device:
ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['num'])
ide_device = ide_ctl.device
self.change_detected = True
self.configspec.deviceChange.append(ide_ctl)
for cdrom in expected_cdrom_spec['cdrom']:
cdrom_device = None
iso_path = cdrom.get('iso_path')
unit_number = cdrom.get('unit_number')
for target_cdrom in cdrom_devices:
if target_cdrom.controllerKey == ide_device.key and target_cdrom.unitNumber == unit_number:
cdrom_device = target_cdrom
break
# create new CD-ROM
if not cdrom_device and cdrom.get('state') != 'absent':
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                            self.module.fail_json(msg='CD-ROMs attached to an IDE controller do not support hot-add.')
if len(ide_device.device) == 2:
                            self.module.fail_json(msg='The maximum number of CD-ROMs attached to an IDE controller is 2.')
cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=cdrom['type'],
iso_path=iso_path, unit_number=unit_number)
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
# re-configure CD-ROM
elif cdrom_device and cdrom.get('state') != 'absent' and \
not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device,
cdrom_type=cdrom['type'], iso_path=iso_path):
self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path)
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_device
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
# delete CD-ROM
elif cdrom_device and cdrom.get('state') == 'absent':
if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
                            self.module.fail_json(msg='CD-ROMs attached to an IDE controller do not support hot-remove.')
cdrom_spec = self.device_helper.remove_cdrom(cdrom_device)
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
        # configuring SATA CD-ROMs is not supported yet
if configured_cdroms['sata']:
pass
def configure_hardware_params(self, vm_obj):
"""
Function to configure hardware related configuration of virtual machine
Args:
vm_obj: virtual machine object
"""
if 'hardware' in self.params:
if 'max_connections' in self.params['hardware']:
# maxMksConnections == max_connections
self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections:
self.change_detected = True
if 'nested_virt' in self.params['hardware']:
self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
self.change_detected = True
if 'version' in self.params['hardware']:
hw_version_check_failed = False
temp_version = self.params['hardware'].get('version', 10)
if isinstance(temp_version, str) and temp_version.lower() == 'latest':
# Check is to make sure vm_obj is not of type template
if vm_obj and not vm_obj.config.template:
try:
task = vm_obj.UpgradeVM_Task()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
else:
try:
temp_version = int(temp_version)
except ValueError:
hw_version_check_failed = True
if temp_version not in range(3, 16):
hw_version_check_failed = True
if hw_version_check_failed:
self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
" values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
# Hardware version is denoted as "vmx-10"
version = "vmx-%02d" % temp_version
self.configspec.version = version
if vm_obj is None or self.configspec.version != vm_obj.config.version:
self.change_detected = True
# Check is to make sure vm_obj is not of type template
if vm_obj and not vm_obj.config.template:
# VM exists and we need to update the hardware version
current_version = vm_obj.config.version
# current_version = "vmx-10"
version_digit = int(current_version.split("-", 1)[-1])
if temp_version < version_digit:
self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
" version '%d'. Downgrading hardware version is"
" not supported. Please specify version greater"
" than the current version." % (version_digit,
temp_version))
new_version = "vmx-%02d" % temp_version
try:
task = vm_obj.UpgradeVM_Task(new_version)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
if 'virt_based_security' in self.params['hardware']:
host_version = self.select_host().summary.config.product.version
if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7):
self.module.fail_json(msg="ESXi version %s not support VBS." % host_version)
guest_ids = ['windows9_64Guest', 'windows9Server64Guest']
if vm_obj is None:
guestid = self.configspec.guestId
else:
guestid = vm_obj.summary.config.guestId
if guestid not in guest_ids:
self.module.fail_json(msg="Guest '%s' not support VBS." % guestid)
if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \
(vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)):
self.configspec.flags = vim.vm.FlagInfo()
self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security'])
if bool(self.params['hardware']['virt_based_security']):
self.configspec.flags.vvtdEnabled = True
self.configspec.nestedHVEnabled = True
if (vm_obj is None and self.configspec.firmware == 'efi') or \
(vm_obj and vm_obj.config.firmware == 'efi'):
self.configspec.bootOptions = vim.vm.BootOptions()
self.configspec.bootOptions.efiSecureBootEnabled = True
else:
self.module.fail_json(msg="Not support VBS when firmware is BIOS.")
if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled:
self.change_detected = True
def get_device_by_type(self, vm=None, type=None):
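        """
        Function to return all devices of the virtual machine matching the given pyVmomi device type
        Args:
            vm: virtual machine object
            type: pyVmomi device class, e.g. vim.vm.device.VirtualCdrom
        Returns: A list of matching devices, possibly empty
        """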
device_list = []
if vm is None or type is None:
return device_list
for device in vm.config.hardware.device:
if isinstance(device, type):
device_list.append(device)
return device_list
def get_vm_cdrom_devices(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
def get_vm_ide_devices(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
def get_vm_network_interfaces(self, vm=None):
device_list = []
if vm is None:
return device_list
nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
for device in vm.config.hardware.device:
if isinstance(device, nw_device_types):
device_list.append(device)
return device_list
def sanitize_network_params(self):
"""
Sanitize user provided network provided params
Returns: A sanitized list of network params, else fails
"""
network_devices = list()
# Clean up user data here
for network in self.params['networks']:
if 'name' not in network and 'vlan' not in network:
self.module.fail_json(msg="Please specify at least a network name or"
" a VLAN name under VM network list.")
if 'name' in network and self.cache.get_network(network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
elif 'vlan' in network:
dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
network['name'] = dvp.config.name
break
if 'dvswitch_name' in network and \
dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
if dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
if 'type' in network:
if network['type'] not in ['dhcp', 'static']:
self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
" Valid parameters are ['dhcp', 'static']." % network)
if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
' but "type" is set to "%(type)s".' % network)
else:
# Type is optional parameter, if user provided IP or Subnet assume
# network type as 'static'
if 'ip' in network or 'netmask' in network:
network['type'] = 'static'
else:
# User wants network type as 'dhcp'
network['type'] = 'dhcp'
if network.get('type') == 'static':
if 'ip' in network and 'netmask' not in network:
self.module.fail_json(msg="'netmask' is required if 'ip' is"
" specified under VM network list.")
if 'ip' not in network and 'netmask' in network:
self.module.fail_json(msg="'ip' is required if 'netmask' is"
" specified under VM network list.")
validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
if 'device_type' in network and network['device_type'] not in validate_device_types:
self.module.fail_json(msg="Device type specified '%s' is not valid."
" Please specify correct device"
" type from ['%s']." % (network['device_type'],
"', '".join(validate_device_types)))
if 'mac' in network and not is_mac(network['mac']):
self.module.fail_json(msg="Device MAC address '%s' is invalid."
" Please provide correct MAC address." % network['mac'])
network_devices.append(network)
return network_devices
def configure_network(self, vm_obj):
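        """
        Function to add or edit virtual network adapters according to the 'networks' parameter,
        supporting standard port groups, distributed port groups and NSX-T opaque networks
        Args:
            vm_obj: virtual machine object
        """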
# Ignore empty networks, this permits to keep networks when deploying a template/cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = self.sanitize_network_params()
# List current device for Clone or Idempotency
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
nic_change_detected = False
network_name = network_devices[key]['name']
if key < len(current_net_devices) and (vm_obj or self.params['template']):
                # We are editing existing network devices; this is the case when
                # we are cloning from a VM or a template
nic = vim.vm.device.VirtualDeviceSpec()
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
nic.device = current_net_devices[key]
if ('wake_on_lan' in network_devices[key] and
nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
nic_change_detected = True
if ('start_connected' in network_devices[key] and
nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
nic.device.connectable.startConnected = network_devices[key].get('start_connected')
nic_change_detected = True
if ('allow_guest_control' in network_devices[key] and
nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
nic_change_detected = True
if nic.device.deviceInfo.summary != network_name:
nic.device.deviceInfo.summary = network_name
nic_change_detected = True
if 'device_type' in network_devices[key]:
device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
device_class = type(device)
if not isinstance(nic.device, device_class):
self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
"The failing device type is %s" % network_devices[key]['device_type'])
# Changing mac address has no effect when editing interface
if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
"The failing new MAC address is %s" % nic.device.macAddress)
else:
# Default device type is vmxnet3, VMware best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_name), 'portKeys'):
# VDS switch
pg_obj = None
if 'dvswitch_name' in network_devices[key]:
dvs_name = network_devices[key]['dvswitch_name']
dvs_obj = find_dvs_by_name(self.content, dvs_name)
if dvs_obj is None:
self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
pg_obj = find_dvspg_by_name(dvs_obj, network_name)
if pg_obj is None:
self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
else:
pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
# TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
# For now, check if we are able to find distributed virtual switch
if not pg_obj.config.distributedVirtualSwitch:
self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
" distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
" the given distributed virtual portgroup. Also, check if user has correct"
" permission to access distributed virtual switch in the given portgroup." % pg_obj.name)
if (nic.device.backing and
(not hasattr(nic.device.backing, 'port') or
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
                    # If the user specifies a distributed port group that is not associated with the
                    # host system on which the virtual machine will be deployed, the operation fails,
                    # so check that association up front.
host_system = self.params.get('esxi_hostname')
if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
" virtual portgroup '%s'. Please make sure host system is associated"
" with given distributed virtual portgroup" % (host_system, pg_obj.name))
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
# NSX-T Logical Switch
nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
nic.device.backing.opaqueNetworkId = network_id
nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
nic_change_detected = True
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_name)
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_name:
nic.device.backing.deviceName = network_name
nic_change_detected = True
if nic_change_detected:
# Change to fix the issue found while configuring opaque network
# VMs cloned from a template with opaque network will get disconnected
# Replacing deprecated config parameter with relocation Spec
if isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
self.relospec.deviceChange.append(nic)
else:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def configure_vapp_properties(self, vm_obj):
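        """
        Function to add, edit or remove vApp properties according to the 'vapp_properties' parameter
        Args:
            vm_obj: virtual machine object
        """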
if len(self.params['vapp_properties']) == 0:
return
for x in self.params['vapp_properties']:
if not x.get('id'):
self.module.fail_json(msg="id is required to set vApp property")
new_vmconfig_spec = vim.vApp.VmConfigSpec()
if vm_obj:
# VM exists
# This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
# each property must have a unique key
# init key counter with max value + 1
all_keys = [x.key for x in orig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
for property_id, property_spec in vapp_properties_to_change.items():
is_property_changed = False
new_vapp_property_spec = vim.vApp.PropertySpec()
if property_id in vapp_properties_current:
if property_spec.get('operation') == 'remove':
new_vapp_property_spec.operation = 'remove'
new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
is_property_changed = True
else:
# this is 'edit' branch
new_vapp_property_spec.operation = 'edit'
new_vapp_property_spec.info = vapp_properties_current[property_id]
try:
for property_name, property_value in property_spec.items():
if property_name == 'operation':
# operation is not an info object property
# if set to anything other than 'remove' we don't fail
continue
# Updating attributes only if needed
if getattr(new_vapp_property_spec.info, property_name) != property_value:
setattr(new_vapp_property_spec.info, property_name, property_value)
is_property_changed = True
except Exception as e:
msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e))
self.module.fail_json(msg=msg)
else:
if property_spec.get('operation') == 'remove':
# attempt to delete non-existent property
continue
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
else:
# New VM
all_keys = [x.key for x in new_vmconfig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
is_property_changed = False
for property_id, property_spec in vapp_properties_to_change.items():
new_vapp_property_spec = vim.vApp.PropertySpec()
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
if new_vmconfig_spec.property:
self.configspec.vAppConfig = new_vmconfig_spec
self.change_detected = True
def customize_customvalues(self, vm_obj, config_spec):
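        """
        Function to append extraConfig options to the given config spec for every entry in
        'customvalues' whose value differs from the current VM facts
        Args:
            vm_obj: virtual machine object
            config_spec: vim.vm.ConfigSpec to receive the extraConfig options
        """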
if len(self.params['customvalues']) == 0:
return
vm_custom_spec = config_spec
vm_custom_spec.extraConfig = []
changed = False
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
if 'key' not in kv or 'value' not in kv:
self.module.exit_json(msg="customvalues items required both 'key' and 'value' fields.")
# If kv is not kv fetched from facts, change it
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
option = vim.option.OptionValue()
option.key = kv['key']
option.value = kv['value']
vm_custom_spec.extraConfig.append(option)
changed = True
if changed:
self.change_detected = True
def customize_vm(self, vm_obj):
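        """
        Function to build the guest OS customization specification (self.customspec),
        either from an existing customization spec name or from the 'networks' and
        'customization' parameters (Sysprep for Windows guests, LinuxPrep otherwise)
        Args:
            vm_obj: virtual machine object
        """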
# User specified customization specification
custom_spec_name = self.params.get('customization_spec')
if custom_spec_name:
cc_mgr = self.content.customizationSpecManager
if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
self.customspec = temp_spec.spec
return
else:
self.module.fail_json(msg="Unable to find customization specification"
" '%s' in given configuration." % custom_spec_name)
# Network settings
adaptermaps = []
for network in self.params['networks']:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
if 'ip' in network and 'netmask' in network:
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
elif 'type' in network and network['type'] == 'dhcp':
guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif 'domain' in self.params['customization']:
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif 'dns_servers' in self.params['customization']:
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization']['dns_servers']
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization']:
dns_suffix = self.params['customization']['dns_suffix']
if isinstance(dns_suffix, list):
globalip.dnsSuffixList = " ".join(dns_suffix)
else:
globalip.dnsSuffixList = dns_suffix
elif 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['domain']
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
# For windows guest OS, use SysPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
            # Setting hostName, orgName and fullName is mandatory, so we set some defaults when they are missing
ident.userData.computerName = vim.vm.customization.FixedName()
# computer name will be truncated to 15 characters if using VM name
default_name = self.params['name'].replace(' ', '')
punctuation = string.punctuation.replace('-', '')
default_name = ''.join([c for c in default_name if c not in punctuation])
ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15]))
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
if 'productid' in self.params['customization']:
ident.userData.productId = str(self.params['customization']['productid'])
ident.guiUnattended = vim.vm.customization.GuiUnattended()
if 'autologon' in self.params['customization']:
ident.guiUnattended.autoLogon = self.params['customization']['autologon']
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
if 'timezone' in self.params['customization']:
                # Check if the timezone value is an int before proceeding.
ident.guiUnattended.timeZone = self.device_helper.integer_value(
self.params['customization']['timezone'],
'customization.timezone')
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
ident.identification.joinDomain = str(self.params['customization']['joindomain'])
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
# For Linux guest OS, use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization']['domain'])
ident.hostName = vim.vm.customization.FixedName()
hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
# Remove all characters except alphanumeric and minus which is allowed by RFC 952
valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
ident.hostName.name = valid_hostname
# List of supported time zones for different vSphere versions in Linux/Unix systems
# https://kb.vmware.com/s/article/2145518
if 'timezone' in self.params['customization']:
ident.timeZone = str(self.params['customization']['timezone'])
if 'hwclockUTC' in self.params['customization']:
ident.hwClockUTC = self.params['customization']['hwclockUTC']
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
def get_vm_scsi_controller(self, vm_obj):
# If vm_obj doesn't exist there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
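        """
        Function to parse the disk size from 'size' (e.g. '40gb') or from one of the
        size_tb/size_gb/size_mb/size_kb attributes and return it in kilobytes,
        e.g. {'size_gb': 10} returns 10 * 1024 ** 2 = 10485760
        Args:
            expected_disk_spec: a single disk spec from the 'disk' parameter
        Returns: Disk size in kilobytes, else fails
        """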
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
if 'size' in expected_disk_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(expected_disk_spec['size'])
try:
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
if not expected or not unit:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="Failed to parse disk size please review value"
" provided using documentation.")
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
if unit in disk_units:
unit = unit.lower()
return expected * (1024 ** disk_units[unit])
else:
self.module.fail_json(msg="%s is not a supported unit for disk size."
" Supported units are ['%s']." % (unit,
"', '".join(disk_units.keys())))
        # A disk was specified but no size was found, fail
        self.module.fail_json(
            msg="No size, size_kb, size_mb, size_gb or size_tb attribute found in the disk configuration")
def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
"""
Adds vmdk file described by expected_disk_spec['filename'], retrieves the file
information and adds the correct spec to self.configspec.deviceChange.
"""
filename = expected_disk_spec['filename']
# If this is a new disk, or the disk file names are different
if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None:
diskspec.device.backing.fileName = filename
diskspec.device.key = -1
self.change_detected = True
self.configspec.deviceChange.append(diskspec)
def configure_disks(self, vm_obj):
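        """
        Function to create or edit the SCSI controller and virtual disks described by the
        'disk' parameter; removing disks or reducing their size is not allowed
        Args:
            vm_obj: virtual machine object
        """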
# Ignore empty disk list, this permits to keep disks when deploying a template/cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
# Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
            # If we are manipulating an existing object which has disks and disk_index is within the existing disks
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
# increment index for next disk search
disk_index += 1
            # index 7 is reserved for the SCSI controller
if disk_index == 7:
disk_index += 1
if 'disk_mode' in expected_disk_spec:
disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
if disk_mode not in valid_disk_mode:
self.module.fail_json(msg="disk_mode specified is not valid."
" Should be one of ['%s']" % "', '".join(valid_disk_mode))
if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
diskspec.device.backing.diskMode = disk_mode
disk_modified = True
else:
diskspec.device.backing.diskMode = "persistent"
# is it thin?
if 'type' in expected_disk_spec:
disk_type = expected_disk_spec.get('type', '').lower()
if disk_type == 'thin':
diskspec.device.backing.thinProvisioned = True
elif disk_type == 'eagerzeroedthick':
diskspec.device.backing.eagerlyScrub = True
if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None:
self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl)
continue
elif vm_obj is None or self.params['template']:
# We are creating new VM or from Template
# Only create virtual device if not backed by vmdk in original template
if diskspec.device.backing.fileName == '':
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
# which datastore?
if expected_disk_spec.get('datastore'):
# TODO: This is already handled by the relocation spec,
# but it needs to eventually be handled for all the
# other disks defined
pass
kb = self.get_configured_disk_size(expected_disk_spec)
            # VMware doesn't allow reducing disk sizes
if kb < diskspec.device.capacityInKB:
                self.module.fail_json(
                    msg="Given disk size is smaller than the current size (%d < %d). Reducing disk size is not allowed." %
                        (kb, diskspec.device.capacityInKB))
if kb != diskspec.device.capacityInKB or disk_modified:
diskspec.device.capacityInKB = kb
self.configspec.deviceChange.append(diskspec)
self.change_detected = True
def select_host(self):
hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
if not hostsystem:
self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)
if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode:
self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' % self.params)
return hostsystem
def autoselect_datastore(self):
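        """
        Function to return the valid datastore with the most free space
        Returns: Datastore managed object, or None if no datastore qualifies
        """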
datastore = None
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if not self.is_datastore_valid(datastore_obj=ds):
continue
if ds.summary.freeSpace > datastore_freespace:
datastore = ds
datastore_freespace = ds.summary.freeSpace
return datastore
def get_recommended_datastore(self, datastore_cluster_obj=None):
"""
Function to return Storage DRS recommended datastore from datastore cluster
Args:
datastore_cluster_obj: datastore cluster managed object
Returns: Name of recommended datastore from the given datastore cluster
"""
if datastore_cluster_obj is None:
return None
# Check if Datastore Cluster provided by user is SDRS ready
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
if sdrs_status:
            # We can get a storage recommendation only if SDRS is enabled on the given datastore cluster
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
pod_sel_spec.storagePod = datastore_cluster_obj
storage_spec = vim.storageDrs.StoragePlacementSpec()
storage_spec.podSelectionSpec = pod_sel_spec
storage_spec.type = 'create'
try:
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
rec_action = rec.recommendations[0].action[0]
return rec_action.destination.name
except Exception:
# There is some error so we fall back to general workflow
pass
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
if not self.is_datastore_valid(datastore_obj=ds):
continue
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
return datastore.name
return None
def select_datastore(self, vm_obj=None):
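        """
        Function to select the destination datastore for the virtual machine based on the
        'disk' parameter (autoselect_datastore or an explicit datastore / datastore cluster),
        falling back to the template's datastore
        Args:
            vm_obj: virtual machine object
        Returns: Tuple of (datastore managed object, datastore name), else fails
        """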
datastore = None
datastore_name = None
if len(self.params['disk']) != 0:
# TODO: really use the datastore for newly created disks
if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
datastores = []
if self.params['cluster']:
cluster = self.find_cluster_by_name(self.params['cluster'], self.content)
for host in cluster.host:
for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
if mi.volume.type == "VMFS":
datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
elif self.params['esxi_hostname']:
host = self.find_hostsystem_by_name(self.params['esxi_hostname'])
for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
if mi.volume.type == "VMFS":
datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
else:
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
datastore_freespace = 0
for ds in datastores:
if not self.is_datastore_valid(datastore_obj=ds):
continue
if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
# If datastore field is provided, filter destination datastores
if 'datastore' in self.params['disk'][0] and \
isinstance(self.params['disk'][0]['datastore'], str) and \
ds.name.find(self.params['disk'][0]['datastore']) < 0:
continue
datastore = ds
datastore_name = datastore.name
datastore_freespace = ds.summary.freeSpace
elif 'datastore' in self.params['disk'][0]:
datastore_name = self.params['disk'][0]['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
                    # The user specified a datastore cluster, so get the SDRS-recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
if not datastore and self.params['template']:
# use the template's existing DS
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
if disks:
datastore = disks[0].backing.datastore
datastore_name = datastore.name
# validation
if datastore:
dc = self.cache.get_parent_datacenter(datastore)
if dc.name != self.params['datacenter']:
datastore = self.autoselect_datastore()
datastore_name = datastore.name
if not datastore:
if len(self.params['disk']) != 0 or self.params['template'] is None:
self.module.fail_json(msg="Unable to find the datastore with given parameters."
" This could mean, %s is a non-existent virtual machine and module tried to"
" deploy it as new virtual machine with no disk. Please specify disks parameter"
" or specify template to clone from." % self.params['name'])
self.module.fail_json(msg="Failed to find a matching datastore")
return datastore, datastore_name
def obj_has_parent(self, obj, parent):
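        """
        Function to walk up the parent chain of obj and check whether parent is one of its ancestors
        Args:
            obj: managed object to start from
            parent: managed object to look for (compared by name)
        Returns: True if parent is found in the ancestry, False once the root folder is reached
        """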
if obj is None and parent is None:
raise AssertionError()
current_parent = obj
while True:
if current_parent.name == parent.name:
return True
            # Check if we have reached the root folder
moid = current_parent._moId
if moid in ['group-d1', 'ha-folder-root']:
return False
current_parent = current_parent.parent
if current_parent is None:
return False
def get_scsi_type(self):
disk_controller_type = "paravirtual"
        # determine the SCSI controller type from the hardware parameters
if 'hardware' in self.params:
if 'scsi' in self.params['hardware']:
if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
disk_controller_type = self.params['hardware']['scsi']
else:
self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
return disk_controller_type
def find_folder(self, searchpath):
""" Walk inventory objects one position of the searchpath at a time """
# split the searchpath so we can iterate through it
paths = [x.replace('/', '') for x in searchpath.split('/')]
paths_total = len(paths) - 1
position = 0
# recursive walk while looking for next element in searchpath
root = self.content.rootFolder
while root and position <= paths_total:
change = False
if hasattr(root, 'childEntity'):
for child in root.childEntity:
if child.name == paths[position]:
root = child
position += 1
change = True
break
elif isinstance(root, vim.Datacenter):
if hasattr(root, 'vmFolder'):
if root.vmFolder.name == paths[position]:
root = root.vmFolder
position += 1
change = True
else:
root = None
if not change:
root = None
return root
def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
""" Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """
cluster_name = cluster or self.params.get('cluster', None)
host_name = host or self.params.get('esxi_hostname', None)
resource_pool_name = resource_pool or self.params.get('resource_pool', None)
# get the datacenter object
datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if not datacenter:
self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])
# if cluster is given, get the cluster object
if cluster_name:
cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
if not cluster:
self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
# if host is given, get the cluster object using the host
elif host_name:
host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
if not host:
self.module.fail_json(msg='Unable to find host "%s"' % host_name)
cluster = host.parent
else:
cluster = None
# get resource pools limiting search to cluster or datacenter
resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter)
if not resource_pool:
if resource_pool_name:
self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
else:
self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
return resource_pool
def deploy_vm(self):
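        """
        Function to create a new virtual machine, either by cloning a template / existing VM
        or by creating one from scratch, and to apply post-creation settings such as
        annotation, custom values, power state and the configured wait conditions
        Returns: Result dict describing the deployed virtual machine, or a failure dict
        """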
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# FIXME:
# - static IPs
self.folder = self.params.get('folder', None)
if self.folder is None:
self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")
# Prepend / if it was missing from the folder path, also strip trailing slashes
if not self.folder.startswith('/'):
self.folder = '/%(folder)s' % self.params
self.folder = self.folder.rstrip('/')
datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if datacenter is None:
self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
dcpath = compile_folder_path_for_object(datacenter)
# Nested folder does not have trailing /
if not dcpath.endswith('/'):
dcpath += '/'
# Check for full path first in case it was already supplied
if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
fullpath = self.folder
elif self.folder.startswith('/vm/') or self.folder == '/vm':
fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
elif self.folder.startswith('/'):
fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
else:
fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)
f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)
# abort if no strategy was successful
if f_obj is None:
# Add some debugging values in failure.
details = {
'datacenter': datacenter.name,
'datacenter_path': dcpath,
'folder': self.folder,
'full_search_path': fullpath,
}
self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
details=details)
destfolder = f_obj
if self.params['template']:
vm_obj = self.get_vm_or_template(template_name=self.params['template'])
if vm_obj is None:
self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
else:
vm_obj = None
# always get a resource_pool
resource_pool = self.get_resource_pool()
# set the destination datastore for VM & disks
if self.params['datastore']:
# Give precedence to datastore value provided by user
# User may want to deploy VM to specific datastore.
datastore_name = self.params['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
                # The user specified a datastore cluster, so get the SDRS-recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
(datastore, datastore_name) = self.select_datastore(vm_obj)
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
# create the relocation spec
self.relospec = vim.vm.RelocateSpec()
self.relospec.deviceChange = []
self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
self.configure_hardware_params(vm_obj=vm_obj)
self.configure_resource_alloc_info(vm_obj=vm_obj)
self.configure_vapp_properties(vm_obj=vm_obj)
self.configure_disks(vm_obj=vm_obj)
self.configure_network(vm_obj=vm_obj)
self.configure_cdrom(vm_obj=vm_obj)
        # Find out if we need network customizations (look for keys in the dictionary that require customization)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key == 'type' and nw['type'] == 'dhcp':
network_changes = True
break
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None:
self.customize_vm(vm_obj=vm_obj)
clonespec = None
clone_method = None
try:
if self.params['template']:
# Only select specific host when ESXi hostname is provided
if self.params['esxi_hostname']:
self.relospec.host = self.select_host()
self.relospec.datastore = datastore
                # Convert disks present in the template if 'convert' is set
if self.params['convert']:
for device in vm_obj.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualDisk):
disk_locator = vim.vm.RelocateSpec.DiskLocator()
disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if self.params['convert'] in ['thin']:
disk_locator.diskBackingInfo.thinProvisioned = True
if self.params['convert'] in ['eagerzeroedthick']:
disk_locator.diskBackingInfo.eagerlyScrub = True
if self.params['convert'] in ['thick']:
disk_locator.diskBackingInfo.diskMode = "persistent"
disk_locator.diskId = device.key
disk_locator.datastore = datastore
self.relospec.disk.append(disk_locator)
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# > pool: For a clone operation from a template to a virtual machine, this argument is required.
self.relospec.pool = resource_pool
linked_clone = self.params.get('linked_clone')
snapshot_src = self.params.get('snapshot_src', None)
if linked_clone:
if snapshot_src is not None:
self.relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
else:
self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
" required together for linked clone operation.")
clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=self.relospec)
if self.customspec:
clonespec.customization = self.customspec
if snapshot_src is not None:
if vm_obj.snapshot is None:
self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params)
snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
snapname=snapshot_src)
if len(snapshot) != 1:
self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
' snapshot named "%(snapshot_src)s"' % self.params)
clonespec.snapshot = snapshot[0].snapshot
clonespec.config = self.configspec
clone_method = 'Clone'
try:
task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
"due to permission issue: %s" % (self.params['name'],
destfolder,
to_native(e.msg)))
self.change_detected = True
else:
                # A ConfigSpec requires a name for VM creation
self.configspec.name = self.params['name']
self.configspec.files = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[" + datastore_name + "]")
clone_method = 'CreateVM_Task'
try:
task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
except vmodl.fault.InvalidRequest as e:
self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
"parameter %s" % to_native(e.msg))
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to create virtual machine due to "
"product versioning restrictions: %s" % to_native(e.msg))
self.change_detected = True
self.wait_for_task(task)
except TypeError as e:
self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
# provide these to the user for debugging
clonespec_json = serialize_spec(clonespec)
configspec_json = serialize_spec(self.configspec)
kwargs = {
'changed': self.change_applied,
'failed': True,
'msg': task.info.error.msg,
'clonespec': clonespec_json,
'configspec': configspec_json,
'clone_method': clone_method
}
return kwargs
else:
# set annotation
vm = task.info.result
if self.params['annotation']:
annotation_spec = vim.vm.ConfigSpec()
annotation_spec.annotation = str(self.params['annotation'])
task = vm.ReconfigVM_Task(annotation_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'}
if self.params['customvalues']:
vm_custom_spec = vim.vm.ConfigSpec()
self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
task = vm.ReconfigVM_Task(vm_custom_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customvalues'}
if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']:
set_vm_power_state(self.content, vm, 'poweredon', force=False)
if self.params['wait_for_ip_address']:
wait_for_vm_ip(self.content, vm, self.params['wait_for_ip_address_timeout'])
if self.params['wait_for_customization']:
is_customization_ok = self.wait_for_customization(vm=vm, timeout=self.params['wait_for_customization_timeout'])
if not is_customization_ok:
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'}
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def get_snapshots_by_name_recursively(self, snapshots, snapname):
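        # Depth-first search of the snapshot tree: for a hypothetical tree
        # root -> [snapA -> [snapB]], searching for "snapB" recurses through
        # childSnapshotList and returns the matching snapshot tree node(s).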
snap_obj = []
for snapshot in snapshots:
if snapshot.name == snapname:
snap_obj.append(snapshot)
else:
snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
return snap_obj
def reconfigure_vm(self):
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
# create the relocation spec
self.relospec = vim.vm.RelocateSpec()
self.relospec.deviceChange = []
self.configure_guestid(vm_obj=self.current_vm_obj)
self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
self.configure_hardware_params(vm_obj=self.current_vm_obj)
self.configure_disks(vm_obj=self.current_vm_obj)
self.configure_network(vm_obj=self.current_vm_obj)
self.configure_cdrom(vm_obj=self.current_vm_obj)
self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
self.configure_vapp_properties(vm_obj=self.current_vm_obj)
if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
self.configspec.annotation = str(self.params['annotation'])
self.change_detected = True
if self.params['resource_pool']:
self.relospec.pool = self.get_resource_pool()
if self.relospec.pool != self.current_vm_obj.resourcePool:
task = self.current_vm_obj.RelocateVM_Task(spec=self.relospec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'}
# Only send VMware task if we see a modification
if self.change_detected:
task = None
try:
task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
" product versioning restrictions: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'}
# Rename VM
if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
task = self.current_vm_obj.Rename_Task(self.params['name'])
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'}
# Mark VM as Template
if self.params['is_template'] and not self.current_vm_obj.config.template:
try:
self.current_vm_obj.MarkAsTemplate()
self.change_applied = True
except vmodl.fault.NotSupported as e:
self.module.fail_json(msg="Failed to mark virtual machine [%s] "
"as template: %s" % (self.params['name'], e.msg))
# Mark Template as VM
elif not self.params['is_template'] and self.current_vm_obj.config.template:
resource_pool = self.get_resource_pool()
kwargs = dict(pool=resource_pool)
if self.params.get('esxi_hostname', None):
host_system_obj = self.select_host()
kwargs.update(host=host_system_obj)
try:
self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
self.change_applied = True
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(msg="Virtual machine is not marked"
" as template : %s" % to_native(invalid_state.msg))
except vim.fault.InvalidDatastore as invalid_ds:
self.module.fail_json(msg="Converting template to virtual machine"
" operation cannot be performed on the"
" target datastores: %s" % to_native(invalid_ds.msg))
except vim.fault.CannotAccessVmComponent as cannot_access:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" as operation unable access virtual machine"
" component: %s" % to_native(cannot_access.msg))
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to : %s" % to_native(invalid_argument.msg))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to generic error : %s" % to_native(generic_exc))
# Automatically update VMware UUID when converting template to VM.
# This avoids an interactive prompt during VM startup.
uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
if not uuid_action:
uuid_action_opt = vim.option.OptionValue()
uuid_action_opt.key = "uuid.action"
uuid_action_opt.value = "create"
self.configspec.extraConfig.append(uuid_action_opt)
self.change_detected = True
        # Customize the existing VM after reconfiguration, if requested
if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']:
if self.current_vm_obj.config.template:
self.module.fail_json(msg="VM is template, not support guest OS customization.")
if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.")
cus_result = self.customize_exist_vm()
if cus_result['failed']:
return cus_result
vm_facts = self.gather_facts(self.current_vm_obj)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def customize_exist_vm(self):
task = None
        # Find out whether we need network customizations (keys in the dictionary that require customization)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
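                # e.g. static IP settings such as 'ip' or 'netmask' do require customization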
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'):
self.customize_vm(vm_obj=self.current_vm_obj)
try:
task = self.current_vm_obj.CustomizeVM_Task(self.customspec)
except vim.fault.CustomizationFault as e:
self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg))
except vim.fault.RuntimeFault as e:
self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg))
except Exception as e:
self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'}
if self.params['wait_for_customization']:
set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False)
is_customization_ok = self.wait_for_customization(vm=self.current_vm_obj, timeout=self.params['wait_for_customization_timeout'])
if not is_customization_ok:
return {'changed': self.change_applied, 'failed': True,
'msg': 'Wait for customization failed due to timeout', 'op': 'wait_for_customize_exist'}
return {'changed': self.change_applied, 'failed': False}
def wait_for_task(self, task, poll_interval=1):
"""
Wait for a VMware task to complete. Terminal states are 'error' and 'success'.
Inputs:
- task: the task to wait for
- poll_interval: polling interval to check the task, in seconds
Modifies:
- self.change_applied
"""
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
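        # Typical call pattern used throughout this module, e.g.:
        #   task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
        #   self.wait_for_task(task)
        #   if task.info.state == 'error': report task.info.error.msg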
while task.info.state not in ['error', 'success']:
time.sleep(poll_interval)
self.change_applied = self.change_applied or task.info.state == 'success'
def get_vm_events(self, vm, eventTypeIdList):
byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self")
filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList)
eventManager = self.content.eventManager
return eventManager.QueryEvent(filterSpec)
def wait_for_customization(self, vm, timeout=3600, sleep=10):
poll = int(timeout // sleep)
thispoll = 0
while thispoll <= poll:
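            # Phase 1: poll until the guest logs a CustomizationStartedEvent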
eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent'])
if len(eventStarted):
thispoll = 0
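                # Phase 2: customization has started; poll for a terminal
                # CustomizationSucceeded/CustomizationFailed event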
while thispoll <= poll:
eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed'])
if len(eventsFinishedResult):
if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded):
self.module.warn("Customization failed with error {%s}:{%s}"
% (eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage))
return False
else:
return True
else:
time.sleep(sleep)
thispoll += 1
if len(eventsFinishedResult) == 0:
self.module.warn('Waiting for customization result event timed out.')
return False
else:
time.sleep(sleep)
thispoll += 1
if len(eventStarted):
self.module.warn('Waiting for customization result event timed out.')
else:
self.module.warn('Waiting for customization start event timed out.')
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
template=dict(type='str', aliases=['template_src']),
is_template=dict(type='bool', default=False),
annotation=dict(type='str', aliases=['notes']),
customvalues=dict(type='list', default=[]),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
use_instance_uuid=dict(type='bool', default=False),
folder=dict(type='str'),
guest_id=dict(type='str'),
disk=dict(type='list', default=[]),
cdrom=dict(type=list_or_dict, default=[]),
hardware=dict(type='dict', default={}),
force=dict(type='bool', default=False),
datacenter=dict(type='str', default='ha-datacenter'),
esxi_hostname=dict(type='str'),
cluster=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
wait_for_ip_address_timeout=dict(type='int', default=300),
state_change_timeout=dict(type='int', default=0),
snapshot_src=dict(type='str'),
linked_clone=dict(type='bool', default=False),
networks=dict(type='list', default=[]),
resource_pool=dict(type='str'),
customization=dict(type='dict', default={}, no_log=True),
customization_spec=dict(type='str', default=None),
wait_for_customization=dict(type='bool', default=False),
wait_for_customization_timeout=dict(type='int', default=3600),
vapp_properties=dict(type='list', default=[]),
datastore=dict(type='str'),
convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']),
delete_from_inventory=dict(type='bool', default=False),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['cluster', 'esxi_hostname'],
],
required_one_of=[
['name', 'uuid'],
],
)
result = {'failed': False, 'changed': False}
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
# VM already exists
if vm:
if module.params['state'] == 'absent':
# destroy it
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='remove_vm',
)
module.exit_json(**result)
if module.params['force']:
# has to be poweredoff first
set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
result = pyv.remove_vm(vm, module.params['delete_from_inventory'])
elif module.params['state'] == 'present':
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
desired_operation='reconfigure_vm',
)
module.exit_json(**result)
result = pyv.reconfigure_vm()
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='set_vm_power_state',
)
module.exit_json(**result)
# set powerstate
tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
if tmp_result['changed']:
result["changed"] = True
if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']:
wait_result = wait_for_vm_ip(pyv.content, vm, module.params['wait_for_ip_address_timeout'])
if not wait_result:
module.fail_json(msg='Waiting for IP address timed out')
tmp_result['instance'] = wait_result
if not tmp_result["failed"]:
result["failed"] = False
result['instance'] = tmp_result['instance']
if tmp_result["failed"]:
result["failed"] = True
result["msg"] = tmp_result["msg"]
else:
# This should not happen
raise AssertionError()
# VM doesn't exist
else:
if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
if module.check_mode:
result.update(
changed=True,
desired_operation='deploy_vm',
)
module.exit_json(**result)
result = pyv.deploy_vm()
if result['failed']:
            module.fail_json(msg='Failed to create a virtual machine: %s' % result['msg'])
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
| 3,603,463,015,308,787,700 | -1,669,408,543,413,448,700 | 50.93603 | 159 | 0.590923 | false |
nexiles/odoo
|
addons/account/report/account_general_journal.py
|
381
|
7669
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class journal_print(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
if context is None:
context = {}
super(journal_print, self).__init__(cr, uid, name, context=context)
self.period_ids = []
self.journal_ids = []
        self.localcontext.update({
'time': time,
'lines': self.lines,
'periods': self.periods,
'sum_debit_period': self._sum_debit_period,
'sum_credit_period': self._sum_credit_period,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'get_fiscalyear': self._get_fiscalyear,
'get_account': self._get_account,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period,
'get_sortby': self._get_sortby,
'get_filter': self._get_filter,
'get_journal': self._get_journal,
            'get_start_date': self._get_start_date,
            'get_end_date': self._get_end_date,
            'display_currency': self._display_currency,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
new_ids = ids
self.query_get_clause = ''
self.target_move = data['form'].get('target_move', 'all')
if (data['model'] == 'ir.ui.menu'):
new_ids = 'active_ids' in data['form'] and data['form']['active_ids'] or []
self.query_get_clause = 'AND '
self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids)
if new_ids:
self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),))
res = self.cr.fetchall()
self.period_ids, self.journal_ids = zip(*res)
return super(journal_print, self).set_context(objects, data, ids, report_type=report_type)
# returns a list of period objs
def periods(self, journal_period_objs):
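        # Deduplicate by period: several journal/period records may share the same
        # period, but each period should appear only once in the report.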
dic = {}
def filter_unique(o):
key = o.period_id.id
res = key in dic
if not res:
dic[key] = True
return not res
filtered_objs = filter(filter_unique, journal_period_objs)
return map(lambda x: x.period_id, filtered_objs)
def lines(self, period_id):
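        # Returns one dict per (journal, amount_currency, currency) group for the
        # given period, with keys 'code', 'name', 'amount_currency', 'currency_code',
        # 'currency_id', 'debit' and 'credit' (mirroring the SELECT below).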
if not self.journal_ids:
return []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT j.code, j.name, l.amount_currency,c.symbol AS currency_code,l.currency_id, '
'SUM(l.debit) AS debit, SUM(l.credit) AS credit '
'FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'LEFT JOIN account_journal j ON (l.journal_id=j.id) '
'LEFT JOIN res_currency c on (l.currency_id=c.id)'
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' '
'GROUP BY j.id, j.code, j.name, l.amount_currency, c.symbol, l.currency_id ',
(tuple(move_state), period_id, tuple(self.journal_ids)))
return self.cr.dictfetchall()
def _set_get_account_currency_code(self, account_id):
self.cr.execute("SELECT c.symbol AS code "\
"FROM res_currency c, account_account AS ac "\
"WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id))
result = self.cr.fetchone()
if result:
self.account_currency = result[0]
else:
self.account_currency = False
def _get_account(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name
return super(journal_print, self)._get_account(data)
def _get_fiscalyear(self, data):
if data['model'] == 'account.journal.period':
return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name
return super(journal_print, self)._get_fiscalyear(data)
def _display_currency(self, data):
if data['model'] == 'account.journal.period':
return True
return data['form']['amount_currency']
def _sum_debit_period(self, period_id, journal_id=False):
if journal_id:
journals = [journal_id]
else:
journals = self.journal_ids
if not journals:
return 0.0
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT SUM(l.debit) FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ' \
'AND l.state<>\'draft\'',
(tuple(move_state), period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
def _sum_credit_period(self, period_id, journal_id=None):
if journal_id:
journals = [journal_id]
else:
journals = self.journal_ids
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if not journals:
return 0.0
self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l '
'LEFT JOIN account_move am ON (l.move_id=am.id) '
'WHERE am.state IN %s AND l.period_id=%s AND l.journal_id IN %s '+ self.query_get_clause + ' ' \
'AND l.state<>\'draft\'',
(tuple(move_state), period_id, tuple(journals)))
return self.cr.fetchone()[0] or 0.0
class report_generaljournal(osv.AbstractModel):
_name = 'report.account.report_generaljournal'
_inherit = 'report.abstract_report'
_template = 'account.report_generaljournal'
_wrapped_report_class = journal_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 8,534,206,137,786,329,000 | 5,689,690,338,317,777,000 | 44.64881 | 130 | 0.565393 | false |
jblackburne/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
7
|
55471
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
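    # For 0/1 labels with positive-class fraction p, the Gini impurity equals
    # 2 * p * (1 - p), i.e. exactly twice the label variance p * (1 - p), so
    # both criteria rank candidate splits identically.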
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # With the default settings, min_impurity_split should be below 1e-7,
        # so leaf nodes are split down to zero impurity
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_less_equal(est.min_impurity_split, 1e-7,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
est.fit(X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees capped at k + 1 leaves via max_leaf_nodes.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of the csr and csc matrices.
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
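# Illustrative note (added sketch, not part of the original test file): the
# (data, indices, indptr) triple used above follows standard scipy CSC
# semantics.  With invented numbers,
#     data    = np.array([1., 0., 2.], dtype=np.float32)  # note the stored 0.
#     indices = np.array([0, 2, 1])                       # row of each value
#     indptr  = np.array([0, 2, 3])                       # column j owns data[indptr[j]:indptr[j + 1]]
#     X = csc_matrix((data, indices, indptr), shape=(3, 2))
# gives X.toarray() == [[1., 0.], [0., 2.], [0., 0.]] while keeping one
# explicitly stored zero, which is exactly what the assertions on
# (X_sparse.data == 0.).sum() above rely on.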
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
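# Explanatory note (added, not original): in the hardcoded assertion above the
# three columns are the nodes of a depth-1 tree -- root (0), left child (1) and
# right child (2) -- and each row marks the nodes a sample passes through, so
# sample 0 is routed root -> node 1 while sample 1 is routed root -> node 2.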
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that the leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
# check MAE criterion produces correct results
# on small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
    dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3], [0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
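# Worked example for the unweighted fit above (added note, not original code).
# Assuming the builder splits the single feature between 3 and {5, 8}:
#   root : y = [6, 7, 3, 4, 3], median 4,   MAE = (2 + 3 + 1 + 0 + 1) / 5 = 1.4
#   leaf : y = [6, 3],          median 4.5, MAE = (1.5 + 1.5) / 2        = 1.5
#   leaf : y = [7, 4, 3],       median 4,   MAE = (3 + 0 + 1) / 3        = 4 / 3
# which matches the asserted impurities [1.4, 1.5, 4/3] and values [4, 4.5, 4].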
|
bsd-3-clause
| -6,865,101,186,399,792,000 | -5,945,352,113,859,119,000 | 35.784483 | 94 | 0.584215 | false |
ChronoMonochrome/android_external_chromium_org
|
tools/valgrind/valgrind_test.py
|
24
|
46017
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import common
import drmemory_analyze
import memcheck_analyze
import tsan_analyze
class BaseTool(object):
"""Abstract class for running Valgrind-, PIN-based and other dynamic
error detector tools.
Always subclass this and implement ToolCommand with framework- and
tool-specific stuff.
"""
def __init__(self):
temp_parent_dir = None
self.log_parent_dir = ""
if common.IsWindows():
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
#
# TODO(bruening): if scripts die in middle and don't clean up temp
# dir, we'll accumulate files in profile dir. should remove
# really old files automatically.
profile = os.getenv("USERPROFILE")
if profile:
self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
if os.path.exists(self.log_parent_dir):
self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
temp_parent_dir = self.log_parent_dir
# Generated every time (even when overridden)
self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
self.log_dir = self.temp_dir # overridable by --keep_logs
self.option_parser_hooks = []
# TODO(glider): we may not need some of the env vars on some of the
# platforms.
self._env = {
"G_SLICE" : "always-malloc",
"NSS_DISABLE_UNLOAD" : "1",
"NSS_DISABLE_ARENA_FREE_LIST" : "1",
"GTEST_DEATH_TEST_USE_FORK": "1",
}
def ToolName(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Analyze(self, check_sanity=False):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def RegisterOptionParserHook(self, hook):
# Frameworks and tools can add their own flags to the parser.
self.option_parser_hooks.append(hook)
def CreateOptionParser(self):
# Defines Chromium-specific flags.
self._parser = optparse.OptionParser("usage: %prog [options] <program to "
"test>")
self._parser.disable_interspersed_args()
self._parser.add_option("-t", "--timeout",
dest="timeout", metavar="TIMEOUT", default=10000,
help="timeout in seconds for the run (default 10000)")
self._parser.add_option("", "--build-dir",
help="the location of the compiler output")
self._parser.add_option("", "--source-dir",
help="path to top of source tree for this build"
"(used to normalize source paths in baseline)")
self._parser.add_option("", "--gtest_filter", default="",
help="which test case to run")
self._parser.add_option("", "--gtest_repeat",
help="how many times to run each test")
self._parser.add_option("", "--gtest_print_time", action="store_true",
default=False,
help="show how long each test takes")
self._parser.add_option("", "--ignore_exit_code", action="store_true",
default=False,
help="ignore exit code of the test "
"(e.g. test failures)")
self._parser.add_option("", "--keep_logs", action="store_true",
default=False,
help="store memory tool logs in the <tool>.logs "
"directory instead of /tmp.\nThis can be "
"useful for tool developers/maintainers.\n"
"Please note that the <tool>.logs directory "
"will be clobbered on tool startup.")
# To add framework- or tool-specific flags, please add a hook using
# RegisterOptionParserHook in the corresponding subclass.
# See ValgrindTool and ThreadSanitizerBase for examples.
for hook in self.option_parser_hooks:
hook(self, self._parser)
def ParseArgv(self, args):
self.CreateOptionParser()
# self._tool_flags will store those tool flags which we don't parse
# manually in this script.
self._tool_flags = []
known_args = []
""" We assume that the first argument not starting with "-" is a program
name and all the following flags should be passed to the program.
TODO(timurrrr): customize optparse instead
"""
while len(args) > 0 and args[0][:1] == "-":
arg = args[0]
if (arg == "--"):
break
if self._parser.has_option(arg.split("=")[0]):
known_args += [arg]
else:
self._tool_flags += [arg]
args = args[1:]
if len(args) > 0:
known_args += args
self._options, self._args = self._parser.parse_args(known_args)
self._timeout = int(self._options.timeout)
self._source_dir = self._options.source_dir
if self._options.keep_logs:
# log_parent_dir has trailing slash if non-empty
self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
os.mkdir(self.log_dir)
logging.info("Logs are in " + self.log_dir)
self._ignore_exit_code = self._options.ignore_exit_code
if self._options.gtest_filter != "":
self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
if self._options.gtest_repeat:
self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_print_time:
self._args.append("--gtest_print_time")
return True
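  # Illustrative sketch (added, not part of the original file): given an argv
  # such as
  #   ["--timeout=60", "--smc-check=all", "out/Debug/unit_tests", "--gtest_filter=Foo.*"]
  # the loop above keeps "--timeout=60" in known_args (the parser knows it),
  # moves "--smc-check=all" into self._tool_flags (unknown, so forwarded to the
  # tool), and everything from the first non-flag argument onwards becomes
  # self._args, i.e. the program to run plus its own flags.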
def Setup(self, args):
return self.ParseArgv(args)
def ToolCommand(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Cleanup(self):
# You may override it in the tool-specific subclass
pass
def Execute(self):
""" Execute the app to be tested after successful instrumentation.
Full execution command-line provided by subclassers via proc."""
logging.info("starting execution...")
proc = self.ToolCommand()
for var in self._env:
common.PutEnvAndLog(var, self._env[var])
return common.RunSubprocess(proc, self._timeout)
def RunTestsAndAnalyze(self, check_sanity):
exec_retcode = self.Execute()
analyze_retcode = self.Analyze(check_sanity)
if analyze_retcode:
logging.error("Analyze failed.")
logging.info("Search the log for '[ERROR]' to see the error reports.")
return analyze_retcode
if exec_retcode:
if self._ignore_exit_code:
logging.info("Test execution failed, but the exit code is ignored.")
else:
logging.error("Test execution failed.")
return exec_retcode
else:
logging.info("Test execution completed successfully.")
if not analyze_retcode:
logging.info("Analysis completed successfully.")
return 0
def Main(self, args, check_sanity, min_runtime_in_seconds):
"""Call this to run through the whole process: Setup, Execute, Analyze"""
start_time = datetime.datetime.now()
retcode = -1
if self.Setup(args):
retcode = self.RunTestsAndAnalyze(check_sanity)
shutil.rmtree(self.temp_dir, ignore_errors=True)
self.Cleanup()
else:
logging.error("Setup failed")
end_time = datetime.datetime.now()
runtime_in_seconds = (end_time - start_time).seconds
hours = runtime_in_seconds / 3600
seconds = runtime_in_seconds % 3600
minutes = seconds / 60
seconds = seconds % 60
logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
if (min_runtime_in_seconds > 0 and
runtime_in_seconds < min_runtime_in_seconds):
logging.error("Layout tests finished too quickly. "
"It should have taken at least %d seconds. "
"Something went wrong?" % min_runtime_in_seconds)
retcode = -1
return retcode
def Run(self, args, module, min_runtime_in_seconds=0):
MODULES_TO_SANITY_CHECK = ["base"]
# TODO(timurrrr): this is a temporary workaround for http://crbug.com/47844
if self.ToolName() == "tsan" and common.IsMac():
MODULES_TO_SANITY_CHECK = []
check_sanity = module in MODULES_TO_SANITY_CHECK
return self.Main(args, check_sanity, min_runtime_in_seconds)
class ValgrindTool(BaseTool):
"""Abstract class for running Valgrind tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def __init__(self):
super(ValgrindTool, self).__init__()
self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
def UseXML(self):
# Override if tool prefers nonxml output
return True
def SelfContained(self):
    # Returns true iff the tool is distributed as a self-contained
# .sh script (e.g. ThreadSanitizer)
return False
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a valgrind suppression file")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running valgrind directly")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running Dr. Memory "
"directly.")
parser.add_option("", "--trace_children", action="store_true",
default=False,
help="also trace child processes")
parser.add_option("", "--num-callers",
dest="num_callers", default=30,
help="number of callers to show in stack traces")
parser.add_option("", "--generate_dsym", action="store_true",
default=False,
help="Generate .dSYM file on Mac if needed. Slow!")
def Setup(self, args):
if not BaseTool.Setup(self, args):
return False
if common.IsMac():
self.PrepareForTestMac()
return True
def PrepareForTestMac(self):
"""Runs dsymutil if needed.
Valgrind for Mac OS X requires that debugging information be in a .dSYM
bundle generated by dsymutil. It is not currently able to chase DWARF
data into .o files like gdb does, so executables without .dSYM bundles or
with the Chromium-specific "fake_dsym" bundles generated by
build/mac/strip_save_dsym won't give source file and line number
information in valgrind.
This function will run dsymutil if the .dSYM bundle is missing or if
it looks like a fake_dsym. A non-fake dsym that already exists is assumed
to be up-to-date.
"""
test_command = self._args[0]
dsym_bundle = self._args[0] + '.dSYM'
dsym_file = os.path.join(dsym_bundle, 'Contents', 'Resources', 'DWARF',
os.path.basename(test_command))
dsym_info_plist = os.path.join(dsym_bundle, 'Contents', 'Info.plist')
needs_dsymutil = True
saved_test_command = None
if os.path.exists(dsym_file) and os.path.exists(dsym_info_plist):
# Look for the special fake_dsym tag in dsym_info_plist.
dsym_info_plist_contents = open(dsym_info_plist).read()
if not re.search('^\s*<key>fake_dsym</key>$', dsym_info_plist_contents,
re.MULTILINE):
# fake_dsym is not set, this is a real .dSYM bundle produced by
# dsymutil. dsymutil does not need to be run again.
needs_dsymutil = False
else:
# fake_dsym is set. dsym_file is a copy of the original test_command
# before it was stripped. Copy it back to test_command so that
# dsymutil has unstripped input to work with. Move the stripped
# test_command out of the way, it will be restored when this is
# done.
saved_test_command = test_command + '.stripped'
os.rename(test_command, saved_test_command)
shutil.copyfile(dsym_file, test_command)
shutil.copymode(saved_test_command, test_command)
if needs_dsymutil:
if self._options.generate_dsym:
# Remove the .dSYM bundle if it exists.
shutil.rmtree(dsym_bundle, True)
dsymutil_command = ['dsymutil', test_command]
# dsymutil is crazy slow. Ideally we'd have a timeout here,
# but common.RunSubprocess' timeout is only checked
# after each line of output; dsymutil is silent
# until the end, and is then killed, which is silly.
common.RunSubprocess(dsymutil_command)
if saved_test_command:
os.rename(saved_test_command, test_command)
else:
logging.info("No real .dSYM for test_command. Line numbers will "
"not be shown. Either tell xcode to generate .dSYM "
"file, or use --generate_dsym option to this tool.")
def ToolCommand(self):
"""Get the valgrind command to run."""
# Note that self._args begins with the exe to be run.
tool_name = self.ToolName()
# Construct the valgrind command.
if self.SelfContained():
proc = ["valgrind-%s.sh" % tool_name]
else:
if 'CHROME_VALGRIND' in os.environ:
path = os.path.join(os.environ['CHROME_VALGRIND'], "bin", "valgrind")
else:
path = "valgrind"
proc = [path, "--tool=%s" % tool_name]
proc += ["--num-callers=%i" % int(self._options.num_callers)]
if self._options.trace_children:
proc += ["--trace-children=yes"]
proc += ["--trace-children-skip='*dbus-daemon*'"]
proc += ["--trace-children-skip='*dbus-launch*'"]
proc += ["--trace-children-skip='*perl*'"]
proc += ["--trace-children-skip='*python*'"]
# This is really Python, but for some reason Valgrind follows it.
proc += ["--trace-children-skip='*lsb_release*'"]
proc += self.ToolSpecificFlags()
proc += self._tool_flags
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + ("/%s." % tool_name) + "%p"
if self.UseXML():
proc += ["--xml=yes", "--xml-file=" + logfilename]
else:
proc += ["--log-file=" + logfilename]
# The Valgrind command is constructed.
# Valgrind doesn't play nice with the Chrome sandbox. Empty this env var
# set by runtest.py to disable the sandbox.
if os.environ.get("CHROME_DEVEL_SANDBOX", None):
logging.info("Removing CHROME_DEVEL_SANDBOX fron environment")
os.environ["CHROME_DEVEL_SANDBOX"] = ''
# Handle --indirect_webkit_layout separately.
if self._options.indirect_webkit_layout:
# Need to create the wrapper before modifying |proc|.
wrapper = self.CreateBrowserWrapper(proc, webkit=True)
proc = self._args
proc.append("--wrapper")
proc.append(wrapper)
return proc
if self._options.indirect:
wrapper = self.CreateBrowserWrapper(proc)
os.environ["BROWSER_WRAPPER"] = wrapper
logging.info('export BROWSER_WRAPPER=' + wrapper)
proc = []
proc += self._args
return proc
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def CreateBrowserWrapper(self, proc, webkit=False):
"""The program being run invokes Python or something else that can't stand
to be valgrinded, and also invokes the Chrome browser. In this case, use a
magic wrapper to only valgrind the Chrome browser. Build the wrapper here.
Returns the path to the wrapper. It's up to the caller to use the wrapper
appropriately.
"""
command = " ".join(proc)
# Add the PID of the browser wrapper to the logfile names so we can
# separate log files for different UI tests at the analyze stage.
command = command.replace("%p", "$$.%p")
(fd, indirect_fname) = tempfile.mkstemp(dir=self.log_dir,
prefix="browser_wrapper.",
text=True)
f = os.fdopen(fd, "w")
f.write('#!/bin/bash\n'
'echo "Started Valgrind wrapper for this test, PID=$$" >&2\n')
f.write('DIR=`dirname $0`\n'
'TESTNAME_FILE=$DIR/testcase.$$.name\n\n')
if webkit:
# Webkit layout_tests pass the URL as the first line of stdin.
f.write('tee $TESTNAME_FILE | %s "$@"\n' % command)
else:
# Try to get the test case name by looking at the program arguments.
# i.e. Chromium ui_tests used --test-name arg.
# TODO(timurrrr): This doesn't handle "--test-name Test.Name"
# TODO(timurrrr): ui_tests are dead. Where do we use the non-webkit
# wrapper now? browser_tests? What do they do?
f.write('for arg in $@\ndo\n'
' if [[ "$arg" =~ --test-name=(.*) ]]\n then\n'
' echo ${BASH_REMATCH[1]} >$TESTNAME_FILE\n'
' fi\n'
'done\n\n'
'%s "$@"\n' % command)
f.close()
os.chmod(indirect_fname, stat.S_IRUSR|stat.S_IXUSR)
return indirect_fname
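  # Illustrative sketch (added, not original) of the non-webkit wrapper script
  # this method writes out; the trailing valgrind command line is only an
  # example of what the wrapped command may look like:
  #
  #   #!/bin/bash
  #   echo "Started Valgrind wrapper for this test, PID=$$" >&2
  #   DIR=`dirname $0`
  #   TESTNAME_FILE=$DIR/testcase.$$.name
  #
  #   for arg in $@
  #   do
  #     if [[ "$arg" =~ --test-name=(.*) ]]
  #      then
  #       echo ${BASH_REMATCH[1]} >$TESTNAME_FILE
  #      fi
  #   done
  #
  #   valgrind --tool=memcheck ... --xml-file=<log_dir>/memcheck.$$.%p "$@"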
def CreateAnalyzer(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def GetAnalyzeResults(self, check_sanity=False):
# Glob all the files in the log directory
filenames = glob.glob(self.log_dir + "/" + self.ToolName() + ".*")
# If we have browser wrapper, the logfiles are named as
# "toolname.wrapper_PID.valgrind_PID".
# Let's extract the list of wrapper_PIDs and name it ppids
ppids = set([int(f.split(".")[-2]) \
for f in filenames if re.search("\.[0-9]+\.[0-9]+$", f)])
analyzer = self.CreateAnalyzer()
if len(ppids) == 0:
# Fast path - no browser wrapper was set.
return analyzer.Report(filenames, None, check_sanity)
ret = 0
for ppid in ppids:
testcase_name = None
try:
f = open(self.log_dir + ("/testcase.%d.name" % ppid))
testcase_name = f.read().strip()
f.close()
wk_layout_prefix="third_party/WebKit/LayoutTests/"
wk_prefix_at = testcase_name.rfind(wk_layout_prefix)
if wk_prefix_at != -1:
testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):]
except IOError:
pass
print "====================================================="
print " Below is the report for valgrind wrapper PID=%d." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
print " You can find the corresponding test"
print " by searching the above log for 'PID=%d'" % ppid
sys.stdout.flush()
ppid_filenames = [f for f in filenames \
if re.search("\.%d\.[0-9]+$" % ppid, f)]
# check_sanity won't work with browser wrappers
assert check_sanity == False
ret |= analyzer.Report(ppid_filenames, testcase_name)
print "====================================================="
sys.stdout.flush()
if ret != 0:
print ""
print "The Valgrind reports are grouped by test names."
print "Each test has its PID printed in the log when the test was run"
print "and at the beginning of its Valgrind report."
print "Hint: you can search for the reports by Ctrl+F -> `=#`"
sys.stdout.flush()
return ret
# TODO(timurrrr): Split into a separate file.
class Memcheck(ValgrindTool):
"""Memcheck
Dynamic memory error detector for Linux & Mac
http://valgrind.org/info/tools.html#memcheck
"""
def __init__(self):
super(Memcheck, self).__init__()
self.RegisterOptionParserHook(Memcheck.ExtendOptionParser)
def ToolName(self):
return "memcheck"
def ExtendOptionParser(self, parser):
parser.add_option("--leak-check", "--leak_check", type="string",
default="yes", # --leak-check=yes is equivalent of =full
help="perform leak checking at the end of the run")
parser.add_option("", "--show_all_leaks", action="store_true",
default=False,
help="also show less blatant leaks")
parser.add_option("", "--track_origins", action="store_true",
default=False,
help="Show whence uninitialized bytes came. 30% slower.")
def ToolSpecificFlags(self):
ret = ["--gen-suppressions=all", "--demangle=no"]
ret += ["--leak-check=%s" % self._options.leak_check]
if self._options.show_all_leaks:
ret += ["--show-reachable=yes"]
else:
ret += ["--show-possibly-lost=no"]
if self._options.track_origins:
ret += ["--track-origins=yes"]
# TODO(glider): this is a temporary workaround for http://crbug.com/51716
# Let's see whether it helps.
if common.IsMac():
ret += ["--smc-check=all"]
return ret
def CreateAnalyzer(self):
use_gdb = common.IsMac()
return memcheck_analyze.MemcheckAnalyzer(self._source_dir,
self._options.show_all_leaks,
use_gdb=use_gdb)
def Analyze(self, check_sanity=False):
ret = self.GetAnalyzeResults(check_sanity)
if ret != 0:
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-valgrind for the info on Memcheck/Valgrind")
return ret
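# Illustrative note (added, not part of the original file): with the default
# options the Memcheck class above ends up invoking something along the lines
# of
#   valgrind --tool=memcheck --num-callers=30 --gen-suppressions=all \
#       --demangle=no --leak-check=yes --show-possibly-lost=no \
#       --suppressions=<each existing suppression file> \
#       --xml=yes --xml-file=<log_dir>/memcheck.%p <test binary> <its flags>
# (the exact flags vary with the command-line options parsed above).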
class PinTool(BaseTool):
"""Abstract class for running PIN tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def PrepareForTest(self):
pass
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def ToolCommand(self):
"""Get the PIN command to run."""
# Construct the PIN command.
pin_cmd = os.getenv("PIN_COMMAND")
if not pin_cmd:
raise RuntimeError, "Please set PIN_COMMAND environment variable " \
"with the path to pin.exe"
proc = pin_cmd.split(" ")
proc += self.ToolSpecificFlags()
# The PIN command is constructed.
# PIN requires -- to separate PIN flags from the executable name.
# self._args begins with the exe to be run.
proc += ["--"]
proc += self._args
return proc
class ThreadSanitizerBase(object):
"""ThreadSanitizer
Dynamic data race detector for Linux, Mac and Windows.
http://code.google.com/p/data-race-test/wiki/ThreadSanitizer
Since TSan works on both Valgrind (Linux, Mac) and PIN (Windows), we need
to have multiple inheritance
"""
INFO_MESSAGE="Please see http://dev.chromium.org/developers/how-tos/" \
"using-valgrind/threadsanitizer for the info on " \
"ThreadSanitizer"
def __init__(self):
super(ThreadSanitizerBase, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerBase.ExtendOptionParser)
def ToolName(self):
return "tsan"
def UseXML(self):
return False
def SelfContained(self):
return True
def ExtendOptionParser(self, parser):
parser.add_option("", "--hybrid", default="no",
dest="hybrid",
help="Finds more data races, may give false positive "
"reports unless the code is annotated")
parser.add_option("", "--announce-threads", default="yes",
dest="announce_threads",
help="Show the the stack traces of thread creation")
parser.add_option("", "--free-is-write", default="no",
dest="free_is_write",
help="Treat free()/operator delete as memory write. "
"This helps finding more data races, but (currently) "
"this may give false positive reports on std::string "
"internals, see http://code.google.com/p/data-race-test"
"/issues/detail?id=40")
def EvalBoolFlag(self, flag_value):
if (flag_value in ["1", "true", "yes"]):
return True
elif (flag_value in ["0", "false", "no"]):
return False
raise RuntimeError, "Can't parse flag value (%s)" % flag_value
def ToolSpecificFlags(self):
ret = []
ignore_files = ["ignores.txt"]
for platform_suffix in common.PlatformNames():
ignore_files.append("ignores_%s.txt" % platform_suffix)
for ignore_file in ignore_files:
fullname = os.path.join(self._source_dir,
"tools", "valgrind", "tsan", ignore_file)
if os.path.exists(fullname):
fullname = common.NormalizeWindowsPath(fullname)
ret += ["--ignore=%s" % fullname]
# This should shorten filepaths for local builds.
ret += ["--file-prefix-to-cut=%s/" % self._source_dir]
# This should shorten filepaths on bots.
ret += ["--file-prefix-to-cut=build/src/"]
ret += ["--file-prefix-to-cut=out/Release/../../"]
# This should shorten filepaths for functions intercepted in TSan.
ret += ["--file-prefix-to-cut=scripts/tsan/tsan/"]
ret += ["--file-prefix-to-cut=src/tsan/tsan/"]
ret += ["--gen-suppressions=true"]
if self.EvalBoolFlag(self._options.hybrid):
ret += ["--hybrid=yes"] # "no" is the default value for TSAN
if self.EvalBoolFlag(self._options.announce_threads):
ret += ["--announce-threads"]
if self.EvalBoolFlag(self._options.free_is_write):
ret += ["--free-is-write=yes"]
else:
ret += ["--free-is-write=no"]
# --show-pc flag is needed for parsing the error logs on Darwin.
if platform_suffix == 'mac':
ret += ["--show-pc=yes"]
ret += ["--show-pid=no"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
for bc in boring_callers:
ret += ["--cut_stack_below=%s" % bc]
return ret
class ThreadSanitizerPosix(ThreadSanitizerBase, ValgrindTool):
def ToolSpecificFlags(self):
proc = ThreadSanitizerBase.ToolSpecificFlags(self)
# The -v flag is needed for printing the list of used suppressions and
# obtaining addresses for loaded shared libraries on Mac.
proc += ["-v"]
return proc
def CreateAnalyzer(self):
use_gdb = common.IsMac()
return tsan_analyze.TsanAnalyzer(use_gdb)
def Analyze(self, check_sanity=False):
ret = self.GetAnalyzeResults(check_sanity)
if ret != 0:
logging.info(self.INFO_MESSAGE)
return ret
class ThreadSanitizerWindows(ThreadSanitizerBase, PinTool):
def __init__(self):
super(ThreadSanitizerWindows, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerWindows.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to TSan suppression file")
def ToolSpecificFlags(self):
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
proc = ThreadSanitizerBase.ToolSpecificFlags(self)
# On PIN, ThreadSanitizer has its own suppression mechanism
    # and --log-file flag which work exactly as they do on Valgrind.
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
suppression_file = common.NormalizeWindowsPath(suppression_file)
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + "/tsan.%p"
proc += ["--log-file=" + common.NormalizeWindowsPath(logfilename)]
# TODO(timurrrr): Add flags for Valgrind trace children analog when we
# start running complex tests (e.g. UI) under TSan/Win.
return proc
def Analyze(self, check_sanity=False):
filenames = glob.glob(self.log_dir + "/tsan.*")
analyzer = tsan_analyze.TsanAnalyzer()
ret = analyzer.Report(filenames, None, check_sanity)
if ret != 0:
logging.info(self.INFO_MESSAGE)
return ret
class DrMemory(BaseTool):
"""Dr.Memory
Dynamic memory error detector for Windows.
http://dev.chromium.org/developers/how-tos/using-drmemory
  It is not very mature at the moment; some things might not work properly.
"""
def __init__(self, full_mode, pattern_mode):
super(DrMemory, self).__init__()
self.full_mode = full_mode
self.pattern_mode = pattern_mode
self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
def ToolName(self):
return "drmemory"
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a drmemory suppression file")
parser.add_option("", "--follow_python", action="store_true",
default=False, dest="follow_python",
help="Monitor python child processes. If off, neither "
"python children nor any children of python children "
"will be monitored.")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running Dr. Memory directly on the harness")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running valgrind "
"directly.")
parser.add_option("", "--use_debug", action="store_true",
default=False, dest="use_debug",
help="Run Dr. Memory debug build")
parser.add_option("", "--trace_children", action="store_true",
default=True,
help="TODO: default value differs from Valgrind")
def ToolCommand(self):
"""Get the tool command to run."""
# WINHEAP is what Dr. Memory supports as there are issues w/ both
# jemalloc (http://code.google.com/p/drmemory/issues/detail?id=320) and
# tcmalloc (http://code.google.com/p/drmemory/issues/detail?id=314)
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
"JSIMD_FORCEMMX" : "1", # http://code.google.com/p/drmemory/issues/detail?id=540
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
drmem_cmd = os.getenv("DRMEMORY_COMMAND")
if not drmem_cmd:
raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
"with the path to drmemory.exe"
proc = drmem_cmd.split(" ")
# By default, don't run python (this will exclude python's children as well)
# to reduce runtime. We're not really interested in spending time finding
# bugs in the python implementation.
# With file-based config we must update the file every time, and
# it will affect simultaneous drmem uses by this user. While file-based
# config has many advantages, here we may want this-instance-only
# (http://code.google.com/p/drmemory/issues/detail?id=334).
drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
run_drconfig = True
if self._options.follow_python:
logging.info("Following python children")
# -unreg fails if not already registered so query for that first
query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
shell=True)
(query_out, query_err) = query_proc.communicate()
if re.search("exe not registered", query_out):
run_drconfig = False # all set
else:
drconfig_cmd += ["-unreg", "python.exe"]
else:
logging.info("Excluding python children")
drconfig_cmd += ["-reg", "python.exe", "-norun"]
if run_drconfig:
drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
if drconfig_retcode:
logging.error("Configuring whether to follow python children failed " \
"with %d.", drconfig_retcode)
raise RuntimeError, "Configuring python children failed "
suppression_count = 0
supp_files = self._options.suppressions
if self.full_mode:
supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
for suppression_file in supp_files:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
# Un-comment to dump Dr.Memory events on error
#proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
# Un-comment and comment next line to debug Dr.Memory
#proc += ["-dr_ops", "-no_hide"]
#proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
#Proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
# Ensure we see messages about Dr. Memory crashing!
proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
if self._options.use_debug:
proc += ["-debug"]
proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
if self.log_parent_dir:
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
elif self._options.build_dir:
# The other case is only possible with -t cmdline.
# Anyways, if we omit -symcache_dir the -logdir's value is used which
# should be fine.
symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
if symcache_dir:
if not os.path.exists(symcache_dir):
try:
os.mkdir(symcache_dir)
except OSError:
logging.warning("Can't create symcache dir?")
if os.path.exists(symcache_dir):
proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
# Use -no_summary to suppress DrMemory's summary and init-time
# notifications. We generate our own with drmemory_analyze.py.
proc += ["-batch", "-no_summary"]
# Un-comment to disable interleaved output. Will also suppress error
# messages normally printed to stderr.
#proc += ["-quiet", "-no_results_to_stderr"]
proc += ["-callstack_max_frames", "40"]
# disable leak scan for now
proc += ["-no_count_leaks", "-no_leak_scan"]
# make callstacks easier to read
proc += ["-callstack_srcfile_prefix",
"build\\src,chromium\\src,crt_build\\self_x86"]
proc += ["-callstack_modname_hide",
"*drmemory*,chrome.dll"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
proc += ["-callstack_truncate_below", ",".join(boring_callers)]
if self.pattern_mode:
proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
elif not self.full_mode:
proc += ["-light"]
proc += self._tool_flags
# DrM i#850/851: The new -callstack_use_top_fp_selectively has bugs.
proc += ["-no_callstack_use_top_fp_selectively"]
# Dr.Memory requires -- to separate tool flags from the executable name.
proc += ["--"]
if self._options.indirect or self._options.indirect_webkit_layout:
# TODO(timurrrr): reuse for TSan on Windows
wrapper_path = os.path.join(self._source_dir,
"tools", "valgrind", "browser_wrapper_win.py")
wrapper = " ".join(["python", wrapper_path] + proc)
self.CreateBrowserWrapper(wrapper)
logging.info("browser wrapper = " + " ".join(proc))
if self._options.indirect_webkit_layout:
proc = self._args
# Layout tests want forward slashes.
wrapper = wrapper.replace('\\', '/')
proc += ["--wrapper", wrapper]
return proc
else:
proc = []
# Note that self._args begins with the name of the exe to be run.
self._args[0] = common.NormalizeWindowsPath(self._args[0])
proc += self._args
return proc
def CreateBrowserWrapper(self, command):
os.putenv("BROWSER_WRAPPER", command)
def Analyze(self, check_sanity=False):
# Use one analyzer for all the log files to avoid printing duplicate reports
#
# TODO(timurrrr): unify this with Valgrind and other tools when we have
# http://code.google.com/p/drmemory/issues/detail?id=684
analyzer = drmemory_analyze.DrMemoryAnalyzer()
ret = 0
if not self._options.indirect and not self._options.indirect_webkit_layout:
filenames = glob.glob(self.log_dir + "/*/results.txt")
ret = analyzer.Report(filenames, None, check_sanity)
else:
testcases = glob.glob(self.log_dir + "/testcase.*.logs")
# If we have browser wrapper, the per-test logdirs are named as
# "testcase.wrapper_PID.name".
# Let's extract the list of wrapper_PIDs and name it ppids.
# NOTE: ppids may contain '_', i.e. they are not ints!
ppids = set([f.split(".")[-2] for f in testcases])
for ppid in ppids:
testcase_name = None
try:
f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
testcase_name = f.read().strip()
f.close()
except IOError:
pass
print "====================================================="
print " Below is the report for drmemory wrapper PID=%s." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
# TODO(timurrrr): hm, the PID line is suppressed on Windows...
print " You can find the corresponding test"
print " by searching the above log for 'PID=%s'" % ppid
sys.stdout.flush()
ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
(self.log_dir, ppid))
ret |= analyzer.Report(ppid_filenames, testcase_name, False)
print "====================================================="
sys.stdout.flush()
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-drmemory for the info on Dr. Memory")
return ret
# RaceVerifier support. See
# http://code.google.com/p/data-race-test/wiki/RaceVerifier for more details.
class ThreadSanitizerRV1Analyzer(tsan_analyze.TsanAnalyzer):
""" TsanAnalyzer that saves race reports to a file. """
TMP_FILE = "rvlog.tmp"
def __init__(self, source_dir, use_gdb):
super(ThreadSanitizerRV1Analyzer, self).__init__(use_gdb)
self.out = open(self.TMP_FILE, "w")
def Report(self, files, testcase, check_sanity=False):
reports = self.GetReports(files)
for report in reports:
print >>self.out, report
if len(reports) > 0:
logging.info("RaceVerifier pass 1 of 2, found %i reports" % len(reports))
return -1
return 0
def CloseOutputFile(self):
self.out.close()
class ThreadSanitizerRV1Mixin(object):
"""RaceVerifier first pass.
Runs ThreadSanitizer as usual, but hides race reports and collects them in a
temporary file"""
def __init__(self):
super(ThreadSanitizerRV1Mixin, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerRV1Mixin.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.set_defaults(hybrid="yes")
def CreateAnalyzer(self):
use_gdb = common.IsMac()
self.analyzer = ThreadSanitizerRV1Analyzer(self._source_dir, use_gdb)
return self.analyzer
def Cleanup(self):
super(ThreadSanitizerRV1Mixin, self).Cleanup()
self.analyzer.CloseOutputFile()
class ThreadSanitizerRV2Mixin(object):
"""RaceVerifier second pass."""
def __init__(self):
super(ThreadSanitizerRV2Mixin, self).__init__()
self.RegisterOptionParserHook(ThreadSanitizerRV2Mixin.ExtendOptionParser)
def ExtendOptionParser(self, parser):
parser.add_option("", "--race-verifier-sleep-ms",
dest="race_verifier_sleep_ms", default=10,
help="duration of RaceVerifier delays")
def ToolSpecificFlags(self):
proc = super(ThreadSanitizerRV2Mixin, self).ToolSpecificFlags()
proc += ['--race-verifier=%s' % ThreadSanitizerRV1Analyzer.TMP_FILE,
'--race-verifier-sleep-ms=%d' %
int(self._options.race_verifier_sleep_ms)]
return proc
def Cleanup(self):
super(ThreadSanitizerRV2Mixin, self).Cleanup()
os.unlink(ThreadSanitizerRV1Analyzer.TMP_FILE)
class ThreadSanitizerRV1Posix(ThreadSanitizerRV1Mixin, ThreadSanitizerPosix):
pass
class ThreadSanitizerRV2Posix(ThreadSanitizerRV2Mixin, ThreadSanitizerPosix):
pass
class ThreadSanitizerRV1Windows(ThreadSanitizerRV1Mixin,
ThreadSanitizerWindows):
pass
class ThreadSanitizerRV2Windows(ThreadSanitizerRV2Mixin,
ThreadSanitizerWindows):
pass
class RaceVerifier(object):
"""Runs tests under RaceVerifier/Valgrind."""
MORE_INFO_URL = "http://code.google.com/p/data-race-test/wiki/RaceVerifier"
def RV1Factory(self):
if common.IsWindows():
return ThreadSanitizerRV1Windows()
else:
return ThreadSanitizerRV1Posix()
def RV2Factory(self):
if common.IsWindows():
return ThreadSanitizerRV2Windows()
else:
return ThreadSanitizerRV2Posix()
def ToolName(self):
return "tsan"
def Main(self, args, check_sanity, min_runtime_in_seconds):
logging.info("Running a TSan + RaceVerifier test. For more information, " +
"see " + self.MORE_INFO_URL)
cmd1 = self.RV1Factory()
ret = cmd1.Main(args, check_sanity, min_runtime_in_seconds)
# Verify race reports, if there are any.
if ret == -1:
logging.info("Starting pass 2 of 2. Running the same binary in " +
"RaceVerifier mode to confirm possible race reports.")
logging.info("For more information, see " + self.MORE_INFO_URL)
cmd2 = self.RV2Factory()
ret = cmd2.Main(args, check_sanity, min_runtime_in_seconds)
else:
logging.info("No reports, skipping RaceVerifier second pass")
logging.info("Please see " + self.MORE_INFO_URL + " for more information " +
"on RaceVerifier")
return ret
def Run(self, args, module, min_runtime_in_seconds=0):
return self.Main(args, False, min_runtime_in_seconds)
class EmbeddedTool(BaseTool):
"""Abstract class for tools embedded directly into the test binary.
"""
# TODO(glider): need to override Execute() and support process chaining here.
def ToolCommand(self):
# In the simplest case just the args of the script.
return self._args
class Asan(EmbeddedTool):
"""AddressSanitizer, a memory error detector.
More information at
http://dev.chromium.org/developers/testing/addresssanitizer
"""
def __init__(self):
super(Asan, self).__init__()
self._timeout = 1200
if common.IsMac():
self._env["DYLD_NO_PIE"] = "1"
def ToolName(self):
return "asan"
def ToolCommand(self):
# TODO(glider): use pipes instead of the ugly wrapper here once they
# are supported.
procs = [os.path.join(self._source_dir, "tools", "valgrind",
"asan", "asan_wrapper.sh")]
procs.extend(self._args)
return procs
  def Analyze(self, unused_check_sanity):
return 0
class ToolFactory:
def Create(self, tool_name):
if tool_name == "memcheck":
return Memcheck()
if tool_name == "tsan":
if common.IsWindows():
return ThreadSanitizerWindows()
else:
return ThreadSanitizerPosix()
if tool_name == "drmemory" or tool_name == "drmemory_light":
# TODO(timurrrr): remove support for "drmemory" when buildbots are
# switched to drmemory_light OR make drmemory==drmemory_full the default
# mode when the tool is mature enough.
return DrMemory(False, False)
if tool_name == "drmemory_full":
return DrMemory(True, False)
if tool_name == "drmemory_pattern":
return DrMemory(False, True)
if tool_name == "tsan_rv":
return RaceVerifier()
if tool_name == "asan":
return Asan()
try:
platform_name = common.PlatformNames()[0]
except common.NotImplementedError:
platform_name = sys.platform + "(Unknown)"
raise RuntimeError, "Unknown tool (tool=%s, platform=%s)" % (tool_name,
platform_name)
def CreateTool(tool):
return ToolFactory().Create(tool)
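# Minimal usage sketch (added, not part of the original file); the tool name,
# test binary and module are examples only:
#
#   import valgrind_test
#   tool = valgrind_test.CreateTool("memcheck")
#   # Flags for this wrapper come first, then the test binary and its flags.
#   retcode = tool.Run(["--timeout=1200", "out/Debug/base_unittests",
#                       "--gtest_filter=FooTest.*"], "base")
#   sys.exit(retcode)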
|
bsd-3-clause
| 3,513,879,468,446,510,000 | 776,321,861,119,581,400 | 36.260729 | 89 | 0.61988 | false |
QuantScientist/Deep-Learning-Boot-Camp
|
Kaggle-PyTorch/PyTorch-Ensembler/nnmodels/simplenet.py
|
1
|
3522
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import math
use_gpu = torch.cuda.is_available()
# class SimpleNet(nn.Module):
# def __init__(self, num_classes=1, n_dim=3):
# super(SimpleNet, self).__init__()
# self.conv1 = nn.Conv2d(n_dim, 32, 3, stride=1)
# self.conv2 = nn.Conv2d(32, 32, kernel_size=3)
#
# self.conv3 = nn.Conv2d(32, 64, kernel_size=3)
# self.conv4 = nn.Conv2d(64, 64, kernel_size=3)
# self.dense1 = nn.Linear(179776, out_features=512)
# self.dense1_bn = nn.BatchNorm1d(512)
# self.dense2 = nn.Linear(512, (num_classes))
#
# def forward(self, x):
# x = F.relu(self.conv1(x))
# x = F.relu(F.dropout(F.max_pool2d(self.conv2(x), 2), 0.25))
# x = F.relu(self.conv3(x))
# x = F.relu(F.dropout(F.max_pool2d(self.conv4(x), 2), 0.25))
# x = x.view(x.size(0), -1)
# # print (x.data.shape)
# x = F.relu(self.dense1_bn(self.dense1(x)))
# x = x.view(x.size(0), -1)
# # print (x.data.shape)
# x = self.dense2(x)
#
# return x
dropout = torch.nn.Dropout(p=0.30)
relu=torch.nn.LeakyReLU()
pool = nn.MaxPool2d(2, 2)
class ConvRes(nn.Module):
def __init__(self, insize, outsize):
super(ConvRes, self).__init__()
drate = .3
self.math = nn.Sequential(
nn.BatchNorm2d(insize),
nn.Dropout(drate),
torch.nn.Conv2d(insize, outsize, kernel_size=2, padding=2),
nn.PReLU(),
)
def forward(self, x):
return self.math(x)
class ConvCNN(nn.Module):
def __init__(self, insize, outsize, kernel_size=7, padding=2, pool=2, avg=True):
super(ConvCNN, self).__init__()
self.avg = avg
self.math = torch.nn.Sequential(
torch.nn.Conv2d(insize, outsize, kernel_size=kernel_size, padding=padding),
torch.nn.BatchNorm2d(outsize),
torch.nn.LeakyReLU(),
torch.nn.MaxPool2d(pool, pool),
)
self.avgpool = torch.nn.AvgPool2d(pool, pool)
def forward(self, x):
x = self.math(x)
if self.avg is True:
x = self.avgpool(x)
return x
class SimpleNet(nn.Module):
def __init__(self,num_classes, n_dim):
super(SimpleNet, self).__init__()
self.num_classes=num_classes
self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cnn1 = ConvCNN(n_dim, 32, kernel_size=7, pool=4, avg=False)
        self.cnn2 = ConvCNN(32, 32, kernel_size=5, pool=2, avg=True)
        self.cnn3 = ConvCNN(32, 32, kernel_size=5, pool=2, avg=True)
        self.res1 = ConvRes(32, 64)
self.features = nn.Sequential(
self.cnn1, dropout,
self.cnn2,
self.cnn3,
self.res1,
)
self.classifier = torch.nn.Sequential(
nn.Linear(1024, (num_classes)),
)
self.sig=nn.Sigmoid()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
# print (x.data.shape)
x = self.classifier(x)
if (self.num_classes == 1):
x = self.sig(x)
return x
# return F.log_softmax(x)
def simpleXX_generic(num_classes, imgDim):
# depth, num_classes = 1, widen_factor = 1, dropRate = 0.0
model = SimpleNet(num_classes=num_classes, n_dim=imgDim) # 56
return model
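# Minimal smoke test (added sketch, not part of the original file). The
# 128x128 input size is an assumption: with the conv/pool stack above it
# produces a 4x4x64 = 1024-element feature map, matching nn.Linear(1024, ...)
# in the classifier; other input sizes would need that layer changed.
if __name__ == "__main__":
    net = simpleXX_generic(num_classes=1, imgDim=3)
    dummy = Variable(torch.randn(2, 3, 128, 128))
    out = net(dummy)
    print(out.size())  # expected torch.Size([2, 1]), sigmoid outputs in (0, 1)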
|
mit
| 470,975,649,692,065,660 | 8,812,469,660,230,703,000 | 29.102564 | 87 | 0.561897 | false |
showerst/openstates
|
openstates/ma/bills.py
|
1
|
5391
|
import re
import time
import itertools
from datetime import datetime
import lxml.html
from billy.scrape.bills import BillScraper, Bill
from .actions import Categorizer
class MABillScraper(BillScraper):
jurisdiction = 'ma'
categorizer = Categorizer()
def __init__(self, *args, **kwargs):
super(MABillScraper, self).__init__(*args, **kwargs)
# forcing these values so that 500s come back as skipped bills
# self.retry_attempts = 0
self.raise_errors = False
def scrape(self, chamber, session):
# for the chamber of the action
chamber_map = {'House': 'lower', 'Senate': 'upper', 'Joint': 'joint',
'Governor': 'executive'}
session_slug = session[:-2]
chamber_slug = 'House' if chamber == 'lower' else 'Senate'
# keep track of how many we've had to skip
skipped = 0
for n in itertools.count(1):
bill_id = '%s%d' % (chamber_slug[0], n)
bill_url = 'http://www.malegislature.gov/Bills/%s/%s/%s' % (
session_slug, chamber_slug, bill_id)
            # let's assume that if 10 bills are missing, we're done
if skipped == 10:
break
html = self.get(bill_url, verify=False).text
if 'Unable to find the Bill' in html:
self.warning('skipping %s' % bill_url)
skipped += 1
continue
# sometimes the site breaks, missing vital data
if 'billShortDesc' not in html:
self.warning('truncated page on %s' % bill_url)
time.sleep(1)
html = self.get(bill_url, verify=False).text
if 'billShortDesc' not in html:
self.warning('skipping %s' % bill_url)
skipped += 1
continue
else:
skipped = 0
else:
skipped = 0
doc = lxml.html.fromstring(html)
doc.make_links_absolute('http://www.malegislature.gov/')
title = doc.xpath('//h2/span/text()')[0].strip()
desc = doc.xpath('//p[@class="billShortDesc"]/text()')[0]
            # for resolutions that do not always have a typical title
if (title == ''):
title = doc.xpath('//*[@id="billDetail"]/div[2]/p')[0].text_content().strip()
# create bill
bill = Bill(session, chamber, bill_id, title, summary=desc)
bill.add_source(bill_url)
# actions
for act_row in doc.xpath('//tbody[@class="bgwht"]/tr'):
date = act_row.xpath('./td[@headers="bDate"]/text()')[0]
date = datetime.strptime(date, "%m/%d/%Y")
actor_txt = act_row.xpath('./td[@headers="bBranch"]')[0].text_content().strip()
if actor_txt:
actor = chamber_map[actor_txt]
action = act_row.xpath('./td[@headers="bAction"]')[0].text_content().strip()
                # self.categorizer is the Categorizer from actions.py, which in
                # turn builds on billy.scrape.actions.BaseCategorizer
attrs = self.categorizer.categorize(action)
bill.add_action(actor, action, date, **attrs)
# I tried to, as I was finding the sponsors, detect whether a
# sponsor was already known. One has to do this because an author
# is listed in the "Sponsors:" section and then the same person
# will be listed with others in the "Petitioners:" section. We are
# guessing that "Sponsors" are authors and "Petitioners" are
# co-authors. Does this make sense?
sponsors = dict((a.get('href'), a.text) for a in
doc.xpath('//p[@class="billReferral"]/a'))
petitioners = dict((a.get('href'), a.text) for a in
doc.xpath('//div[@id="billSummary"]/p[1]/a'))
if len(sponsors) == 0:
spons = doc.xpath('//p[@class="billReferral"]')[0].text_content()
spons = spons.strip()
spons = spons.split("\n")
cspons = []
for s in spons:
if s and s.strip() != "":
cspons.append(s)
sponsors = dict((s, s) for s in cspons)
# remove sponsors from petitioners
for k in sponsors:
petitioners.pop(k, None)
for sponsor in sponsors.values():
if sponsor == 'NONE':
continue
if sponsor is None:
continue
bill.add_sponsor('primary', sponsor)
for petitioner in petitioners.values():
                if petitioner == 'NONE':
continue
bill.add_sponsor('cosponsor', petitioner)
bill_text_url = doc.xpath(
'//a[contains(@href, "/Document/Bill/{}/")]/@href'.
format(session_slug))
if bill_text_url:
assert bill_text_url[0].endswith('.pdf'), "Handle other mimetypes"
bill.add_version('Current Text', bill_text_url[0],
mimetype='application/pdf')
self.save_bill(bill)
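# Explanatory sketch (added, not part of the original scraper): the sponsor /
# petitioner de-duplication above keys both dicts by href, e.g. with invented
# entries
#   sponsors    = {"/People/Profile/1": "Jane Smith"}
#   petitioners = {"/People/Profile/1": "Jane Smith",
#                  "/People/Profile/2": "John Doe"}
# petitioners.pop(k, None) removes Jane Smith from the petitioners, so she is
# added once as the 'primary' sponsor and only John Doe becomes a 'cosponsor'.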
|
gpl-3.0
| -7,916,427,936,748,685,000 | 4,591,539,113,344,988,700 | 38.065217 | 97 | 0.515674 | false |
peterjoel/servo
|
tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/tests/test_encoding.py
|
30
|
4801
|
from __future__ import absolute_import, division, unicode_literals
import os
import pytest
from .support import get_data_files, test_dir, errorMessage, TestData as _TestData
from html5lib import HTMLParser, _inputstream
def test_basic_prescan_length():
data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
pad = 1024 - len(data) + 1
data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
assert len(data) == 1024 # Sanity
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
assert 'utf-8' == stream.charEncoding[0].name
def test_parser_reparse():
data = "<title>Caf\u00E9</title><!--a--><meta charset='utf-8'>".encode('utf-8')
pad = 10240 - len(data) + 1
data = data.replace(b"-a-", b"-" + (b"a" * pad) + b"-")
assert len(data) == 10240 # Sanity
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
assert 'windows-1252' == stream.charEncoding[0].name
p = HTMLParser(namespaceHTMLElements=False)
doc = p.parse(data, useChardet=False)
assert 'utf-8' == p.documentEncoding
assert doc.find(".//title").text == "Caf\u00E9"
@pytest.mark.parametrize("expected,data,kwargs", [
("utf-16le", b"\xFF\xFE", {"override_encoding": "iso-8859-2"}),
("utf-16be", b"\xFE\xFF", {"override_encoding": "iso-8859-2"}),
("utf-8", b"\xEF\xBB\xBF", {"override_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"override_encoding": "iso-8859-2", "transport_encoding": "iso-8859-3"}),
("iso-8859-2", b"<meta charset=iso-8859-3>", {"transport_encoding": "iso-8859-2"}),
("iso-8859-2", b"<meta charset=iso-8859-2>", {"same_origin_parent_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "iso-8859-2", "likely_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16be", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"same_origin_parent_encoding": "utf-16le", "likely_encoding": "iso-8859-2"}),
("iso-8859-2", b"", {"likely_encoding": "iso-8859-2", "default_encoding": "iso-8859-3"}),
("iso-8859-2", b"", {"default_encoding": "iso-8859-2"}),
("windows-1252", b"", {"default_encoding": "totally-bogus-string"}),
("windows-1252", b"", {}),
])
def test_parser_args(expected, data, kwargs):
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False, **kwargs)
assert expected == stream.charEncoding[0].name
p = HTMLParser()
p.parse(data, useChardet=False, **kwargs)
assert expected == p.documentEncoding
@pytest.mark.parametrize("kwargs", [
{"override_encoding": "iso-8859-2"},
{"override_encoding": None},
{"transport_encoding": "iso-8859-2"},
{"transport_encoding": None},
{"same_origin_parent_encoding": "iso-8859-2"},
{"same_origin_parent_encoding": None},
{"likely_encoding": "iso-8859-2"},
{"likely_encoding": None},
{"default_encoding": "iso-8859-2"},
{"default_encoding": None},
{"foo_encoding": "iso-8859-2"},
{"foo_encoding": None},
])
def test_parser_args_raises(kwargs):
with pytest.raises(TypeError) as exc_info:
p = HTMLParser()
p.parse("", useChardet=False, **kwargs)
assert exc_info.value.args[0].startswith("Cannot set an encoding with a unicode input")
def runParserEncodingTest(data, encoding):
p = HTMLParser()
assert p.documentEncoding is None
p.parse(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
assert encoding == p.documentEncoding, errorMessage(data, encoding, p.documentEncoding)
def runPreScanEncodingTest(data, encoding):
stream = _inputstream.HTMLBinaryInputStream(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
# Very crude way to ignore irrelevant tests
if len(data) > stream.numBytesMeta:
return
assert encoding == stream.charEncoding[0].name, errorMessage(data, encoding, stream.charEncoding[0].name)
def test_encoding():
for filename in get_data_files("encoding"):
tests = _TestData(filename, b"data", encoding=None)
for test in tests:
yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
# pylint:disable=wrong-import-position
try:
import chardet # noqa
except ImportError:
print("chardet not found, skipping chardet tests")
else:
def test_chardet():
with open(os.path.join(test_dir, "encoding", "chardet", "test_big5.txt"), "rb") as fp:
encoding = _inputstream.HTMLInputStream(fp.read()).charEncoding
assert encoding[0].name == "big5"
# pylint:enable=wrong-import-position
|
mpl-2.0
| 4,444,106,909,494,047,000 | -9,042,699,342,789,780,000 | 40.387931 | 109 | 0.647573 | false |
Novasoft-India/OperERP-AM-Motors
|
openerp/tools/misc.py
|
16
|
39088
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: Utilities: tools.misc
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import zipfile
from collections import defaultdict
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# They were moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
def find_in_path(name):
try:
return which(name)
except IOError:
return None
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
return None
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
args2 = (prog,) + args
return subprocess.call(args2)
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Couldn\'t find %s' % name)
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
pop = subprocess.Popen((prog,) + args, bufsize= -1,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=(os.name=="posix"))
return pop.stdin, pop.stdout
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
    >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis ([email protected])
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
#----------------------------------------------------------
# SMS
#----------------------------------------------------------
# text must be latin-1 encoded
def sms_send(user, password, api_id, text, to):
import urllib
url = "http://api.urlsms.com/SendSMS.aspx"
#url = "http://196.7.150.220/http/sendmsg"
params = urllib.urlencode({'UserID': user, 'Password': password, 'SenderID': api_id, 'MsgText': text, 'RecipientMobileNo':to})
urllib.urlopen(url+"?"+params)
# FIXME: Use the logger if there is an error
return True
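# Hedged usage sketch (not part of the original module); every value below is a
# placeholder for a hypothetical urlsms.com account:
#     sms_send('demo_user', 'secret', 'MyCompany', 'latin-1 encoded text', '+15550100')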
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
class currency(float):
""" Deprecate
.. warning::
Don't use ! Use res.currency.round()
"""
def __init__(self, value, accuracy=2, rounding=None):
if rounding is None:
rounding=10**-accuracy
self.rounding=rounding
self.accuracy=accuracy
def __new__(cls, value, accuracy=2, rounding=None):
return float.__new__(cls, round(value, accuracy))
#def __str__(self):
# display_value = int(self*(10**(-self.accuracy))/self.rounding)*self.rounding/(10**(-self.accuracy))
# return str(display_value)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
ALL_LANGUAGES = {
'ab_RU': u'Abkhazian / аҧсуа',
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BS': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_CA': u'English (CA)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_HN': u'Spanish (HN) / Español (HN)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_NI': u'Spanish (NI) / Español (NI)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PR': u'Spanish (PR) / Español (PR)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_SV': u'Spanish (SV) / Español (SV)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ',
'ja_JP': u'Japanese / 日本語',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'ml_IN': u'Malayalam / മലയാളം',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Flemish (BE) / Vlaams (BE)',
'oc_FR': u'Occitan (FR, post 1500) / Occitan',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'si_LK': u'Sinhalese / සිංහල',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'ur_PK': u'Urdu / اردو',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
'tlh_TLH': u'Klingon',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
    Input: an account or invoice number.
    Output: the same number with its recursive modulo-10 check digit appended.
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
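# Hedged usage sketch (not part of the original module): mod10r appends the
# recursive modulo-10 check digit used e.g. on Swiss payment slip references.
# The short input below is purely illustrative:
#     >>> mod10r('12')
#     '121'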
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
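# Hedged usage sketch (not part of the original module): decorating any callable
# logs its arguments, result and timing at DEBUG level.
#     @logged
#     def add(a, b):
#         return a + b
#     add(1, 2)   # emits the call details through _logger.debug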
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
global __icons_list
return [(x, x) for x in __icons_list ]
def extract_zip_file(zip_file, outdirectory):
zf = zipfile.ZipFile(zip_file, 'r')
out = outdirectory
for path in zf.namelist():
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
if not tgt.endswith(os.sep):
fp = open(tgt, 'wb')
fp.write(zf.read(path))
fp.close()
zf.close()
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
       for binding to an interface, but it could be used as a basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
"""Attempt to return the "standard name" of the current timezone on a win32 system.
@return the standard name of the current win32 timezone, or False if it cannot be found.
"""
res = False
if sys.platform == "win32":
try:
import _winreg
hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
_winreg.CloseKey(current_tz_key)
_winreg.CloseKey(hklm)
except Exception:
pass
return res
def detect_server_timezone():
"""Attempt to detect the timezone to use on the server side.
Defaults to UTC if no working timezone can be found.
@return the timezone identifier as expected by pytz.timezone.
"""
try:
import pytz
except Exception:
_logger.warning("Python pytz module is not available. "
"Timezone will be set to UTC by default.")
return 'UTC'
# Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
# Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
# Option 3: the environment variable TZ
sources = [ (config['timezone'], 'OpenERP configuration'),
(time.tzname[0], 'time.tzname'),
(os.environ.get('TZ',False),'TZ environment variable'), ]
# Option 4: OS-specific: /etc/timezone on Unix
if os.path.exists("/etc/timezone"):
tz_value = False
try:
f = open("/etc/timezone")
tz_value = f.read(128).strip()
except Exception:
pass
finally:
f.close()
sources.append((tz_value,"/etc/timezone file"))
# Option 5: timezone info from registry on Win32
if sys.platform == "win32":
# Timezone info is stored in windows registry.
# However this is not likely to work very well as the standard name
# of timezones in windows is rarely something that is known to pytz.
# But that's ok, it is always possible to use a config option to set
# it explicitly.
sources.append((get_win32_timezone(),"Windows Registry"))
for (value,source) in sources:
if value:
try:
tz = pytz.timezone(value)
_logger.info("Using timezone %s obtained from %s.", tz.zone, source)
return value
except pytz.UnknownTimeZoneError:
_logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
_logger.warning("No valid timezone could be detected, using default UTC "
"timezone. You can specify it explicitly with option 'timezone' in "
"the server configuration.")
return 'UTC'
def get_server_timezone():
return "UTC"
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is not tzinfo in a datetime value (e.g when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
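# Hedged usage sketch (not part of the original module): the map above is meant
# to be applied as a plain string substitution that rewrites libc-specific
# strftime directives into the portable C89 subset, e.g.:
#     fmt = '%T %Z'
#     for src, dst in DATETIME_FORMATS_MAP.items():
#         fmt = fmt.replace(src, dst)
#     # fmt is now '%H:%M:%S ' (the %Z directive is stripped on purpose)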
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
tz_offset=True, ignore_unparsable_time=True):
"""
Convert a source timestamp string into a destination timestamp string, attempting to apply the
correct offset if both the server and local timezone are recognized, or no
offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).
WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
the client would not be able to format/offset it correctly. DO NOT use it for returning
date fields directly, these are supposed to be handled by the client!!
@param src_tstamp_str: the str value containing the timestamp in the server timezone.
@param src_format: the format to use when parsing the server timestamp.
@param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
@param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
@param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
using src_format or formatted using dst_format.
@return local/client formatted timestamp, expressed in the local/client timezone if possible
and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
"""
if not src_tstamp_str:
return False
res = src_tstamp_str
if src_format and dst_format:
# find out server timezone
server_tz = get_server_timezone()
try:
# dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
dt_value = datetime.strptime(src_tstamp_str, src_format)
if tz_offset and dst_tz_name:
try:
import pytz
src_tz = pytz.timezone(server_tz)
dst_tz = pytz.timezone(dst_tz_name)
src_dt = src_tz.localize(dt_value, is_dst=True)
dt_value = src_dt.astimezone(dst_tz)
except Exception:
pass
res = dt_value.strftime(dst_format)
except Exception:
# Normal ways to end up here are if strptime or strftime failed
if not ignore_unparsable_time:
return False
return res
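# Hedged usage sketch (not part of the original module); the timezone name is an
# example and the conversion only happens when pytz is installed:
#     server_to_local_timestamp('2013-01-01 10:00:00',
#                               DEFAULT_SERVER_DATETIME_FORMAT,
#                               DEFAULT_SERVER_DATETIME_FORMAT,
#                               'Europe/Brussels')
#     # -> '2013-01-01 11:00:00' with the default UTC server timezone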
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
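# Hedged usage sketch (not part of the original module):
#     >>> list(split_every(2, [1, 2, 3, 4, 5]))
#     [(1, 2), (3, 4), (5,)]
#     >>> list(split_every(2, [1, 2, 3, 4], piece_maker=list))
#     [[1, 2], [3, 4]]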
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
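# Hedged usage sketch (not part of the original module): with a hypothetical
# cursor, uid and 'res.partner' model proxy, grouping partner ids by company
# could look like this (returned ids are illustrative only):
#     get_and_group_by_field(cr, uid, partner_obj, [7, 8, 9], 'company_id')
#     # -> {1: [7, 9], 2: [8]}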
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
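# Hedged usage sketch (not part of the original module): dotted attribute lookup
# on an arbitrary (hypothetical) record object:
#     getter = attrgetter('partner_id.name', 'state')
#     getter(record)   # -> (record.partner_id.name, record.state)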
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
       or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
Some examples (notice that there are never quotes surrounding
       the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
do_suff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
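# Hedged usage sketch (not part of the original module):
#     stream = CountingStream(['a', 'b'])
#     stream.next()   # -> 'a', and stream.index is now 0
#     stream.next()   # -> 'b', and stream.index is now 1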
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
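# Hedged usage sketch (not part of the original module): if the server was
# started as `openerp-server -d mydb --update=all --xmlrpc-port=8069`, then
# stripped_sys_argv() would drop '-d mydb' and '--update=all' but keep
# '--xmlrpc-port=8069', making the result safe for re-executing the process.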
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -79,623,774,613,373,680 | -8,918,853,109,202,067,000 | 34.396536 | 133 | 0.583441 | false |
vetu11/piloco
|
telegram/inlinequeryresultcontact.py
|
1
|
3739
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultContact"""
from telegram import InlineQueryResult, InlineKeyboardMarkup, InputMessageContent
class InlineQueryResultContact(InlineQueryResult):
"""Represents a contact with a phone number. By default, this contact will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the contact.
Attributes:
phone_number (str): Contact's phone number.
first_name (str): Contact's first name.
last_name (Optional[str]): Contact's last name.
reply_markup (Optional[:class:`telegram.InlineKeyboardMarkup`]): Inline keyboard attached
to the message.
input_message_content (Optional[:class:`telegram.InputMessageContent`]): Content of the
message to be sent instead of the contact.
thumb_url (Optional[str]): Url of the thumbnail for the result.
thumb_width (Optional[int]): Thumbnail width.
thumb_height (Optional[int]): Thumbnail height.
Args:
id (str):
phone_number (str):
first_name (str):
last_name (Optional[str]):
reply_markup (Optional[:class:`telegram.InlineKeyboardMarkup`]):
input_message_content (Optional[:class:`telegram.InputMessageContent`]):
thumb_url (Optional[str]): Url of the thumbnail for the result.
thumb_width (Optional[int]):
thumb_height (Optional[int]):
**kwargs (dict): Arbitrary keyword arguments.
"""
def __init__(self,
id,
phone_number,
first_name,
last_name=None,
reply_markup=None,
input_message_content=None,
thumb_url=None,
thumb_width=None,
thumb_height=None,
**kwargs):
# Required
super(InlineQueryResultContact, self).__init__('contact', id)
self.phone_number = phone_number
self.first_name = first_name
# Optionals
if last_name:
self.last_name = last_name
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
if thumb_url:
self.thumb_url = thumb_url
if thumb_width:
self.thumb_width = thumb_width
if thumb_height:
self.thumb_height = thumb_height
@staticmethod
def de_json(data, bot):
data = super(InlineQueryResultContact, InlineQueryResultContact).de_json(data, bot)
data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'), bot)
data['input_message_content'] = InputMessageContent.de_json(
data.get('input_message_content'), bot)
return InlineQueryResultContact(**data)
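# Hedged usage sketch (not part of the original module): answering an inline
# query with a single contact result; the bot and update objects below are
# hypothetical and the id value is arbitrary.
#     result = InlineQueryResultContact(id='1', phone_number='+15550100',
#                                       first_name='Alice')
#     bot.answerInlineQuery(update.inline_query.id, results=[result])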
|
gpl-3.0
| -3,099,794,796,993,785,000 | -5,163,298,452,172,347,000 | 39.204301 | 99 | 0.651511 | false |
ddurst/zamboni
|
mkt/features/tests/test_utils_.py
|
6
|
2321
|
from django.test.client import RequestFactory
import mock
from nose.tools import eq_
import mkt.site.tests
from mkt.constants.features import FeatureProfile
from mkt.features.utils import load_feature_profile
class TestLoadFeatureProfile(mkt.site.tests.TestCase):
def setUp(self):
super(TestLoadFeatureProfile, self).setUp()
self.profile = FeatureProfile(apps=True)
self.signature = self.profile.to_signature()
def test_does_nothing_on_desktop(self):
request = RequestFactory().get('/?dev=desktop&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_does_nothing_without_dev_param(self):
request = RequestFactory().get('/?pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, None)
request = RequestFactory().get(
            '/?device=mobile&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_does_nothing_without_profile_signature(self):
request = RequestFactory().get('/?dev=firefoxos')
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_does_nothing_if_invalid_profile_signature_is_passed(self):
request = RequestFactory().get('/?dev=firefoxos&pro=whatever')
load_feature_profile(request)
eq_(request.feature_profile, None)
def test_works(self):
request = RequestFactory().get(
'/?dev=firefoxos&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile.to_list(), self.profile.to_list())
@mock.patch('mkt.features.utils.FeatureProfile.from_signature')
def test_caching_on_request_property(self, from_signature_mock):
fake_feature_profile = object()
from_signature_mock.return_value = fake_feature_profile
request = RequestFactory().get(
'/?dev=firefoxos&pro=%s' % self.signature)
load_feature_profile(request)
eq_(request.feature_profile, fake_feature_profile)
from_signature_mock.return_value = None
load_feature_profile(request)
# Should not be None thanks to the property caching.
eq_(request.feature_profile, fake_feature_profile)
|
bsd-3-clause
| -1,524,312,343,083,348,000 | 3,779,511,126,360,757,000 | 38.338983 | 79 | 0.675571 | false |
RanadeepPolavarapu/kuma
|
vendor/packages/pygments/lexers/php.py
|
72
|
9769
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.php
~~~~~~~~~~~~~~~~~~~
Lexers for PHP and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['ZephirLexer', 'PhpLexer']
class ZephirLexer(RegexLexer):
"""
For `Zephir language <http://zephir-lang.com/>`_ source code.
    Zephir is a compiled high-level language aimed
    at the creation of C extensions for PHP.
.. versionadded:: 2.0
"""
name = 'Zephir'
aliases = ['zephir']
filenames = ['*.zep']
zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
zephir_type = ['bit', 'bits', 'string']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
r'empty)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][\w\\]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
To get a list of allowed modules have a look into the
`_php_builtins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._php_builtins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]', '*.inc']
mimetypes = ['text/x-php']
# Note that a backslash is included in the following two patterns
# PHP uses a backslash as a namespace separator
_ident_char = r'[\\\w]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'<<<([\'"]?)(' + _ident_inner + r')\1\n.*?\n\s*\2;?\n', String),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)(' + _ident_inner + ')',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/@-]+', Operator),
(r'\?', Operator), # don't add to the charclass above!
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)(' + _ident_inner + ')',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait|yield|'
r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
(r'\$+' + _ident_inner, Name.Variable),
(_ident_inner, Name.Other),
(r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
(r'\d+e[+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0x[a-f0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'0b[01]+', Number.Bin),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'classname': [
(_ident_inner, Name.Class, '#pop')
],
'functionname': [
(_ident_inner, Name.Function, '#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
(r'\$' + _ident_inner + '(\[\S+?\]|->' + _ident_inner + ')?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]+', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._php_builtins import MODULES
for key, value in iteritems(MODULES):
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
return rv
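# Hedged usage sketch (not part of the original module): highlighting a small
# PHP snippet through pygments' standard entry points.
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     code = "<?php echo strtoupper('hello'); ?>"
#     print(highlight(code, PhpLexer(), HtmlFormatter()))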
|
mpl-2.0
| -7,764,422,802,782,343,000 | 6,680,880,437,553,900,000 | 38.873469 | 89 | 0.509776 | false |
windofthesky/ansible
|
contrib/inventory/spacewalk.py
|
24
|
8569
|
#!/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacealk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <[email protected]>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <[email protected]> 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import time
from optparse import OptionParser
import subprocess
import ConfigParser
try:
import json
except:
import simplejson as json
base_dir = os.path.dirname(os.path.realpath(__file__))
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.join(base_dir, "spacewalk.ini")
# Sanity check
if not os.path.exists(SW_REPORT):
print >> sys.stderr, 'Error: %s is required for operation.' % (SW_REPORT)
sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
os.chmod(CACHE_DIR, 2775)
# Helper functions
#------------------------------
def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified
spacewalk-report
"""
cache_filename = os.path.join(CACHE_DIR, name)
if not os.path.exists(cache_filename) or \
(time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
# Update the cache
fh = open(cache_filename, 'w')
p = subprocess.Popen([SW_REPORT, name], stdout=fh)
p.wait()
fh.close()
lines = open(cache_filename, 'r').readlines()
keys = lines[0].strip().split(',')
# add 'spacewalk_' prefix to the keys
keys = [ 'spacewalk_' + key for key in keys ]
for line in lines[1:]:
values = line.strip().split(',')
if len(keys) == len(values):
yield dict(zip(keys, values))
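# Hedged usage sketch (not part of the original script): every yielded row is a
# dict keyed by the CSV header with a 'spacewalk_' prefix, so iterating
# spacewalk_report('inventory') might produce rows such as
#     {'spacewalk_hostname': 'web01.example.com', 'spacewalk_ip_address': '10.0.0.5', ...}
# (the exact field names depend on the installed spacewalk-report version).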
# Options
#------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
default=False, action="store_true",
help="Produce a friendlier version of either server list or host detail")
parser.add_option('-o', '--org', default=None, dest="org_number",
help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()
# read spacewalk.ini if present
#------------------------------
if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser()
config.read(INI_FILE)
if config.has_option('spacewalk' , 'cache_age'):
CACHE_AGE = config.get('spacewalk' , 'cache_age')
if not options.org_number and config.has_option('spacewalk' , 'org_number'):
options.org_number = config.get('spacewalk' , 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id
#------------------------------
org_groups = {}
try:
for group in spacewalk_report('system-groups'):
org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s system-groups": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
# List out the known server from Spacewalk
#------------------------------
if options.list:
    # to build the "_meta" group with hostvars, first create a dictionary for later use
host_vars = {}
try:
for item in spacewalk_report('inventory'):
host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() )
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s inventory": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
groups = {}
meta = { "hostvars" : {} }
try:
for system in spacewalk_report('system-groups-systems'):
# first get org_id of system
org_id = org_groups[ system['spacewalk_group_id'] ]
# shall we add the org_id as prefix to the group name:
if options.prefix_org_name:
prefix = org_id + "-"
group_name = prefix + system['spacewalk_group_name']
else:
group_name = system['spacewalk_group_name']
# if we are limited to one organization:
if options.org_number:
if org_id == options.org_number:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
# or we list all groups and systems:
else:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s system-groups-systems": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
if options.human:
for group, systems in groups.iteritems():
print '[%s]\n%s\n' % (group, '\n'.join(systems))
else:
final = dict( [ (k, list(s)) for k, s in groups.iteritems() ] )
final["_meta"] = meta
print json.dumps( final )
#print json.dumps(groups)
sys.exit(0)
# Return detailed information about the given spacewalk host
#------------------------------
elif options.host:
host_details = {}
try:
for system in spacewalk_report('inventory'):
if system['spacewalk_hostname'] == options.host:
host_details = system
break
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s inventory": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
if options.human:
print 'Host: %s' % options.host
for k, v in host_details.iteritems():
print ' %s: %s' % (k, '\n '.join(v.split(';')))
else:
print json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) )
sys.exit(0)
else:
parser.print_help()
sys.exit(1)
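# For orientation, the --list output consumed by Ansible has roughly this shape
# (group and host names below are hypothetical; real values come from the
# Spacewalk reports):
#
#   {
#       "some-group": ["host1.example.com", "host2.example.com"],
#       "_meta": {
#           "hostvars": {
#               "host1.example.com": {"spacewalk_profile_name": "host1.example.com", ...}
#           }
#       }
#   }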
|
gpl-3.0
| -8,757,482,710,578,416,000 | -3,717,878,608,452,711,400 | 36.095238 | 152 | 0.617225 | false |
failys/CAIRIS
|
cairis/test/test_TraceAPI.py
|
1
|
4597
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
from io import StringIO
import os
import jsonpickle
from cairis.core.Trace import Trace
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.mio.ModelImport import importModelFile
from cairis.tools.JsonConverter import json_deserialize
import os
__author__ = 'Shamal Faily'
class TraceAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
def setUp(self):
self.logger = logging.getLogger(__name__)
self.new_tr = Trace(
fObjt = 'requirement',
fName = 'Dataset policy',
tObjt = 'vulnerability',
tName = 'Certificate ubiquity',
lbl = 'supports')
self.new_tr_dict = {
'session_id' : 'test',
'object': self.new_tr
}
def test_get_trace_dimensions(self):
method = 'test_get_trace_dimensions'
url = '/api/traces/dimensions/requirement/is_from/1?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
fromDims = jsonpickle.decode(responseData)
self.assertIsNotNone(fromDims, 'No results after deserialization')
self.logger.info('[%s] Traces found: %d', method, len(fromDims))
self.assertEqual(len(fromDims),6)
url = '/api/traces/dimensions/requirement/is_from/0?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
toDims = jsonpickle.decode(responseData)
self.assertIsNotNone(toDims, 'No results after deserialization')
self.logger.info('[%s] Traces found: %d', method, len(toDims))
self.assertEqual(len(toDims),2)
def test_get_all(self):
method = 'test_get_traces'
url = '/api/traces/environment/Psychosis?session_id=test'
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.get(url)
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
trs = jsonpickle.decode(responseData)
self.assertIsNotNone(trs, 'No results after deserialization')
self.logger.info('[%s] Traces found: %d', method, len(trs))
self.assertEqual(len(trs),2)
def test_post(self):
method = 'test_post_new'
rv = self.app.post('/api/traces', content_type='application/json', data=jsonpickle.encode(self.new_tr_dict))
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = json_deserialize(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
ackMsg = json_resp.get('message', None)
self.assertEqual(ackMsg, 'Dataset policy / Certificate ubiquity created')
def test_delete(self):
method = 'test_delete'
rv = self.app.delete('/api/traces/from_type/requirement/from_name/Dataset%20policy/to_type/vulnerability/to_name/Certificate%20ubiquity?session_id=test', content_type='application/json')
if (sys.version_info > (3,)):
responseData = rv.data.decode('utf-8')
else:
responseData = rv.data
self.logger.debug('[%s] Response data: %s', method, responseData)
json_resp = json_deserialize(responseData)
self.assertIsNotNone(json_resp, 'No results after deserialization')
ackMsg = json_resp.get('message', None)
self.assertEqual(ackMsg, 'Dataset policy / Certificate ubiquity deleted')
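# A hypothetical way to run this module directly with the standard test runner
# (CAIRIS_SRC must point at the cairis source tree, as assumed in setUpClass above):
#
#   CAIRIS_SRC=/path/to/cairis/cairis python -m unittest cairis.test.test_TraceAPI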
|
apache-2.0
| -7,420,382,674,511,554,000 | 8,655,199,403,645,160,000 | 36.991736 | 190 | 0.694583 | false |
sunilghai/avahi-llmnr
|
avahi-python/avahi-discover/SimpleGladeApp.py
|
14
|
11794
|
"""
SimpleGladeApp.py
Module that provides an object oriented abstraction to pygtk and libglade.
Copyright (C) 2004 Sandino Flores Moreno
"""
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os
import sys
import re
import tokenize
import gtk
import gtk.glade
import weakref
import inspect
__version__ = "1.0"
__author__ = 'Sandino "tigrux" Flores-Moreno'
def bindtextdomain(app_name, locale_dir=None):
"""
Bind the domain represented by app_name to the locale directory locale_dir.
It has the effect of loading translations, enabling applications for different
languages.
app_name:
        a domain to look for translations, typically the name of an application.
locale_dir:
a directory with locales like locale_dir/lang_isocode/LC_MESSAGES/app_name.mo
If omitted or None, then the current binding for app_name is used.
"""
try:
import locale
import gettext
locale.setlocale(locale.LC_ALL, "")
gtk.glade.bindtextdomain(app_name, locale_dir)
gettext.install(app_name, locale_dir, unicode=1)
except (IOError,locale.Error), e:
print "Warning", app_name, e
__builtins__.__dict__["_"] = lambda x : x
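# A typical call, with a hypothetical domain and locale directory:
#   bindtextdomain("avahi-discover", "/usr/share/locale")
# After this, _("some text") is available for translated strings (or as a
# pass-through identity function if the locale setup above failed).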
class SimpleGladeApp:
def __init__(self, path, root=None, domain=None, **kwargs):
"""
Load a glade file specified by glade_filename, using root as
root widget and domain as the domain for translations.
If it receives extra named arguments (argname=value), then they are used
as attributes of the instance.
path:
path to a glade filename.
            If glade_filename cannot be found, then it will be searched for in the
            same directory as the program (sys.argv[0])
root:
the name of the widget that is the root of the user interface,
usually a window or dialog (a top level widget).
            If None or omitted, the full user interface is loaded.
domain:
A domain to use for loading translations.
            If None or omitted, no translation is loaded.
**kwargs:
a dictionary representing the named extra arguments.
It is useful to set attributes of new instances, for example:
glade_app = SimpleGladeApp("ui.glade", foo="some value", bar="another value")
sets two attributes (foo and bar) to glade_app.
"""
if os.path.isfile(path):
self.glade_path = path
else:
glade_dir = os.path.dirname( sys.argv[0] )
self.glade_path = os.path.join(glade_dir, path)
for key, value in kwargs.items():
try:
setattr(self, key, weakref.proxy(value) )
except TypeError:
setattr(self, key, value)
self.glade = None
self.install_custom_handler(self.custom_handler)
self.glade = self.create_glade(self.glade_path, root, domain)
if root:
self.main_widget = self.get_widget(root)
else:
self.main_widget = None
self.normalize_names()
self.add_callbacks(self)
self.new()
def __repr__(self):
class_name = self.__class__.__name__
if self.main_widget:
root = gtk.Widget.get_name(self.main_widget)
repr = '%s(path="%s", root="%s")' % (class_name, self.glade_path, root)
else:
repr = '%s(path="%s")' % (class_name, self.glade_path)
return repr
def new(self):
"""
Method called when the user interface is loaded and ready to be used.
        At this moment, the widgets are loaded and can be referred to as self.widget_name
"""
pass
def add_callbacks(self, callbacks_proxy):
"""
It uses the methods of callbacks_proxy as callbacks.
The callbacks are specified by using:
Properties window -> Signals tab
in glade-2 (or any other gui designer like gazpacho).
Methods of classes inheriting from SimpleGladeApp are used as
callbacks automatically.
callbacks_proxy:
an instance with methods as code of callbacks.
It means it has methods like on_button1_clicked, on_entry1_activate, etc.
"""
self.glade.signal_autoconnect(callbacks_proxy)
def normalize_names(self):
"""
It is internally used to normalize the name of the widgets.
It means a widget named foo:vbox-dialog in glade
        is referred to as self.vbox_dialog in the code.
It also sets a data "prefixes" with the list of
prefixes a widget has for each widget.
"""
for widget in self.get_widgets():
widget_name = gtk.Widget.get_name(widget)
prefixes_name_l = widget_name.split(":")
prefixes = prefixes_name_l[ : -1]
widget_api_name = prefixes_name_l[-1]
widget_api_name = "_".join( re.findall(tokenize.Name, widget_api_name) )
gtk.Widget.set_name(widget, widget_api_name)
if hasattr(self, widget_api_name):
raise AttributeError("instance %s already has an attribute %s" % (self,widget_api_name))
else:
setattr(self, widget_api_name, widget)
if prefixes:
gtk.Widget.set_data(widget, "prefixes", prefixes)
def add_prefix_actions(self, prefix_actions_proxy):
"""
By using a gui designer (glade-2, gazpacho, etc)
        widgets can have a prefix in their names
like foo:entry1 or foo:label3
It means entry1 and label3 has a prefix action named foo.
Then, prefix_actions_proxy must have a method named prefix_foo which
        is called every time a widget with prefix foo is found, using the found widget
as argument.
prefix_actions_proxy:
An instance with methods as prefix actions.
It means it has methods like prefix_foo, prefix_bar, etc.
"""
prefix_s = "prefix_"
prefix_pos = len(prefix_s)
is_method = lambda t : callable( t[1] )
is_prefix_action = lambda t : t[0].startswith(prefix_s)
drop_prefix = lambda (k,w): (k[prefix_pos:],w)
members_t = inspect.getmembers(prefix_actions_proxy)
methods_t = filter(is_method, members_t)
prefix_actions_t = filter(is_prefix_action, methods_t)
prefix_actions_d = dict( map(drop_prefix, prefix_actions_t) )
for widget in self.get_widgets():
prefixes = gtk.Widget.get_data(widget, "prefixes")
if prefixes:
for prefix in prefixes:
if prefix in prefix_actions_d:
prefix_action = prefix_actions_d[prefix]
prefix_action(widget)
def custom_handler(self,
glade, function_name, widget_name,
str1, str2, int1, int2):
"""
Generic handler for creating custom widgets, internally used to
enable custom widgets (custom widgets of glade).
The custom widgets have a creation function specified in design time.
Those creation functions are always called with str1,str2,int1,int2 as
arguments, that are values specified in design time.
Methods of classes inheriting from SimpleGladeApp are used as
creation functions automatically.
If a custom widget has create_foo as creation function, then the
method named create_foo is called with str1,str2,int1,int2 as arguments.
"""
try:
handler = getattr(self, function_name)
return handler(str1, str2, int1, int2)
except AttributeError:
return None
def gtk_widget_show(self, widget, *args):
"""
Predefined callback.
The widget is showed.
Equivalent to widget.show()
"""
widget.show()
def gtk_widget_hide(self, widget, *args):
"""
Predefined callback.
The widget is hidden.
Equivalent to widget.hide()
"""
widget.hide()
def gtk_widget_grab_focus(self, widget, *args):
"""
Predefined callback.
The widget grabs the focus.
Equivalent to widget.grab_focus()
"""
widget.grab_focus()
def gtk_widget_destroy(self, widget, *args):
"""
Predefined callback.
The widget is destroyed.
Equivalent to widget.destroy()
"""
widget.destroy()
def gtk_window_activate_default(self, window, *args):
"""
Predefined callback.
The default widget of the window is activated.
Equivalent to window.activate_default()
"""
        window.activate_default()
def gtk_true(self, *args):
"""
Predefined callback.
Equivalent to return True in a callback.
Useful for stopping propagation of signals.
"""
return True
def gtk_false(self, *args):
"""
Predefined callback.
Equivalent to return False in a callback.
"""
return False
def gtk_main_quit(self, *args):
"""
Predefined callback.
Equivalent to self.quit()
"""
self.quit()
def main(self):
"""
Starts the main loop of processing events.
The default implementation calls gtk.main()
        Useful for applications that need a non-GTK main loop.
        For example, applications based on gstreamer need to override
        this method with gst.main()
Do not directly call this method in your programs.
Use the method run() instead.
"""
gtk.main()
def quit(self):
"""
Quit processing events.
The default implementation calls gtk.main_quit()
        Useful for applications that need a non-GTK main loop.
        For example, applications based on gstreamer need to override
        this method with gst.main_quit()
"""
gtk.main_quit()
def run(self):
"""
Starts the main loop of processing events checking for Control-C.
        The default implementation checks whether a Control-C is pressed,
then calls on_keyboard_interrupt().
Use this method for starting programs.
"""
try:
self.main()
except KeyboardInterrupt:
self.on_keyboard_interrupt()
def on_keyboard_interrupt(self):
"""
This method is called by the default implementation of run()
after a program is finished by pressing Control-C.
"""
pass
def install_custom_handler(self, custom_handler):
gtk.glade.set_custom_handler(custom_handler)
def create_glade(self, glade_path, root, domain):
return gtk.glade.XML(self.glade_path, root, domain)
def get_widget(self, widget_name):
return self.glade.get_widget(widget_name)
def get_widgets(self):
return self.glade.get_widget_prefix("")
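# A minimal usage sketch (glade file, widget and handler names are hypothetical):
#
#   class MyApp(SimpleGladeApp):
#       def __init__(self):
#           SimpleGladeApp.__init__(self, "my_app.glade", root="main_window",
#                                   domain="my-app")
#
#       def new(self):
#           # widgets such as self.main_window are available here
#           pass
#
#       def on_quit_button_clicked(self, widget, *args):
#           self.quit()
#
#   app = MyApp()
#   app.run()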
|
lgpl-2.1
| -6,769,401,730,784,383,000 | -4,240,655,300,541,478,000 | 33.58651 | 104 | 0.61031 | false |
edx/ansible
|
v2/ansible/utils/path.py
|
14
|
1306
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
__all__ = ['is_executable', 'unfrackpath']
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
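# Illustrative calls (results depend on the local filesystem and environment):
#
#   is_executable('/bin/ls')              # truthy when any execute bit is set
#   unfrackpath('$HOME/../../var/mail')   # e.g. '/var/spool/mail', per the docstring above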
|
gpl-3.0
| -1,988,925,445,133,629,700 | -3,007,730,885,752,748,500 | 36.314286 | 147 | 0.716692 | false |
RiccardoPecora/MP
|
Lib/distutils/bcppcompiler.py
|
59
|
15335
|
"""distutils.bcppcompiler
Contains BCPPCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
__revision__ = "$Id$"
import os
from distutils.errors import (DistutilsExecError, CompileError, LibError,
LinkError, UnknownFileError)
from distutils.ccompiler import CCompiler, gen_preprocess_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
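# A rough usage sketch (file names are hypothetical; distutils normally obtains this
# class via distutils.ccompiler.new_compiler(compiler='bcpp') rather than directly):
#
#   compiler = BCPPCompiler(verbose=1)
#   objects = compiler.compile(['example.c'], output_dir='build')
#   compiler.link(CCompiler.SHARED_OBJECT, objects, 'example.pyd', output_dir='build')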
|
gpl-3.0
| 3,165,801,749,781,191,000 | 7,805,848,436,276,815,000 | 36.92132 | 80 | 0.513401 | false |
hs634/algorithms
|
python/test.py
|
1
|
1435
|
#
# inputs outputs
# single sin
# simple sim
# solution so
# a a
#
class Node:
def __init__(self, val):
self.val = val
self.children = [0] * 26
self.is_end = False
self.word_count = 1
def get_unique_prefixes(words):
root = Node(0)
root.word_count += 1
cur = root
for word in words:
cur = root
for ch in word:
index = ord(ch) - 97
if cur.children[index] == 0:
n = Node(ch)
cur.children[index] = n
cur = n
else:
cur.word_count += 1
cur = cur.children[index]
cur.is_end = True
# print root.children[ord('s')-97].word_count
output = []
for word in words:
prefix = ''
cur = root
for ch in word:
prefix += ch
if cur.word_count <= 1:
break
cur = cur.children[ord(ch) - 97]
output.append(prefix)
return output
words = ['single', 'simple', 'solution', 'a']
print get_unique_prefixes(words)
words = ['single', 'simple']
print get_unique_prefixes(words)
words = ['abcd', 'geft', 'aaaa']
print get_unique_prefixes(words)
words = ['abcd', 'abcx']
print get_unique_prefixes(words)
# /usr/bin/python /Users/harsh/giths634/algorithms/python/test.py
# ['si', 'si', 'so', 'a']
# ['si', 'si']
# ['a', 'g', 'a']
# ['abc', 'abc']
|
mit
| 1,093,866,798,140,242,700 | -1,807,689,148,924,231,200 | 19.5 | 65 | 0.502439 | false |
credativ/pulp
|
server/test/unit/plugins/file/test_distributor.py
|
4
|
13411
|
from os import readlink
import copy
import csv
import errno
import os
import shutil
import tempfile
import unittest
from mock import Mock, MagicMock, patch
from pulp.common.plugins.distributor_constants import MANIFEST_FILENAME
from pulp.devel.mock_distributor import get_publish_conduit
from pulp.plugins.file.distributor import FileDistributor, FilePublishProgressReport, BUILD_DIRNAME
from pulp.plugins.model import Repository, Unit
DATA_DIR = os.path.realpath("../../../data/")
SAMPLE_RPM = 'pulp-test-package-0.3.1-1.fc11.x86_64.rpm'
SAMPLE_FILE = 'test-override-pulp.conf'
class FileDistributorTest(unittest.TestCase):
"""
Tests the file distributor base class
"""
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.target_dir = os.path.join(self.temp_dir, "target")
self.repo = MagicMock(spec=Repository)
self.repo.id = "foo"
self.repo.working_dir = self.temp_dir
self.unit = Unit('RPM', {'name': SAMPLE_RPM, 'size': 1, 'checksum': 'sum1'}, {},
os.path.join(DATA_DIR, SAMPLE_RPM))
self.publish_conduit = get_publish_conduit(existing_units=[self.unit, ])
def tearDown(self):
shutil.rmtree(self.temp_dir)
def create_distributor_with_mocked_api_calls(self):
distributor = FileDistributor()
distributor.get_hosting_locations = Mock()
distributor.get_hosting_locations.return_value = [self.target_dir, ]
distributor.post_repo_publish = Mock()
return distributor
def test_metadata_not_implemented(self):
self.assertRaises(NotImplementedError, FileDistributor.metadata)
def test_validate_config_not_implemented(self):
distributor = FileDistributor()
self.assertRaises(NotImplementedError, distributor.validate_config, None, None, None)
def test_get_hosting_locations_not_implemented(self):
distributor = FileDistributor()
host_locations = distributor.get_hosting_locations(None, None)
self.assertEquals(0, len(host_locations))
def test_post_repo_publish_not_implemented(self):
distributor = FileDistributor()
# ensure that this doesn't raise an error
distributor.post_repo_publish(None, None)
def test_repo_publish_api_calls(self):
distributor = self.create_distributor_with_mocked_api_calls()
result = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(result.success_flag)
self.assertTrue(distributor.get_hosting_locations.called)
self.assertTrue(distributor.post_repo_publish.called)
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as complete
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_COMPLETE)
def test_repo_publish_files_placed_properly(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# test if the link points to the correct place
link_target = os.readlink(target_file)
self.assertEquals(link_target, os.path.join(DATA_DIR, SAMPLE_RPM))
def test_repo_publish_metadata_writing(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
with open(os.path.join(self.target_dir, MANIFEST_FILENAME), 'rb') as f:
reader = csv.reader(f)
row = reader.next()
self.assertEquals(row[0], self.unit.unit_key['name'])
self.assertEquals(row[1], self.unit.unit_key['checksum'])
self.assertEquals(row[2], str(self.unit.unit_key['size']))
def test_repo_publish_handles_errors(self):
"""
Make sure that publish() does the right thing with the report when there is an error.
"""
distributor = self.create_distributor_with_mocked_api_calls()
distributor.post_repo_publish.side_effect = Exception('Rawr!')
report = distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertFalse(report.success_flag)
self.assertEqual(report.summary['state'], FilePublishProgressReport.STATE_FAILED)
self.assertEqual(report.summary['error_message'], 'Rawr!')
self.assertTrue('Rawr!' in report.summary['traceback'])
# The publish_conduit should have had two set_progress calls. One to start the IN_PROGRESS
# state, and the second to mark it as failed
self.assertEqual(self.publish_conduit.set_progress.call_count, 2)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[0][1][0]['state'],
FilePublishProgressReport.STATE_IN_PROGRESS)
self.assertEqual(self.publish_conduit.set_progress.mock_calls[1][1][0]['state'],
FilePublishProgressReport.STATE_FAILED)
def test_republish_after_unit_removal(self):
"""
This test checks for an issue[0] we had where publishing an ISO repository, removing an ISO,
and then republishing would leave that removed ISO's symlink in the repository even though
it had been removed from the manifest. This test asserts that the republished repository no
longer contains the removed ISO.
[0] https://bugzilla.redhat.com/show_bug.cgi?id=970795
"""
# Publish a repository
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
target_file = os.path.join(self.target_dir, SAMPLE_RPM)
# test if the link was created
self.assertTrue(os.path.islink(target_file))
# publish a new repo with a different unit in it
cloned_unit = copy.deepcopy(self.unit)
cloned_unit.unit_key['name'] = 'foo.rpm'
new_conduit = get_publish_conduit(existing_units=[cloned_unit, ])
distributor.publish_repo(self.repo, new_conduit, {})
# Make sure the new rpm is linked
self.assertTrue(os.path.islink(os.path.join(self.target_dir, 'foo.rpm')))
# Ensure the old rpm is no longer included
self.assertFalse(os.path.islink(target_file))
def test_distributor_removed_calls_unpublish(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.unpublish_repo = Mock()
distributor.distributor_removed(self.repo, {})
self.assertTrue(distributor.unpublish_repo.called)
def test_unpublish_repo(self):
distributor = self.create_distributor_with_mocked_api_calls()
distributor.publish_repo(self.repo, self.publish_conduit, {})
self.assertTrue(os.path.exists(self.target_dir))
distributor.unpublish_repo(self.repo, {})
self.assertFalse(os.path.exists(self.target_dir))
def test__rmtree_if_exists(self):
"""
Let's just make sure this simple thing doesn't barf.
"""
a_directory = os.path.join(self.temp_dir, 'a_directory')
test_filename = os.path.join(a_directory, 'test.txt')
os.makedirs(a_directory)
with open(test_filename, 'w') as test:
test.write("Please don't barf.")
# This should not cause any problems, and test.txt should still exist
distributor = self.create_distributor_with_mocked_api_calls()
distributor._rmtree_if_exists(os.path.join(self.temp_dir, 'fake_path'))
self.assertTrue(os.path.exists(test_filename))
# Now let's remove a_directory
distributor._rmtree_if_exists(a_directory)
self.assertFalse(os.path.exists(a_directory))
def test__symlink_units(self):
"""
Make sure that the _symlink_units creates all the correct symlinks.
"""
distributor = self.create_distributor_with_mocked_api_calls()
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
os.symlink('/some/weird/path',
os.path.join(build_dir, self.unit.unit_key['name']))
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name'], ])
expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
self.assertTrue(os.path.islink(expected_symlink_path))
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
self.assertEqual(os.path.realpath(expected_symlink_path), expected_symlink_destination)
@patch('os.symlink', side_effect=os.symlink)
def test__symlink_units_existing_correct_link(self, symlink):
"""
Make sure that the _symlink_units handles an existing correct link well.
"""
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
os.symlink(expected_symlink_destination,
os.path.join(build_dir, self.unit.unit_key['name']))
# Now let's reset the Mock so that we can make sure it doesn't get called during _symlink
symlink.reset_mock()
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
# The call count for symlink should be 0, because the _symlink_units call should have
# noticed that the symlink was already correct and thus should have skipped it
self.assertEqual(symlink.call_count, 0)
expected_symlink_path = os.path.join(build_dir, self.unit.unit_key['name'])
self.assertTrue(os.path.islink(expected_symlink_path))
self.assertEqual(os.path.realpath(expected_symlink_path),
os.path.realpath(expected_symlink_destination))
@patch('os.readlink')
def test__symlink_units_os_error(self, readlink):
"""
Make sure that the _symlink_units handles an OSError correctly, for the case where it
doesn't raise EINVAL. We already have a test that raises EINVAL (test__symlink_units places
an ordinary file there.)
"""
os_error = OSError()
# This would be an unexpected error for reading a symlink!
os_error.errno = errno.ENOSPC
readlink.side_effect = os_error
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
expected_symlink_destination = os.path.join(DATA_DIR, self.unit.unit_key['name'])
os.symlink(expected_symlink_destination,
os.path.join(build_dir, self.unit.unit_key['name']))
try:
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
self.fail('An OSError should have been raised, but was not!')
except OSError, e:
self.assertEqual(e.errno, errno.ENOSPC)
@patch('os.readlink')
def test__symlink_units_EINVAL_os_error(self, mock_readlink):
"""
Make sure that the _symlink_units handles an OSError correctly, for the case where it
raises EINVAL. We already have a test that raises EINVAL (test__symlink_units places
an ordinary file there.)
"""
os_error = OSError()
        # EINVAL is the error readlink() raises when the existing path is not a symlink
os_error.errno = errno.EINVAL
mock_readlink.side_effect = os_error
# There's some logic in _symlink_units to handle preexisting files and symlinks, so let's
# create some fakes to see if it does the right thing
build_dir = os.path.join(self.temp_dir, BUILD_DIRNAME)
os.makedirs(build_dir)
original_link = os.path.join(build_dir, self.unit.unit_key['name'])
old_target = os.path.join(DATA_DIR, SAMPLE_FILE)
os.symlink(old_target, original_link)
distributor = self.create_distributor_with_mocked_api_calls()
distributor._symlink_unit(build_dir, self.unit, [self.unit.unit_key['name']])
# make sure the symlink was deleted
self.assertTrue(os.path.islink(original_link))
created_link = readlink(original_link)
self.assertNotEqual(old_target, created_link)
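# For context, a concrete distributor fills in the hooks that
# create_distributor_with_mocked_api_calls mocks above; a hypothetical sketch:
#
#   class ExampleFileDistributor(FileDistributor):
#       def get_hosting_locations(self, repo, config):
#           return ['/var/lib/pulp/published/%s' % repo.id]
#
#       def post_repo_publish(self, repo, config):
#           pass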
|
gpl-2.0
| 8,854,755,446,488,009,000 | 729,011,492,783,981,600 | 46.388693 | 100 | 0.665946 | false |