repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (class label, 991 values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (class label, 15 values)
---|---|---|---|---|---|
JackKelly/neuralnilm_prototype
|
scripts/e127.py
|
2
|
4534
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Looking at layer-by-layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
501: {
'remove_from': -3,
'new_layers':
[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
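# Editor's note (illustrative, not part of the original experiment script): with
# this file saved as e127.py, NAME evaluates to 'e127', so main() runs the single
# experiment 'a' and writes its output under os.path.join(PATH, 'e127a').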
|
mit
|
devaha/archagent
|
node_modules/grunt-plugin/node_modules/npm/node_modules/node-gyp/gyp/test/library/gyptest-static.py
|
430
|
2241
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple build of a "Hello, world!" program with static libraries,
including verifying that libraries are rebuilt correctly when functions
move between libraries.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib1',
chdir='src')
test.relocate('src', 'relocate/src')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib2',
chdir='relocate/src')
# Update program.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('Hello', 'Hello again')
test.write('relocate/src/program.c', contents)
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib2_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.run_gyp('library.gyp',
'-Dlibrary=static_library',
'-Dmoveable_function=lib1',
chdir='relocate/src')
# Update program.c and lib2.c to force a rebuild.
test.sleep()
contents = test.read('relocate/src/program.c')
contents = contents.replace('again', 'again again')
test.write('relocate/src/program.c', contents)
# TODO(sgk): we have to force a rebuild of lib2 so that it weeds out
# the "moved" module. This should be done in gyp by adding a dependency
# on the generated .vcproj file itself.
test.touch('relocate/src/lib2.c')
test.build('library.gyp', test.ALL, chdir='relocate/src')
expect = """\
Hello again again from program.c
Hello from lib1.c
Hello from lib2.c
Hello from lib1_moveable.c
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.pass_test()
|
gpl-2.0
|
badock/nova
|
nova/tests/api/ec2/test_apirequest.py
|
11
|
3502
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the API Request internals."""
import copy
from oslo.utils import timeutils
from nova.api.ec2 import apirequest
from nova import test
class APIRequestTestCase(test.NoDBTestCase):
def setUp(self):
super(APIRequestTestCase, self).setUp()
self.req = apirequest.APIRequest("FakeController", "FakeAction",
"FakeVersion", {})
self.resp = {
'string': 'foo',
'int': 1,
'long': long(1),
'bool': False,
'dict': {
'string': 'foo',
'int': 1,
}
}
# The previous will produce an output that looks like the
# following (excusing line wrap for 80 cols):
#
# <FakeActionResponse xmlns="http://ec2.amazonaws.com/doc/\
# FakeVersion/">
# <requestId>uuid</requestId>
# <int>1</int>
# <dict>
# <int>1</int>
# <string>foo</string>
# </dict>
# <bool>false</bool>
# <string>foo</string>
# </FakeActionResponse>
#
# We don't attempt to ever test for the full document because
# hash seed order might impact its rendering order. The fact
# that running the function doesn't explode is a big part of
# the win.
def test_render_response_ascii(self):
data = self.req._render_response(self.resp, 'uuid')
self.assertIn('<FakeActionResponse xmlns="http://ec2.amazonaws.com/'
'doc/FakeVersion/', data)
self.assertIn('<int>1</int>', data)
self.assertIn('<string>foo</string>', data)
def test_render_response_utf8(self):
resp = copy.deepcopy(self.resp)
resp['utf8'] = unichr(40960) + u'abcd' + unichr(1972)
data = self.req._render_response(resp, 'uuid')
self.assertIn('<utf8>ꀀabcd޴</utf8>', data)
# Tests for individual data element format functions
def test_return_valid_isoformat(self):
"""Ensure that the ec2 api returns datetime in xs:dateTime
(which apparently isn't datetime.isoformat())
NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
"""
conv = apirequest._database_to_isoformat
# sqlite database representation with microseconds
time_to_convert = timeutils.parse_strtime("2011-02-21 20:14:10.634276",
"%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(conv(time_to_convert), '2011-02-21T20:14:10.634Z')
# mysql database representation (no microseconds)
time_to_convert = timeutils.parse_strtime("2011-02-21 19:56:18",
"%Y-%m-%d %H:%M:%S")
self.assertEqual(conv(time_to_convert), '2011-02-21T19:56:18.000Z')
|
apache-2.0
|
kingsfordgroup/armatus
|
scripts/HiCvis.py
|
1
|
7843
|
#!/usr/bin/env python
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
import argparse
import math
from scipy.sparse import coo_matrix
def plotall(datamat,domains1,domains2,bounds,legendname1,legendname2,outputname):
""" Show heatmap of Hi-C data along with any domain sets given
:param datamat: Hi-C data matrix as numpy array
:param domains1: nx2 list of domains (optional, use [] to just see heatmap of Hi-C matrix)
:param domains2: nx2 list of domains (optional, use [] if no second set of domains)
:param bounds: (x,y) to view only bins between x and y (optional, use () to see entire chromosome)
:param legendname1: legend label for first set of domains
:param legendname2: legend label for second set of domains
:param outputname: filename of image to be saved (optional - use [] to view instead of save)
:return: either show image (if outputname == []) or save image as outputname
"""
if bounds == (): # plot full Hi-C matrix with all TADs
logdata = np.ma.log(datamat)
logdata = logdata.filled(0)
labelspacing = int(math.floor(round(len(logdata),-int(math.floor(math.log10(len(logdata)))))/10))
ax = sb.heatmap(logdata,cbar=False,xticklabels=labelspacing,yticklabels=labelspacing)
if domains1 != []:
for interval in domains1: # plot outline of each domain
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[0]+1),'g')
plt.plot((interval[1],interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'g')
dom1Artist = plt.Line2D((0,1),(0,0), color='green', linestyle='solid')
if domains2 != []:
for interval in domains2:
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[1],len(logdata)-interval[1]),'b')
plt.plot((interval[0]-1,interval[0]-1),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'b')
dom2Artist = plt.Line2D((0,1),(0,0), color='blue', linestyle='solid')
else: # show only range of matrix between bounds
logdata = np.ma.log(datamat[bounds[0]:bounds[1],bounds[0]:bounds[1]])
logdata = logdata.filled(0)
labelspacing = int(math.floor(round(len(logdata),-int(math.floor(math.log10(len(logdata)))))/10))
ax = sb.heatmap(logdata,cbar=False,xticklabels=labelspacing,yticklabels=labelspacing)
if domains1 != []:
for interval in domains1:
if interval[0] >= bounds[0] and interval[1] <= bounds[1]:
interval -= bounds[0]
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[0]+1),'g')
plt.plot((interval[1],interval[1]),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'g')
dom1Artist = plt.Line2D((0,1),(0,0), color='green', linestyle='solid')
if domains2 != []:
for interval in domains2:
if interval[0] >= bounds[0] and interval[1] <= bounds[1]:
interval -= bounds[0]
plt.plot((interval[0]-1,interval[1]),(len(logdata)-interval[1],len(logdata)-interval[1]),'b')
plt.plot((interval[0]-1,interval[0]-1),(len(logdata)-interval[0]+1,len(logdata)-interval[1]),'b')
dom2Artist = plt.Line2D((0,1),(0,0), color='blue', linestyle='solid')
if legendname1 and legendname2:
legend = ax.legend([dom1Artist,dom2Artist], [legendname1, legendname2],frameon = 1)
legendframe = legend.get_frame()
legendframe.set_facecolor('white')
legendframe.set_edgecolor('black')
elif legendname1:
legend = ax.legend([dom1Artist],[legendname1])
legendframe = legend.get_frame()
legendframe.set_facecolor('white')
legendframe.set_edgecolor('black')
# save image to file if filename was given, .png is default if no extension given
if outputname:
plt.savefig(outputname)
else: # just display image
plt.show()
def parseRaoFormat(datamat,res):
""" turn sparse Rao data format into dense matrix for heatmap
:param datamat: Hi-C data in sparse format as numpy array (n x 3)
:param res: resolution of data
:return: dense Hi-C data matrix
"""
datamat[:,0:2] = datamat[:,0:2]/res
datamat = coo_matrix((datamat[:,2], (datamat[:,0],datamat[:,1]) ))
datamat = datamat.todense()
if datamat.shape[0] > datamat.shape[1]:
# add column(s) of zeros to make square matrix
ncols = datamat.shape[0] - datamat.shape[1]
sqmat = np.zeros((datamat.shape[0],datamat.shape[0]))
sqmat[:,:-1*ncols] = datamat
datamat = sqmat
elif datamat.shape[1] > datamat.shape[0]:
# add row(s) of zeros to make square matrix
nrows = datamat.shape[1] - datamat.shape[0]
sqmat = np.zeros((datamat.shape[1],datamat.shape[1]))
sqmat[:-1*nrows,:] = datamat
datamat = sqmat
datamat = datamat + np.transpose(datamat) - np.diagonal(datamat)*np.identity(len(datamat))
return datamat
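# Editor's illustration (hypothetical data, not part of the original script):
# parseRaoFormat expects three tab-separated columns per row, namely
# (bin_start, bin_start, contact_count), at a fixed resolution. With res = 10000,
# rows such as
#   0      0      10.0
#   0      10000   4.0
#   10000  20000   2.0
# map to matrix indices (0, 0), (0, 1) and (1, 2); the result is zero-padded to a
# square matrix and symmetrised before being returned for plotting.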
def main(datafile, res, domainfile1, domainfile2, domainres1, domainres2, windowbounds, legendname1, legendname2, outputname):
datamat = np.genfromtxt(datafile,delimiter='\t')
if datamat.shape[1] == 3: # Rao format
datamat = parseRaoFormat(datamat, res)
else: # remove any NaNs
datamat = datamat[~np.isnan(datamat)]
datamat = np.reshape(datamat,(np.sqrt(len(datamat)), np.sqrt(len(datamat))))
if domainfile1:
domains1 = np.genfromtxt(domainfile1,delimiter='\t')
domains1 = domains1[~np.isnan(domains1)]/domainres1
domains1 = np.reshape(domains1,(len(domains1)/2,2))
else:
domains1 = []
if domainfile2:
domains2 = np.genfromtxt(domainfile2,delimiter='\t')
domains2 = domains2[~np.isnan(domains2)]/domainres2
domains2 = np.reshape(domains2,(len(domains2)/2,2))
else:
domains2 = []
if windowbounds:
bounds = (int(windowbounds[0]),int(windowbounds[1]))
else:
bounds = ()
if not legendname1: # use the domain file names as legend labels if none are given
legendname1 = domainfile1
if not legendname2:
legendname2 = domainfile2
plotall(datamat,domains1,domains2,bounds,legendname1,legendname2,outputname)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Domain visualization tool for Hi-C data.')
parser.add_argument('-i', metavar='inputFile', help='raw Hi-C data filename (tab-delimited text file of Hi-C data or Rao format)')
parser.add_argument('-r', metavar='Resolution', default = [], type=int, help='Hi-C Resolution (only needed if using Rao data format)')
parser.add_argument('-b', metavar=('startBound','endBound'), nargs=2, default=(), help='Bounds for viewing window (optional)')
parser.add_argument('-d1', metavar='domainFile1', default=[], help='TAD file')
parser.add_argument('-d2', metavar='domainFile2', default=[], help='second TAD file (optional)')
parser.add_argument('-dr1', metavar='domainResolution1', type=int, default=1, help='Resolution of domains in domainFile1')
parser.add_argument('-dr2', metavar='domainResolution2', type=int, default=1, help='Resolution of domains in domainFile2')
parser.add_argument('-l1', metavar='legendName1', default=[], type=str, help='Legend name for first set of domains')
parser.add_argument('-l2', metavar='legendName2', default=[], type=str, help='Legend name for second set of domains')
parser.add_argument('-o', metavar='outputFile', default=[], type=str, help='Filename for saved image file')
args = parser.parse_args()
main(args.i, args.r, args.d1, args.d2, args.dr1, args.dr2, args.b, args.l1, args.l2, args.o)
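# Editor's note, an illustrative invocation (file names are hypothetical):
#   python HiCvis.py -i chr1_matrix.txt -d1 domains.txt -dr1 10000 -o chr1.png
# -r is only needed for sparse Rao-format input; -b, -d2 and the legend/output
# options are optional.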
|
bsd-2-clause
|
xpansa/server-tools
|
users_ldap_populate/model/users_ldap.py
|
25
|
3768
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp.osv import orm
import logging
_logger = logging.getLogger(__name__)
try:
from ldap.filter import filter_format
except ImportError:
_logger.debug('Can not `from ldap.filter import filter_format`.')
class CompanyLDAP(orm.Model):
_inherit = 'res.company.ldap'
def action_populate(self, cr, uid, ids, context=None):
"""
Prepopulate the user table from one or more LDAP resources.
Obviously, the option to create users must be toggled in
the LDAP configuration.
Return the number of users created (as far as we can tell).
"""
if isinstance(ids, (int, float)):
ids = [ids]
users_pool = self.pool.get('res.users')
users_no_before = users_pool.search(
cr, uid, [], context=context, count=True)
logger = logging.getLogger('orm.ldap')
logger.debug("action_populate called on res.company.ldap ids %s", ids)
for conf in self.get_ldap_dicts(cr, ids):
if not conf['create_user']:
continue
attribute_match = re.search(
r'([a-zA-Z_]+)=\%s', conf['ldap_filter'])
if attribute_match:
login_attr = attribute_match.group(1)
else:
raise orm.except_orm(
"No login attribute found",
"Could not extract login attribute from filter %s" %
conf['ldap_filter'])
ldap_filter = filter_format(conf['ldap_filter'] % '*', ())
for result in self.query(conf, ldap_filter):
self.get_or_create_user(
cr, uid, conf, result[1][login_attr][0], result)
users_no_after = users_pool.search(
cr, uid, [], context=context, count=True)
users_created = users_no_after - users_no_before
logger.debug("%d users created", users_created)
return users_created
def populate_wizard(self, cr, uid, ids, context=None):
"""
GUI wrapper for the populate method that reports back
the number of users created.
"""
if not ids:
return
if isinstance(ids, (int, float)):
ids = [ids]
wizard_obj = self.pool.get('res.company.ldap.populate_wizard')
res_id = wizard_obj.create(
cr, uid, {'ldap_id': ids[0]}, context=context)
return {
'name': wizard_obj._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': wizard_obj._name,
'domain': [],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': res_id,
'nodestroy': True,
}
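# Editor's illustration (hypothetical filter, not part of the upstream module):
# action_populate() expects the configured LDAP filter to contain exactly one
# "<attribute>=%s" placeholder. For a filter such as '(uid=%s)', the regex above
# captures 'uid' as the login attribute, and filter_format(conf['ldap_filter'] % '*', ())
# yields '(uid=*)', i.e. every entry carrying a uid, which the loop then feeds to
# get_or_create_user().
def _example_login_attr(ldap_filter='(uid=%s)'):
    match = re.search(r'([a-zA-Z_]+)=\%s', ldap_filter)
    return match.group(1) if match else None  # -> 'uid'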
|
agpl-3.0
|
alexzoo/python
|
selenium_tests/env/lib/python3.6/site-packages/pip/_vendor/requests/compat.py
|
327
|
1687
|
# -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
# Note: We've patched out simplejson support in pip because it prevents
# upgrading simplejson on Windows.
# try:
# import simplejson as json
# except (ImportError, SyntaxError):
# # simplejson does not support Python 3.2, it throws a SyntaxError
# # because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
|
apache-2.0
|
xiangel/hue
|
apps/metastore/src/metastore/settings.py
|
29
|
1051
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = ['metastore']
NICE_NAME = "Metastore Manager"
REQUIRES_HADOOP = True
ICON = "metastore/art/icon_metastore_48.png"
MENU_INDEX = 20
IS_URL_NAMESPACED = True
PERMISSION_ACTIONS = (
("write", "Allow DDL operations. Need the app access too."),
)
|
apache-2.0
|
dnozay/lettuce
|
tests/integration/lib/Django-1.3/django/contrib/messages/storage/session.py
|
456
|
1213
|
from django.contrib.messages.storage.base import BaseStorage
class SessionStorage(BaseStorage):
"""
Stores messages in the session (that is, django.contrib.sessions).
"""
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE_CLASSES list."
super(SessionStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return self.request.session.get(self.session_key), True
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = messages
else:
self.request.session.pop(self.session_key, None)
return []
|
gpl-3.0
|
sunyi00/jenkins-job-builder
|
jenkins_jobs/sphinx/yaml.py
|
42
|
4925
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Most of this code originated in sphinx.domains.python and
# sphinx.ext.autodoc and has been only slightly adapted for use in
# subclasses here.
# :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
# :license: BSD, see LICENSE for details.
import re
from sphinx.ext.autodoc import Documenter, FunctionDocumenter
from sphinx.domains.python import PyModulelevel, _pseudo_parse_arglist
from sphinx import addnodes
from sphinx.locale import _
yaml_sig_re = re.compile('yaml:\s*(.*)')
class PyYAMLFunction(PyModulelevel):
def handle_signature(self, sig, signode):
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
name_prefix = None
name = sig
arglist = None
retann = None
# determine module and class name (if applicable), as well as full name
modname = self.options.get(
'module', self.env.temp_data.get('py:module'))
classname = self.env.temp_data.get('py:class')
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_returns(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
_pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_returns(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
def get_index_text(self, modname, name_cls):
return _('%s (in module %s)') % (name_cls[0], modname)
class YAMLFunctionDocumenter(FunctionDocumenter):
priority = FunctionDocumenter.priority + 10
objtype = 'yamlfunction'
directivetype = 'yamlfunction'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
if not FunctionDocumenter.can_document_member(member, membername,
isattr, parent):
return False
if member.__doc__ is not None and yaml_sig_re.match(member.__doc__):
return True
return False
def _find_signature(self, encoding=None):
docstrings = Documenter.get_doc(self, encoding, 2)
if len(docstrings) != 1:
return
doclines = docstrings[0]
setattr(self, '__new_doclines', doclines)
if not doclines:
return
# match first line of docstring against signature RE
match = yaml_sig_re.match(doclines[0])
if not match:
return
name = match.group(1)
# ok, now jump over remaining empty lines and set the remaining
# lines as the new doclines
i = 1
while i < len(doclines) and not doclines[i].strip():
i += 1
setattr(self, '__new_doclines', doclines[i:])
return name
def get_doc(self, encoding=None, ignore=1):
lines = getattr(self, '__new_doclines', None)
if lines is not None:
return [lines]
return Documenter.get_doc(self, encoding, ignore)
def format_signature(self):
result = self._find_signature()
self._name = result
return ''
def format_name(self):
return self._name
def setup(app):
app.add_autodocumenter(YAMLFunctionDocumenter)
app.add_directive_to_domain('py', 'yamlfunction', PyYAMLFunction)
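# Editor's illustration (hypothetical function, not part of the upstream module):
# YAMLFunctionDocumenter only documents callables whose docstring begins with a
# line matching yaml_sig_re; that line supplies the displayed YAML name and is
# stripped from the rendered documentation by _find_signature()/get_doc().
def _example_macro(registry, xml_parent, data):
    """yaml: example-macro
    Hypothetical macro shown only to illustrate the docstring convention the
    documenter above looks for.
    """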
|
apache-2.0
|
teltek/edx-platform
|
common/test/acceptance/tests/studio/test_studio_course_team.py
|
4
|
15689
|
"""
Acceptance tests for course in studio
"""
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.index import DashboardPage
from common.test.acceptance.pages.studio.users import CourseTeamPage
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
class CourseTeamPageTest(StudioCourseTest):
""" As a course author, I want to be able to add others to my team """
shard = 2
def _make_user(self, username):
""" Registers user and returns user representation dictionary as expected by `log_in` function """
user = {
'username': username,
'email': username + "@example.com",
'password': username + '123$%^'
}
AutoAuthPage(
self.browser, no_login=True,
username=user.get('username'), email=user.get('email'), password=user.get('password')
).visit()
return user
def _update_user(self, user_info):
"""
Update user with provided `user_info`
Arguments:
`user_info`: dictionary containing values of attributes to be updated
"""
AutoAuthPage(
self.browser, no_login=True, **user_info
).visit()
def setUp(self, is_staff=False):
"""
Install a course with no content using a fixture.
"""
super(CourseTeamPageTest, self).setUp(is_staff)
self.other_user = self._make_user('other')
self.dashboard_page = DashboardPage(self.browser)
self.page = CourseTeamPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self._go_to_course_team_page()
def _go_to_course_team_page(self):
""" Opens Course Team page """
self.page.visit()
self.page.wait_until_no_loading_indicator()
def _refresh_page(self):
"""
Reload the page.
"""
self.page = CourseTeamPage(
self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
)
self._go_to_course_team_page()
def _assert_current_course(self, visible=True):
""" Checks if current course is accessible to current user """
self.dashboard_page.visit()
courses = self.dashboard_page.list_courses()
def check_course_equality(course1, course2):
""" Compares to course dictionaries using org, number and run as keys"""
return (
course1['org'] == course2['display_organization'] and
course1['number'] == course2['display_coursenumber'] and
course1['run'] == course2['run']
)
actual_visible = any((check_course_equality(course, self.course_info) for course in courses))
self.assertEqual(actual_visible, visible)
def _assert_user_present(self, user, present=True):
""" Checks if specified user present on Course Team page """
if present:
self.page.wait_for(
lambda: user.get('username') in self.page.usernames,
description="Wait for user to be present"
)
else:
self.page.wait_for(
lambda: user.get('username') not in self.page.usernames,
description="Wait for user to be absent"
)
def _should_see_dialog(self, dialog_type, dialog_message):
""" Asserts dialog with specified message is shown """
self.page.modal_dialog_visible(dialog_type)
self.assertIn(dialog_message, self.page.modal_dialog_text(dialog_type))
def _assert_is_staff(self, user, can_manage=True):
""" Checks if user have staff permissions, can be promoted and can't be demoted """
self.assertIn("staff", user.role_label.lower())
if can_manage:
self.assertTrue(user.can_promote)
self.assertFalse(user.can_demote)
self.assertIn("Add Admin Access", user.promote_button_text)
def _assert_is_admin(self, user):
""" Checks if user have admin permissions, can't be promoted and can be demoted """
self.assertIn("admin", user.role_label.lower())
self.assertFalse(user.can_promote)
self.assertTrue(user.can_demote)
self.assertIn("Remove Admin Access", user.demote_button_text)
def _assert_can_manage_users(self):
""" Checks if current user can manage course team """
self.assertTrue(self.page.has_add_button)
for user in self.page.users:
self.assertTrue(user.can_promote or user.can_demote) # depending on actual user role
self.assertTrue(user.can_delete)
def _assert_can_not_manage_users(self):
""" Checks if current user can't manage course team """
self.assertFalse(self.page.has_add_button)
for user in self.page.users:
self.assertFalse(user.can_promote)
self.assertFalse(user.can_demote)
self.assertFalse(user.can_delete)
def test_admins_can_add_other_users(self):
"""
Scenario: Admins can add other users
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And other user logs in
Then he does see the course on his page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
self.log_in(self.other_user)
self._assert_current_course(visible=True)
def test_added_users_cannot_add_or_delete_other_users(self):
"""
Scenario: Added users cannot delete or add other users
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And other user logs in
And he selects the new course
And he views the course team settings
Then he cannot manage users
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
self.log_in(self.other_user)
self._assert_current_course(visible=True)
self._go_to_course_team_page()
bob = self.page.get_user(self.other_user.get('email'))
self.assertTrue(bob.is_current_user)
self.assertFalse(self.page.has_add_button)
self._assert_can_not_manage_users()
def test_admins_can_delete_other_users(self):
"""
Scenario: Admins can delete other users
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And I delete other user from the course team
And other user logs in
Then he does not see the course on his page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
self.page.delete_user_from_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=False)
self.log_in(self.other_user)
self._assert_current_course(visible=False)
def test_admins_can_delete_other_inactive_users(self):
"""
Scenario: Admins can delete other inactive users
Given I have opened a new course in Studio
And I am viewing the course team settings.
When I add other user to the course team,
And then delete that other user from the course team.
And other user logs in
Then he/she does not see the course on the page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
# inactivate user
user_info = {
'username': self.other_user.get('username'),
'email': self.other_user.get('email'),
'password': self.other_user.get('password'),
'is_active': False
}
self._update_user(user_info)
# go to course team page to perform delete operation
self._go_to_course_team_page()
self.page.delete_user_from_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=False)
def test_admins_cannot_add_users_that_do_not_exist(self):
"""
Scenario: Admins cannot add users that do not exist
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add "dennis" to the course team
Then I should see "Could not find user by email address" somewhere on the page
"""
self.page.add_user_to_course("[email protected]")
self._should_see_dialog('error', "Could not find user by email address")
def test_admins_should_be_able_to_make_other_people_into_admins(self):
"""
Scenario: Admins should be able to make other people into admins
Given I have opened a new course in Studio
And I am viewing the course team settings
And I add other user to the course team
When I make other user a course team admin
And other user logs in
And he selects the new course
And he views the course team settings
Then other user should be marked as an admin
And he can manage users
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_staff(other)
other.click_promote()
self._refresh_page()
self._assert_is_admin(other)
self.log_in(self.other_user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
self.assertTrue(other.is_current_user)
self._assert_can_manage_users()
def test_admins_should_be_able_to_remove_other_admins(self):
"""
Scenario: Admins should be able to remove other admins
Given I have opened a new course in Studio
And I grant admin rights to other user
Then he can add, delete, promote and demote users
And I am viewing the course team settings
When I remove admin rights from other user
And other user logs in
And he selects the new course
And he views the course team settings
Then other user should not be marked as an admin
And he cannot manage users
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_staff(other)
other.click_promote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_admin(other)
# precondition check - the other user is an admin and can add/delete/promote/demote users
self.log_in(self.other_user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
self.assertTrue(other.is_current_user)
self._assert_can_manage_users()
self.log_in(self.user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
other.click_demote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_staff(other)
self.log_in(self.other_user)
self._go_to_course_team_page()
other = self.page.get_user(self.other_user.get('email'))
self.assertTrue(other.is_current_user)
self._assert_can_not_manage_users()
def test_admins_should_be_able_to_remove_themself_if_other_admin_exists(self):
"""
Scenario: Admins should be able to give course ownership to someone else
Given I have opened a new course in Studio
And I am viewing the course team settings
And I'm the only course admin
Then I cannot delete or demote myself
When I add other user to the course team
And I make other user a course team admin
Then I can delete or demote myself
When I delete myself from the course team
And I am logged into studio
Then I do not see the course on my page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
current = self.page.get_user(self.user.get('email'))
self.assertFalse(current.can_demote)
self.assertFalse(current.can_delete)
self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text)
other = self.page.get_user(self.other_user.get('email'))
other.click_promote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_admin(other)
current = self.page.get_user(self.user.get('email'))
self.assertTrue(current.can_demote)
self.assertTrue(current.can_delete)
current.click_delete()
self.log_in(self.user)
self._assert_current_course(visible=False)
def test_admins_should_be_able_to_give_course_ownership_to_someone_else(self):
"""
Scenario: Admins should be able to give course ownership to someone else
Given I have opened a new course in Studio
And I am viewing the course team settings
When I add other user to the course team
And I make other user a course team admin
When I remove admin rights from myself
Then I should not be marked as an admin
And I cannot manage users
And I cannot make myself a course team admin
When other user logs in
And he selects the new course
And he views the course team settings
And he deletes me from the course team
And I am logged into studio
Then I do not see the course on my page
"""
self.page.add_user_to_course(self.other_user.get('email'))
self._assert_user_present(self.other_user, present=True)
current = self.page.get_user(self.user.get('email'))
self.assertFalse(current.can_demote)
self.assertFalse(current.can_delete)
self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text)
other = self.page.get_user(self.other_user.get('email'))
other.click_promote()
self._refresh_page()
other = self.page.get_user(self.other_user.get('email'))
self._assert_is_admin(other)
current = self.page.get_user(self.user.get('email'))
self.assertTrue(current.can_demote)
self.assertTrue(current.can_delete)
current.click_demote()
self._refresh_page()
current = self.page.get_user(self.user.get('email'))
self._assert_is_staff(current, can_manage=False)
self._assert_can_not_manage_users()
self.assertFalse(current.can_promote)
self.log_in(self.other_user)
self._go_to_course_team_page()
current = self.page.get_user(self.user.get('email'))
current.click_delete()
self._refresh_page()
self._assert_user_present(self.user, present=False)
self.log_in(self.user)
self._assert_current_course(visible=False)
|
agpl-3.0
|
Dhivyap/ansible
|
lib/ansible/plugins/httpapi/restconf.py
|
17
|
2852
|
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
httpapi: restconf
short_description: HttpApi Plugin for devices supporting Restconf API
description:
- This HttpApi plugin provides methods to connect to Restconf API
endpoints.
version_added: "2.8"
options:
root_path:
type: str
description:
- Specifies the location of the Restconf root.
default: '/restconf'
vars:
- name: ansible_httpapi_restconf_root
"""
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.plugins.httpapi import HttpApiBase
CONTENT_TYPE = 'application/yang-data+json'
class HttpApi(HttpApiBase):
def send_request(self, data, **message_kwargs):
if data:
data = json.dumps(data)
path = '/'.join([self.get_option('root_path').rstrip('/'), message_kwargs.get('path', '').lstrip('/')])
headers = {
'Content-Type': message_kwargs.get('content_type') or CONTENT_TYPE,
'Accept': message_kwargs.get('accept') or CONTENT_TYPE,
}
response, response_data = self.connection.send(path, data, headers=headers, method=message_kwargs.get('method'))
return handle_response(response, response_data)
def handle_response(response, response_data):
try:
response_data = json.loads(response_data.read())
except ValueError:
response_data = response_data.read()
if isinstance(response, HTTPError):
if response_data:
if 'errors' in response_data:
errors = response_data['errors']['error']
error_text = '\n'.join((error['error-message'] for error in errors))
else:
error_text = response_data
raise ConnectionError(error_text, code=response.code)
raise ConnectionError(to_text(response), code=response.code)
return response_data
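# Editor's note (illustrative, values are hypothetical): through this plugin a
# RESTCONF GET such as
#   connection.send_request(None, path='/data/ietf-interfaces:interfaces', method='GET')
# is joined with root_path ('/restconf' by default) to form
#   /restconf/data/ietf-interfaces:interfaces
# and is sent with 'application/yang-data+json' as both Content-Type and Accept,
# unless content_type/accept overrides are supplied in message_kwargs.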
|
gpl-3.0
|
salaria/odoo
|
addons/hr_payroll/res_config.py
|
441
|
1294
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class human_resources_configuration(osv.osv_memory):
_inherit = 'hr.config.settings'
_columns = {
'module_hr_payroll_account': fields.boolean('Link your payroll to accounting system',
help ="""Create journal entries from payslips"""),
}
|
agpl-3.0
|
uclouvain/osis
|
education_group/tests/ddd/factories/domain/co_graduation.py
|
1
|
1610
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import factory.fuzzy
from education_group.ddd.domain._co_graduation import CoGraduation
class CoGraduationFactory(factory.Factory):
class Meta:
model = CoGraduation
abstract = False
code_inter_cfb = factory.Sequence(lambda n: '%02d' % n)
coefficient = factory.fuzzy.FuzzyDecimal(0, 10, precision=1)
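# Editor's note (illustrative): CoGraduationFactory() builds a CoGraduation test
# value with a zero-padded sequential code_inter_cfb ('00', '01', ...) and a
# random coefficient between 0 and 10 with one decimal place.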
|
agpl-3.0
|
notmyname/swift
|
swift/proxy/controllers/account.py
|
4
|
8385
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import unquote
from swift import gettext_ as _
from swift.account.utils import account_listing_response
from swift.common.request_helpers import get_listing_content_type
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.utils import public
from swift.common.constraints import check_metadata
from swift.common import constraints
from swift.common.http import HTTP_NOT_FOUND, HTTP_GONE
from swift.proxy.controllers.base import Controller, clear_info_cache, \
set_info_cache
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.request_helpers import get_sys_meta_prefix
class AccountController(Controller):
"""WSGI controller for account requests"""
server_type = 'Account'
def __init__(self, app, account_name, **kwargs):
super(AccountController, self).__init__(app)
self.account_name = unquote(account_name)
if not self.app.allow_account_management:
self.allowed_methods.remove('PUT')
self.allowed_methods.remove('DELETE')
def add_acls_from_sys_metadata(self, resp):
if resp.environ['REQUEST_METHOD'] in ('HEAD', 'GET', 'PUT', 'POST'):
prefix = get_sys_meta_prefix('account') + 'core-'
name = 'access-control'
(extname, intname) = ('x-account-' + name, prefix + name)
acl_dict = parse_acl(version=2, data=resp.headers.pop(intname))
if acl_dict: # treat empty dict as empty header
resp.headers[extname] = format_acl(
version=2, acl_dict=acl_dict)
def GETorHEAD(self, req):
"""Handler for HTTP GET/HEAD requests."""
if len(self.account_name) > constraints.MAX_ACCOUNT_NAME_LENGTH:
resp = HTTPBadRequest(request=req)
resp.body = 'Account name length of %d longer than %d' % \
(len(self.account_name),
constraints.MAX_ACCOUNT_NAME_LENGTH)
# Don't cache this. We know the account doesn't exist because
# the name is bad; we don't need to cache that because it's
# really cheap to recompute.
return resp
partition = self.app.account_ring.get_part(self.account_name)
concurrency = self.app.account_ring.replica_count \
if self.app.concurrent_gets else 1
node_iter = self.app.iter_nodes(self.app.account_ring, partition)
resp = self.GETorHEAD_base(
req, _('Account'), node_iter, partition,
req.swift_entity_path.rstrip('/'), concurrency)
if resp.status_int == HTTP_NOT_FOUND:
if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
resp.status = HTTP_GONE
elif self.app.account_autocreate:
# This is kind of a lie; we pretend like the account is
# there, but it's not. We'll create it as soon as something
# tries to write to it, but we don't need databases on disk
# to tell us that nothing's there.
#
# We set a header so that certain consumers can tell it's a
# fake listing. The important one is the PUT of a container
# to an autocreate account; the proxy checks to see if the
# account exists before actually performing the PUT and
# creates the account if necessary. If we feed it a perfect
# lie, it'll just try to create the container without
# creating the account, and that'll fail.
resp = account_listing_response(self.account_name, req,
get_listing_content_type(req))
resp.headers['X-Backend-Fake-Account-Listing'] = 'yes'
# Cache this. We just made a request to a storage node and got
# up-to-date information for the account.
resp.headers['X-Backend-Recheck-Account-Existence'] = str(
self.app.recheck_account_existence)
set_info_cache(self.app, req.environ, self.account_name, None, resp)
if req.environ.get('swift_owner'):
self.add_acls_from_sys_metadata(resp)
else:
for header in self.app.swift_owner_headers:
resp.headers.pop(header, None)
return resp
@public
def PUT(self, req):
"""HTTP PUT request handler."""
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
error_response = check_metadata(req, 'account')
if error_response:
return error_response
if len(self.account_name) > constraints.MAX_ACCOUNT_NAME_LENGTH:
resp = HTTPBadRequest(request=req)
resp.body = 'Account name length of %d longer than %d' % \
(len(self.account_name),
constraints.MAX_ACCOUNT_NAME_LENGTH)
return resp
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'PUT',
req.swift_entity_path, [headers] * len(accounts))
self.add_acls_from_sys_metadata(resp)
return resp
@public
def POST(self, req):
"""HTTP POST request handler."""
if len(self.account_name) > constraints.MAX_ACCOUNT_NAME_LENGTH:
resp = HTTPBadRequest(request=req)
resp.body = 'Account name length of %d longer than %d' % \
(len(self.account_name),
constraints.MAX_ACCOUNT_NAME_LENGTH)
return resp
error_response = check_metadata(req, 'account')
if error_response:
return error_response
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.swift_entity_path, [headers] * len(accounts))
if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:
self.autocreate_account(req, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.swift_entity_path, [headers] * len(accounts))
self.add_acls_from_sys_metadata(resp)
return resp
@public
def DELETE(self, req):
"""HTTP DELETE request handler."""
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
if req.query_string:
return HTTPBadRequest(request=req)
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'DELETE',
req.swift_entity_path, [headers] * len(accounts))
return resp
|
apache-2.0
|
noelevans/sandpit
|
interview_challenges/koans/answer_template.py
|
1
|
4601
|
'''
Python questions to work on. To invoke with pytest use
$ python3 answer_template.py
or
$ python3 -m pytest answer_template.py
'''
import pytest
def inclusive_range(n):
# For n = 5, return [1, 2, 3, 4, 5]
return [1, 2, 3, 4, 5]
def test_inclusive_range():
assert list(inclusive_range(5)) == [1, 2, 3, 4, 5]
def average(ol):
# The mean for a series of numbers
pass
# def test_average():
# assert average([2, 2, 2, 3, 4]) == 2.6
def no_whitespace(t):
# Remove all whitespace from the start and end of the string
pass
# def test_no_whitespace():
# assert no_whitespace(' hello ') == 'hello'
def minus_to_plus(t):
# Replace all - symbols with + characters
pass
# def test_minus_to_plus():
# assert minus_to_plus('hello-world') == 'hello+world'
def sum_bar_last(a, b, c):
# a + b - c and then do it generically for arbitrary arguments
pass
# def test_sum_bar_last():
# assert sum_bar_last(4, 5, 6) == 3
# assert sum_bar_last(4, 5, 1, 6) == 4
def mirror(s):
# Turn a string around so 1st char is the last, 2nd becomes penultimate
pass
# def test_mirror():
# assert mirror('smart') == 'trams'
def odd_idxs(ol):
# For ol = [4, 5, 6, 7, 8, 9] return [5, 7, 9] the odd indicies of ol
pass
# def test_odd_idxs():
# assert odd_idxs([1, 2, 3, 4]) == [2, 4]
def pairs(ol):
# For a list, return a list of the items in pairs
# Eg [2, 3, 4, 5, 6, 7, 8, 9] -> [[2, 3], [4, 5], [6, 7], [8, 9]]
pass
# def test_pairs():
# assert list(pairs([0, 1, 2, 3, 4, 5])) == [[0, 1], [2, 3], [4, 5]]
# # Bonus
# assert list(pairs([0, 1, 2, 3, 4])) == [[0, 1], [2, 3], [4]]
def blank_count(ol):
# Count the amount of not True elements in ol
pass
# def test_blank_count():
# assert blank_count([7, 0, None, 1, 'hi', '', 88, 0]) == 4
def flatten(ol_of_ol):
# For [[1, 2, 3], [4, 5, 6], [7, 8]] -> [1, 2, 3, 4, 5, 6, 7, 8]
pass
# def test_flatten():
# assert flatten([[1, 2, 3], [4, 5], [6, 7]]) == [1, 2, 3, 4, 5, 6, 7]
def element_divisible_by_3(ol):
# Is there 1 or more elements divisible by 3 in the input
pass
# def test_element_divisible_by_3():
# assert element_divisible_by_3([1, 2, 4, 5]) == False
# assert element_divisible_by_3([1, 2, 6, 5]) == True
def most_common(ol):
# Return the most common element in the input list
pass
# def test_most_common():
# assert most_common([3, 3, 4, 4, 4, 4, 2]) == 4
def dict_reverse(d):
# For {'a': 3, 'b': 4, 'c': 9} -> {3: 'a', 4: 'b', 9: 'c'}
pass
# def test_dict_reverse():
# assert dict_reverse({'a': 3, 'b': 4, 'c': 9}) == {3: 'a', 4: 'b', 9: 'c'}
def atomic_weight(formula):
def weight(element='Na'):
import mendeleev
return getattr(mendeleev, element).atomic_weight
pass
# def test_atomic_weight():
# assert atomic_weight('NaCl') == pytest.approx(58.4, 0.01)
# assert atomic_weight('CCl4') == pytest.approx(153.8, 0.01)
# assert atomic_weight('H2O') == pytest.approx(18.0, 0.01)
# assert atomic_weight('H2SO4') == pytest.approx(98.1, 0.01)
# assert atomic_weight('C6H12COH') == pytest.approx(113.2, 0.01)
def sequences(max_len):
'''
For a given max_len return all combinations of ACGT, first of
length 1, then 2, until max_len is reached
eg.
3 -> ['A', 'C', 'G', 'T', 'AC', 'AG', 'AT', 'CG', 'CT', 'GT', 'ACG',
'ACT', 'AGT', 'CGT']
'''
pass
# def test_sequences():
# assert list(sequences(3)) == ['A', 'C', 'G', 'T', 'AC', 'AG', 'AT', 'CG',
# 'CT', 'GT', 'ACG', 'ACT', 'AGT', 'CGT']
def stock_prices(ticker):
prices = {
'2019-02-14': {
'MSFT': 106,
'GOOG': 1120,
},
'2019-02-15': {
'MSFT': 108,
'NFLX': 351,
},
'2019-02-18': {
'MSFT': 108,
'GOOG': 1119,
'NFLX': 352,
},
'2019-02-19': {
'MSFT': 109,
'GOOG': 1122,
'NFLX': 107,
},
}
# def test_stock_prices():
# assert set(stock_prices('MSFT')) == set([ 106, 108, 108, 109])
# assert list(stock_prices('MSFT')) == [ 106, 108, 108, 109]
# assert list(stock_prices('NFLX')) == [None, 351, 352, 107]
# assert list(stock_prices('GOOG')) == [1120, 1120, 1119, 1122]
def main():
import os
filename = os.path.basename(__file__)
pytest.main([filename])
if __name__ == '__main__':
main()
|
mit
|
tmpgit/intellij-community
|
python/helpers/coverage/summary.py
|
215
|
2972
|
"""Summary reporting"""
import sys
from coverage.report import Reporter
from coverage.results import Numbers
from coverage.misc import NotPython
class SummaryReporter(Reporter):
"""A reporter for writing the summary report."""
def __init__(self, coverage, config):
super(SummaryReporter, self).__init__(coverage, config)
self.branches = coverage.data.has_arcs()
def report(self, morfs, outfile=None):
"""Writes a report summarizing coverage statistics per module.
`outfile` is a file object to write the summary to.
"""
self.find_code_units(morfs)
# Prepare the formatting strings
max_name = max([len(cu.name) for cu in self.code_units] + [5])
fmt_name = "%%- %ds " % max_name
fmt_err = "%s %s: %s\n"
header = (fmt_name % "Name") + " Stmts Miss"
fmt_coverage = fmt_name + "%6d %6d"
if self.branches:
header += " Branch BrMiss"
fmt_coverage += " %6d %6d"
width100 = Numbers.pc_str_width()
header += "%*s" % (width100+4, "Cover")
fmt_coverage += "%%%ds%%%%" % (width100+3,)
if self.config.show_missing:
header += " Missing"
fmt_coverage += " %s"
rule = "-" * len(header) + "\n"
header += "\n"
fmt_coverage += "\n"
if not outfile:
outfile = sys.stdout
# Write the header
outfile.write(header)
outfile.write(rule)
total = Numbers()
for cu in self.code_units:
try:
analysis = self.coverage._analyze(cu)
nums = analysis.numbers
args = (cu.name, nums.n_statements, nums.n_missing)
if self.branches:
args += (nums.n_branches, nums.n_missing_branches)
args += (nums.pc_covered_str,)
if self.config.show_missing:
args += (analysis.missing_formatted(),)
outfile.write(fmt_coverage % args)
total += nums
except KeyboardInterrupt: # pragma: not covered
raise
except:
report_it = not self.config.ignore_errors
if report_it:
typ, msg = sys.exc_info()[:2]
if typ is NotPython and not cu.should_be_python():
report_it = False
if report_it:
outfile.write(fmt_err % (cu.name, typ.__name__, msg))
if total.n_files > 1:
outfile.write(rule)
args = ("TOTAL", total.n_statements, total.n_missing)
if self.branches:
args += (total.n_branches, total.n_missing_branches)
args += (total.pc_covered_str,)
if self.config.show_missing:
args += ("",)
outfile.write(fmt_coverage % args)
return total.pc_covered
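# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of this module). SummaryReporter
# is normally driven through the public coverage API rather than instantiated
# directly. Assuming the coverage 3.x API this reporter ships with, a typical
# run looks roughly like:
#
#     import coverage
#     cov = coverage.coverage(branch=True)
#     cov.start()
#     import my_module                  # hypothetical module under measurement
#     my_module.do_work()
#     cov.stop()
#     cov.report(show_missing=True)     # builds a SummaryReporter internally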
|
apache-2.0
|
lemarcudal/sha_thedivision
|
test/Lib/encodings/uu_codec.py
|
383
|
3738
|
""" Python 'uu_codec' Codec - UU content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]). Some details were
adapted from uu.py which was written by Lance Ellinghouse and
modified by Jack Jansen and Fredrik Lundh.
"""
import codecs, binascii
### Codec APIs
def uu_encode(input,errors='strict',filename='<data>',mode=0666):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import b2a_uu
    # using str() because of cStringIO's undesired Unicode behavior.
infile = StringIO(str(input))
outfile = StringIO()
read = infile.read
write = outfile.write
# Encode
write('begin %o %s\n' % (mode & 0777, filename))
chunk = read(45)
while chunk:
write(b2a_uu(chunk))
chunk = read(45)
write(' \nend\n')
return (outfile.getvalue(), len(input))
def uu_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
Note: filename and file mode information in the input data is
ignored.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import a2b_uu
infile = StringIO(str(input))
outfile = StringIO()
readline = infile.readline
write = outfile.write
# Find start of encoded data
while 1:
s = readline()
if not s:
raise ValueError, 'Missing "begin" line in input data'
if s[:5] == 'begin':
break
# Decode
while 1:
s = readline()
if not s or \
s == 'end\n':
break
try:
data = a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
data = a2b_uu(s[:nbytes])
#sys.stderr.write("Warning: %s\n" % str(v))
write(data)
if not s:
raise ValueError, 'Truncated input data'
return (outfile.getvalue(), len(input))
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return uu_encode(input,errors)
def decode(self,input,errors='strict'):
return uu_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return uu_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return uu_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='uu',
encode=uu_encode,
decode=uu_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
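# ----------------------------------------------------------------------------
# Usage sketch (illustrative only). With this module on the encodings search
# path the codec is registered under the name 'uu' (see getregentry() above),
# so a Python 2 round trip looks roughly like:
#
#     data = 'hello world'
#     encoded = data.encode('uu')       # 'begin 666 <data>\n...\nend\n'
#     assert encoded.decode('uu') == data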
|
apache-2.0
|
mancoast/CPythonPyc_test
|
cpython/252_test_profile.py
|
19
|
3289
|
"""Test suite for the profile module."""
import profile, pstats, sys
# In order to have reproducible time, we simulate a timer in the global
# variable 'ticks', which represents simulated time in milliseconds.
# (We can't use a helper function to increment the timer since it would be
# included in the profile and would appear to consume all the time.)
ticks = 0
# IMPORTANT: this is an output test. *ALL* NUMBERS in the expected
# output are relevant. If you change the formatting of pstats,
# please don't just regenerate output/test_profile without checking
# very carefully that not a single number has changed.
def test_main():
global ticks
ticks = 42000
prof = profile.Profile(timer)
prof.runctx("testfunc()", globals(), locals())
assert ticks == 43000, ticks
st = pstats.Stats(prof)
st.strip_dirs().sort_stats('stdname').print_stats()
st.print_callees()
st.print_callers()
def timer():
return ticks*0.001
def testfunc():
# 1 call
# 1000 ticks total: 270 ticks local, 730 ticks in subfunctions
global ticks
ticks += 99
helper() # 300
helper() # 300
ticks += 171
factorial(14) # 130
def factorial(n):
# 23 calls total
# 170 ticks total, 150 ticks local
# 3 primitive calls, 130, 20 and 20 ticks total
# including 116, 17, 17 ticks local
global ticks
if n > 0:
ticks += n
return mul(n, factorial(n-1))
else:
ticks += 11
return 1
def mul(a, b):
# 20 calls
# 1 tick, local
global ticks
ticks += 1
return a * b
def helper():
# 2 calls
# 300 ticks total: 20 ticks local, 260 ticks in subfunctions
global ticks
ticks += 1
helper1() # 30
ticks += 2
helper1() # 30
ticks += 6
helper2() # 50
ticks += 3
helper2() # 50
ticks += 2
helper2() # 50
ticks += 5
helper2_indirect() # 70
ticks += 1
def helper1():
# 4 calls
# 30 ticks total: 29 ticks local, 1 tick in subfunctions
global ticks
ticks += 10
hasattr(C(), "foo") # 1
ticks += 19
lst = []
lst.append(42) # 0
sys.exc_info() # 0
def helper2_indirect():
helper2() # 50
factorial(3) # 20
def helper2():
# 8 calls
    # 50 ticks total: 39 ticks local, 11 ticks in subfunctions
global ticks
ticks += 11
hasattr(C(), "bar") # 1
ticks += 13
subhelper() # 10
ticks += 15
def subhelper():
# 8 calls
# 10 ticks total: 8 ticks local, 2 ticks in subfunctions
global ticks
ticks += 2
for i in range(2): # 0
try:
C().foo # 1 x 2
except AttributeError:
ticks += 3 # 3 x 2
class C:
def __getattr__(self, name):
# 28 calls
# 1 tick, local
global ticks
ticks += 1
raise AttributeError
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
mahak/ansible
|
test/units/parsing/test_dataloader.py
|
57
|
9955
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat import unittest
from units.compat.mock import patch, mock_open
from ansible.errors import AnsibleParserError, yaml_strings, AnsibleFileNotFound
from ansible.parsing.vault import AnsibleVaultError
from ansible.module_utils._text import to_text
from ansible.module_utils.six import PY3
from units.mock.vault_helper import TextVaultSecret
from ansible.parsing.dataloader import DataLoader
from units.mock.path import mock_unfrackpath_noop
class TestDataLoader(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
@patch('os.path.exists')
def test__is_role(self, p_exists):
p_exists.side_effect = lambda p: p == b'test_path/tasks/main.yml'
self.assertTrue(self._loader._is_role('test_path/tasks'))
self.assertTrue(self._loader._is_role('test_path/'))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_json_from_file(self, mock_def):
mock_def.return_value = (b"""{"a": 1, "b": 2, "c": 3}""", True)
output = self._loader.load_from_file('dummy_json.txt')
self.assertEqual(output, dict(a=1, b=2, c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_yaml_from_file(self, mock_def):
mock_def.return_value = (b"""
a: 1
b: 2
c: 3
""", True)
output = self._loader.load_from_file('dummy_yaml.txt')
self.assertEqual(output, dict(a=1, b=2, c=3))
@patch.object(DataLoader, '_get_file_contents')
def test_parse_fail_from_file(self, mock_def):
mock_def.return_value = (b"""
TEXT:
***
NOT VALID
""", True)
self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
@patch('ansible.errors.AnsibleError._get_error_lines_from_file')
@patch.object(DataLoader, '_get_file_contents')
def test_tab_error(self, mock_def, mock_get_error_lines):
mock_def.return_value = (u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""", True)
mock_get_error_lines.return_value = ('''\tblip: baz''', '''..foo: bar''')
with self.assertRaises(AnsibleParserError) as cm:
self._loader.load_from_file('dummy_yaml_text.txt')
self.assertIn(yaml_strings.YAML_COMMON_LEADING_TAB_ERROR, str(cm.exception))
self.assertIn('foo: bar', str(cm.exception))
@patch('ansible.parsing.dataloader.unfrackpath', mock_unfrackpath_noop)
@patch.object(DataLoader, '_is_role')
def test_path_dwim_relative(self, mock_is_role):
"""
simulate a nested dynamic include:
playbook.yml:
- hosts: localhost
roles:
- { role: 'testrole' }
testrole/tasks/main.yml:
- include: "include1.yml"
static: no
testrole/tasks/include1.yml:
- include: include2.yml
static: no
testrole/tasks/include2.yml:
- debug: msg="blah"
"""
mock_is_role.return_value = False
with patch('os.path.exists') as mock_os_path_exists:
mock_os_path_exists.return_value = False
self._loader.path_dwim_relative('/tmp/roles/testrole/tasks', 'tasks', 'included2.yml')
# Fetch first args for every call
# mock_os_path_exists.assert_any_call isn't used because os.path.normpath must be used in order to compare paths
called_args = [os.path.normpath(to_text(call[0][0])) for call in mock_os_path_exists.call_args_list]
# 'path_dwim_relative' docstrings say 'with or without explicitly named dirname subdirs':
self.assertIn('/tmp/roles/testrole/tasks/included2.yml', called_args)
self.assertIn('/tmp/roles/testrole/tasks/tasks/included2.yml', called_args)
# relative directories below are taken in account too:
self.assertIn('tasks/included2.yml', called_args)
self.assertIn('included2.yml', called_args)
def test_path_dwim_root(self):
self.assertEqual(self._loader.path_dwim('/'), '/')
def test_path_dwim_home(self):
self.assertEqual(self._loader.path_dwim('~'), os.path.expanduser('~'))
def test_path_dwim_tilde_slash(self):
self.assertEqual(self._loader.path_dwim('~/'), os.path.expanduser('~'))
def test_get_real_file(self):
self.assertEqual(self._loader.get_real_file(__file__), __file__)
def test_is_file(self):
self.assertTrue(self._loader.is_file(__file__))
def test_is_directory_positive(self):
self.assertTrue(self._loader.is_directory(os.path.dirname(__file__)))
def test_get_file_contents_none_path(self):
self.assertRaisesRegexp(AnsibleParserError, 'Invalid filename',
self._loader._get_file_contents, None)
def test_get_file_contents_non_existent_path(self):
self.assertRaises(AnsibleFileNotFound, self._loader._get_file_contents, '/non_existent_file')
class TestPathDwimRelativeDataLoader(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
def test_all_slash(self):
self.assertEqual(self._loader.path_dwim_relative('/', '/', '/'), '/')
def test_path_endswith_role(self):
self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='/'), '/')
def test_path_endswith_role_main_yml(self):
self.assertIn('main.yml', self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='main.yml'))
def test_path_endswith_role_source_tilde(self):
self.assertEqual(self._loader.path_dwim_relative(path='foo/bar/tasks/', dirname='/', source='~/'), os.path.expanduser('~'))
class TestPathDwimRelativeStackDataLoader(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
def test_none(self):
self.assertRaisesRegexp(AnsibleFileNotFound, 'on the Ansible Controller', self._loader.path_dwim_relative_stack, None, None, None)
def test_empty_strings(self):
self.assertEqual(self._loader.path_dwim_relative_stack('', '', ''), './')
def test_empty_lists(self):
self.assertEqual(self._loader.path_dwim_relative_stack([], '', '~/'), os.path.expanduser('~'))
def test_all_slash(self):
self.assertEqual(self._loader.path_dwim_relative_stack('/', '/', '/'), '/')
def test_path_endswith_role(self):
self.assertEqual(self._loader.path_dwim_relative_stack(paths=['foo/bar/tasks/'], dirname='/', source='/'), '/')
def test_path_endswith_role_source_tilde(self):
self.assertEqual(self._loader.path_dwim_relative_stack(paths=['foo/bar/tasks/'], dirname='/', source='~/'), os.path.expanduser('~'))
def test_path_endswith_role_source_main_yml(self):
self.assertRaises(AnsibleFileNotFound, self._loader.path_dwim_relative_stack, ['foo/bar/tasks/'], '/', 'main.yml')
def test_path_endswith_role_source_main_yml_source_in_dirname(self):
self.assertRaises(AnsibleFileNotFound, self._loader.path_dwim_relative_stack, 'foo/bar/tasks/', 'tasks', 'tasks/main.yml')
class TestDataLoaderWithVault(unittest.TestCase):
def setUp(self):
self._loader = DataLoader()
vault_secrets = [('default', TextVaultSecret('ansible'))]
self._loader.set_vault_secrets(vault_secrets)
self.test_vault_data_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'vault.yml')
def tearDown(self):
pass
def test_get_real_file_vault(self):
real_file_path = self._loader.get_real_file(self.test_vault_data_path)
self.assertTrue(os.path.exists(real_file_path))
def test_get_real_file_vault_no_vault(self):
self._loader.set_vault_secrets(None)
self.assertRaises(AnsibleParserError, self._loader.get_real_file, self.test_vault_data_path)
def test_get_real_file_vault_wrong_password(self):
wrong_vault = [('default', TextVaultSecret('wrong_password'))]
self._loader.set_vault_secrets(wrong_vault)
self.assertRaises(AnsibleVaultError, self._loader.get_real_file, self.test_vault_data_path)
def test_get_real_file_not_a_path(self):
self.assertRaisesRegexp(AnsibleParserError, 'Invalid filename', self._loader.get_real_file, None)
@patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
def test_parse_from_vault_1_1_file(self):
vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
33343734386261666161626433386662623039356366656637303939306563376130623138626165
6436333766346533353463636566313332623130383662340a393835656134633665333861393331
37666233346464636263636530626332623035633135363732623332313534306438393366323966
3135306561356164310a343937653834643433343734653137383339323330626437313562306630
3035
"""
if PY3:
builtins_name = 'builtins'
else:
builtins_name = '__builtin__'
with patch(builtins_name + '.open', mock_open(read_data=vaulted_data.encode('utf-8'))):
output = self._loader.load_from_file('dummy_vault.txt')
self.assertEqual(output, dict(foo='bar'))
|
gpl-3.0
|
caveman-dick/ansible
|
lib/ansible/modules/network/f5/bigip_irule.py
|
10
|
11191
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_irule
short_description: Manage iRules across different modules on a BIG-IP.
description:
- Manage iRules across different modules on a BIG-IP.
version_added: "2.2"
options:
content:
description:
- When used instead of 'src', sets the contents of an iRule directly to
the specified value. This is for simple values, but can be used with
lookup plugins for anything complex or with formatting. Either one
of C(src) or C(content) must be provided.
module:
description:
- The BIG-IP module to add the iRule to.
required: True
choices:
- ltm
- gtm
name:
description:
- The name of the iRule.
required: True
src:
description:
- The iRule file to interpret and upload to the BIG-IP. Either one
of C(src) or C(content) must be provided.
required: True
state:
description:
- Whether the iRule should exist or not.
default: present
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add the iRule contained in template irule.tcl to the LTM module
bigip_irule:
content: "{{ lookup('template', 'irule.tcl') }}"
module: "ltm"
name: "MyiRule"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Add the iRule contained in static file irule.tcl to the LTM module
bigip_irule:
module: "ltm"
name: "MyiRule"
password: "secret"
server: "lb.mydomain.com"
src: "irule.tcl"
state: "present"
user: "admin"
delegate_to: localhost
'''
RETURN = '''
module:
description: The module that the iRule was added to
returned: changed and success
type: string
sample: "gtm"
src:
description: The filename that included the iRule source
returned: changed and success, when provided
type: string
sample: "/opt/src/irules/example1.tcl"
content:
description: The content of the iRule that was managed
returned: changed and success
type: string
sample: "when LB_FAILED { set wipHost [LB::server addr] }"
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'apiAnonymous': 'content'
}
updatables = [
'content'
]
api_attributes = [
'apiAnonymous'
]
returnables = [
'content', 'src', 'module'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def content(self):
if self._values['content'] is None:
return None
return str(self._values['content']).strip()
@property
def src(self):
if self._values['src'] is None:
return None
return self._values['src']
@src.setter
def src(self, value):
if value:
self._values['src'] = value
with open(value) as f:
result = f.read()
self._values['content'] = result
class ModuleManager(object):
def __init__(self, client):
self.client = client
def exec_module(self):
if self.client.module.params['module'] == 'ltm':
manager = self.get_manager('ltm')
elif self.client.module.params['module'] == 'gtm':
manager = self.get_manager('gtm')
else:
raise F5ModuleError(
"An unknown iRule module type was specified"
)
return manager.exec_module()
def get_manager(self, type):
if type == 'ltm':
return LtmManager(self.client)
elif type == 'gtm':
return GtmManager(self.client)
class BaseManager(object):
def __init__(self, client):
self.client = client
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def present(self):
if not self.want.content and not self.want.src:
raise F5ModuleError(
"Either 'content' or 'src' must be provided"
)
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
if not self.exists():
raise F5ModuleError("Failed to create the iRule")
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iRule")
return True
class LtmManager(BaseManager):
def exists(self):
result = self.client.api.tm.ltm.rules.rule.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.ltm.rules.rule.load(
name=self.want.name,
partition=self.want.partition
)
resource.update(**params)
def create_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.ltm.rules.rule
resource.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def read_current_from_device(self):
resource = self.client.api.tm.ltm.rules.rule.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(result)
def remove_from_device(self):
resource = self.client.api.tm.ltm.rules.rule.load(
name=self.want.name,
partition=self.want.partition
)
resource.delete()
class GtmManager(BaseManager):
def read_current_from_device(self):
resource = self.client.api.tm.gtm.rules.rule.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(result)
def remove_from_device(self):
resource = self.client.api.tm.gtm.rules.rule.load(
name=self.want.name,
partition=self.want.partition
)
resource.delete()
def exists(self):
result = self.client.api.tm.gtm.rules.rule.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.gtm.rules.rule.load(
name=self.want.name,
partition=self.want.partition
)
resource.update(**params)
def create_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.gtm.rules.rule
resource.create(
name=self.want.name,
partition=self.want.partition,
**params
)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
content=dict(
required=False,
default=None
),
src=dict(
required=False,
default=None
),
name=dict(required=True),
module=dict(
required=True,
choices=['gtm', 'ltm']
)
)
self.mutually_exclusive = [
['content', 'src']
]
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
mutually_exclusive=spec.mutually_exclusive,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
gpl-3.0
|
georgemarshall/django
|
tests/gis_tests/geogapp/tests.py
|
42
|
6977
|
"""
Tests for geography support in PostGIS
"""
import os
from unittest import skipIf, skipUnless
from django.contrib.gis.db import models
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.measure import D
from django.db import NotSupportedError, connection
from django.db.models.functions import Cast
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from ..utils import FuncTestMixin, oracle, postgis, spatialite
from .models import City, County, Zipcode
class GeographyTest(TestCase):
fixtures = ['initial']
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
@skipIf(spatialite, "SpatiaLite doesn't support distance lookups with Distance objects.")
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test02_distance_lookup(self):
"Testing distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
@skipUnless(postgis, "This is a PostGIS-specific test")
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
with self.assertRaises(ValueError):
City.objects.filter(point__within=z.poly).count()
# `@` operator not available.
with self.assertRaises(ValueError):
City.objects.filter(point__contained=z.poly).count()
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
with self.assertRaises(ValueError):
City.objects.get(point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {
'name': 'Name',
'state': 'State',
'mpoly': 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
class GeographyFunctionTests(FuncTestMixin, TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("supports_extent_aggr")
def test_cast_aggregate(self):
"""
Cast a geography to a geometry field for an aggregate function that
expects a geometry input.
"""
if not connection.ops.geography:
self.skipTest("This test needs geography support")
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
res = City.objects.filter(
name__in=('Houston', 'Dallas')
).aggregate(extent=models.Extent(Cast('point', models.PointField())))
for val, exp in zip(res['extent'], expected):
self.assertAlmostEqual(exp, val, 4)
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_function(self):
"""
Testing Distance() support on non-point geography fields.
"""
if oracle:
ref_dists = [0, 4899.68, 8081.30, 9115.15]
elif spatialite:
# SpatiaLite returns non-zero distance for polygons and points
# covered by that polygon.
ref_dists = [326.61, 4899.68, 8081.30, 9115.15]
else:
ref_dists = [0, 4891.20, 8071.64, 9123.95]
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.annotate(
distance=Distance('poly', htown.point),
distance2=Distance(htown.point, 'poly'),
)
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance.m, ref, 2)
if postgis:
# PostGIS casts geography to geometry when distance2 is calculated.
ref_dists = [0, 4899.68, 8081.30, 9115.15]
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance2.m, ref, 2)
if not spatialite:
# Distance function combined with a lookup.
hzip = Zipcode.objects.get(code='77002')
self.assertEqual(qs.get(distance__lte=0), hzip)
@skipUnlessDBFeature("has_Area_function", "supports_area_geodetic")
def test_geography_area(self):
"""
Testing that Area calculations work on geography columns.
"""
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
# Round to the nearest thousand as possible values (depending on
# the database and geolib) include 5439084, 5439100, 5439101.
rounded_value = z.area.sq_m
rounded_value -= z.area.sq_m % 1000
self.assertEqual(rounded_value, 5439000)
@skipUnlessDBFeature("has_Area_function")
@skipIfDBFeature("supports_area_geodetic")
def test_geodetic_area_raises_if_not_supported(self):
with self.assertRaisesMessage(NotSupportedError, 'Area on geodetic coordinate systems not supported.'):
Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
|
bsd-3-clause
|
LaMi-/pmatic
|
examples/lowlevel_api/print_available_api_methods.py
|
2
|
1086
|
#!/usr/bin/env python
# encoding: utf-8
#
# pmatic - Python API for Homematic. Easy to use.
# Copyright (C) 2016 Lars Michelsen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pmatic.api
# Print all methods including their arguments and description which is available on your device
pmatic.api.init(
address="http://192.168.1.26",
credentials=("Admin", "EPIC-SECRET-PW")).print_methods()
|
gpl-2.0
|
dkarakats/edx-platform
|
common/lib/xmodule/xmodule/crowdsource_hinter.py
|
177
|
17456
|
"""
Adds crowdsourced hinting functionality to lon-capa numerical response problems.
Currently experimental - not for instructor use, yet.
"""
import logging
import json
import random
import copy
from pkg_resources import resource_string
from lxml import etree
from xmodule.x_module import XModule, STUDENT_VIEW
from xmodule.raw_module import RawDescriptor
from xblock.fields import Scope, String, Integer, Boolean, Dict, List
from capa.responsetypes import FormulaResponse
from django.utils.html import escape
log = logging.getLogger(__name__)
class CrowdsourceHinterFields(object):
"""Defines fields for the crowdsource hinter module."""
has_children = True
moderate = String(help='String "True"/"False" - activates moderation', scope=Scope.content,
default='False')
debug = String(help='String "True"/"False" - allows multiple voting', scope=Scope.content,
default='False')
# Usage: hints[answer] = {str(pk): [hint_text, #votes]}
# hints is a dictionary that takes answer keys.
# Each value is itself a dictionary, accepting hint_pk strings as keys,
# and returning [hint text, #votes] pairs as values
hints = Dict(help='A dictionary containing all the active hints.', scope=Scope.content, default={})
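    # Illustration (hypothetical values): after two hints have been submitted
    # for the wrong answer "42.0", `hints` would look like
    #     {'42.0': {'0': ['Check your units.', 3], '1': ['Recall F = ma.', 1]}}
    # where the inner keys are hint pks and the final list item is the vote count.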
mod_queue = Dict(help='A dictionary containing hints still awaiting approval', scope=Scope.content,
default={})
hint_pk = Integer(help='Used to index hints.', scope=Scope.content, default=0)
# A list of previous hints that a student viewed.
# Of the form [answer, [hint_pk_1, ...]] for each problem.
# Sorry about the variable name - I know it's confusing.
previous_answers = List(help='A list of hints viewed.', scope=Scope.user_state, default=[])
# user_submissions actually contains a list of previous answers submitted.
    # (Originally, previous_answers did this job, hence the name confusion.)
user_submissions = List(help='A list of previous submissions', scope=Scope.user_state, default=[])
user_voted = Boolean(help='Specifies if the user has voted on this problem or not.',
scope=Scope.user_state, default=False)
class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
"""
An Xmodule that makes crowdsourced hints.
Currently, only works on capa problems with exactly one numerical response,
and no other parts.
Example usage:
<crowdsource_hinter>
<problem blah blah />
</crowdsource_hinter>
XML attributes:
-moderate="True" will not display hints until staff approve them in the hint manager.
-debug="True" will let users vote as often as they want.
"""
icon_class = 'crowdsource_hinter'
css = {'scss': [resource_string(__name__, 'css/crowdsource_hinter/display.scss')]}
js = {'coffee': [resource_string(__name__, 'js/src/crowdsource_hinter/display.coffee')],
'js': []}
js_module_name = "Hinter"
def __init__(self, *args, **kwargs):
super(CrowdsourceHinterModule, self).__init__(*args, **kwargs)
# We need to know whether we are working with a FormulaResponse problem.
try:
responder = self.get_display_items()[0].lcp.responders.values()[0]
except (IndexError, AttributeError):
log.exception('Unable to find a capa problem child.')
return
        self.is_formula = isinstance(responder, FormulaResponse)
if self.is_formula:
self.answer_to_str = self.formula_answer_to_str
else:
self.answer_to_str = self.numerical_answer_to_str
# compare_answer is expected to return whether its two inputs are close enough
# to be equal, or raise a StudentInputError if one of the inputs is malformatted.
if hasattr(responder, 'compare_answer') and hasattr(responder, 'validate_answer'):
self.compare_answer = responder.compare_answer
self.validate_answer = responder.validate_answer
else:
# This response type is not supported!
log.exception('Response type not supported for hinting: ' + str(responder))
def get_html(self):
"""
Puts a wrapper around the problem html. This wrapper includes ajax urls of the
hinter and of the problem.
- Dependent on lon-capa problem.
"""
if self.debug == 'True':
# Reset the user vote, for debugging only!
self.user_voted = False
if self.hints == {}:
# Force self.hints to be written into the database. (When an xmodule is initialized,
# fields are not added to the db until explicitly changed at least once.)
self.hints = {}
try:
child = self.get_display_items()[0]
out = child.render(STUDENT_VIEW).content
# The event listener uses the ajax url to find the child.
child_id = child.id
except IndexError:
out = u"Error in loading crowdsourced hinter - can't find child problem."
child_id = ''
# Wrap the module in a <section>. This lets us pass data attributes to the javascript.
out += u'<section class="crowdsource-wrapper" data-url="{ajax_url}" data-child-id="{child_id}"> </section>'.format(
ajax_url=self.runtime.ajax_url,
child_id=child_id
)
return out
def numerical_answer_to_str(self, answer):
"""
Converts capa numerical answer format to a string representation
of the answer.
-Lon-capa dependent.
-Assumes that the problem only has one part.
"""
return str(answer.values()[0])
def formula_answer_to_str(self, answer):
"""
Converts capa formula answer into a string.
-Lon-capa dependent.
-Assumes that the problem only has one part.
"""
return str(answer.values()[0])
def get_matching_answers(self, answer):
"""
Look in self.hints, and find all answer keys that are "equal with tolerance"
to the input answer.
"""
return [key for key in self.hints if self.compare_answer(key, answer)]
def handle_ajax(self, dispatch, data):
"""
This is the landing method for AJAX calls.
"""
if dispatch == 'get_hint':
out = self.get_hint(data)
elif dispatch == 'get_feedback':
out = self.get_feedback(data)
elif dispatch == 'vote':
out = self.tally_vote(data)
elif dispatch == 'submit_hint':
out = self.submit_hint(data)
else:
return json.dumps({'contents': 'Error - invalid operation.'})
if out is None:
out = {'op': 'empty'}
elif 'error' in out:
# Error in processing.
out.update({'op': 'error'})
else:
out.update({'op': dispatch})
return json.dumps({'contents': self.runtime.render_template('hinter_display.html', out)})
def get_hint(self, data):
"""
The student got the incorrect answer found in data. Give him a hint.
Called by hinter javascript after a problem is graded as incorrect.
Args:
`data` -- must be interpretable by answer_to_str.
Output keys:
- 'hints' is a list of hint strings to show to the user.
- 'answer' is the parsed answer that was submitted.
Will record the user's wrong answer in user_submissions, and the hints shown
in previous_answers.
"""
# First, validate our inputs.
try:
answer = self.answer_to_str(data)
except (ValueError, AttributeError):
# Sometimes, we get an answer that's just not parsable. Do nothing.
log.exception('Answer not parsable: ' + str(data))
return
if not self.validate_answer(answer):
# Answer is not in the right form.
log.exception('Answer not valid: ' + str(answer))
return
if answer not in self.user_submissions:
self.user_submissions += [answer]
# For all answers similar enough to our own, accumulate all hints together.
# Also track the original answer of each hint.
matching_answers = self.get_matching_answers(answer)
matching_hints = {}
for matching_answer in matching_answers:
temp_dict = copy.deepcopy(self.hints[matching_answer])
for key, value in temp_dict.items():
# Each value now has hint, votes, matching_answer.
temp_dict[key] = value + [matching_answer]
matching_hints.update(temp_dict)
# matching_hints now maps pk's to lists of [hint, votes, matching_answer]
# Finally, randomly choose a subset of matching_hints to actually show.
if not matching_hints:
# No hints to give. Return.
return
# Get the top hint, plus two random hints.
n_hints = len(matching_hints)
hints = []
# max(dict) returns the maximum key in dict.
# The key function takes each pk, and returns the number of votes for the
# hint with that pk.
best_hint_index = max(matching_hints, key=lambda pk: matching_hints[pk][1])
hints.append(matching_hints[best_hint_index][0])
best_hint_answer = matching_hints[best_hint_index][2]
        # The brackets surrounding the index are for backwards compatibility purposes.
# (It used to be that each answer was paired with multiple hints in a list.)
self.previous_answers += [[best_hint_answer, [best_hint_index]]]
for _ in xrange(min(2, n_hints - 1)):
# Keep making random hints until we hit a target, or run out.
while True:
# random.choice randomly chooses an element from its input list.
# (We then unpack the item, in this case data for a hint.)
(hint_index, (rand_hint, _, hint_answer)) =\
random.choice(matching_hints.items())
if rand_hint not in hints:
break
hints.append(rand_hint)
self.previous_answers += [[hint_answer, [hint_index]]]
return {'hints': hints,
'answer': answer}
def get_feedback(self, data):
"""
The student got it correct. Ask him to vote on hints, or submit a hint.
Args:
`data` -- not actually used. (It is assumed that the answer is correct.)
Output keys:
- 'answer_to_hints': a nested dictionary.
answer_to_hints[answer][hint_pk] returns the text of the hint.
- 'user_submissions': the same thing as self.user_submissions. A list of
the answers that the user previously submitted.
"""
# The student got it right.
# Did he submit at least one wrong answer?
if len(self.user_submissions) == 0:
# No. Nothing to do here.
return
# Make a hint-voting interface for each wrong answer. The student will only
# be allowed to make one vote / submission, but he can choose which wrong answer
# he wants to look at.
answer_to_hints = {} # answer_to_hints[answer text][hint pk] -> hint text
# Go through each previous answer, and populate index_to_hints and index_to_answer.
for i in xrange(len(self.previous_answers)):
answer, hints_offered = self.previous_answers[i]
if answer not in answer_to_hints:
answer_to_hints[answer] = {}
if answer in self.hints:
# Go through each hint, and add to index_to_hints
for hint_id in hints_offered:
if (hint_id is not None) and (hint_id not in answer_to_hints[answer]):
try:
answer_to_hints[answer][hint_id] = self.hints[answer][str(hint_id)][0]
except KeyError:
# Sometimes, the hint that a user saw will have been deleted by the instructor.
continue
return {'answer_to_hints': answer_to_hints,
'user_submissions': self.user_submissions}
def tally_vote(self, data):
"""
Tally a user's vote on his favorite hint.
Args:
`data` -- expected to have the following keys:
'answer': text of answer we're voting on
'hint': hint_pk
'pk_list': A list of [answer, pk] pairs, each of which representing a hint.
We will return a list of how many votes each hint in the list has so far.
It's up to the browser to specify which hints to return vote counts for.
Returns key 'hint_and_votes', a list of (hint_text, #votes) pairs.
"""
if self.user_voted:
return {'error': 'Sorry, but you have already voted!'}
ans = data['answer']
if not self.validate_answer(ans):
# Uh oh. Invalid answer.
log.exception('Failure in hinter tally_vote: Unable to parse answer: {ans}'.format(ans=ans))
return {'error': 'Failure in voting!'}
hint_pk = str(data['hint'])
# We use temp_dict because we need to do a direct write for the database to update.
temp_dict = self.hints
try:
temp_dict[ans][hint_pk][1] += 1
except KeyError:
            log.exception('''Failure in hinter tally_vote: User voted for non-existent hint:
Answer={ans} pk={hint_pk}'''.format(ans=ans, hint_pk=hint_pk))
return {'error': 'Failure in voting!'}
self.hints = temp_dict
# Don't let the user vote again!
self.user_voted = True
# Return a list of how many votes each hint got.
pk_list = json.loads(data['pk_list'])
hint_and_votes = []
for answer, vote_pk in pk_list:
if not self.validate_answer(answer):
log.exception('In hinter tally_vote, couldn\'t parse {ans}'.format(ans=answer))
continue
try:
hint_and_votes.append(temp_dict[answer][str(vote_pk)])
except KeyError:
log.exception('In hinter tally_vote, couldn\'t find: {ans}, {vote_pk}'.format(
ans=answer, vote_pk=str(vote_pk)))
hint_and_votes.sort(key=lambda pair: pair[1], reverse=True)
# Reset self.previous_answers and user_submissions.
self.previous_answers = []
self.user_submissions = []
return {'hint_and_votes': hint_and_votes}
def submit_hint(self, data):
"""
Take a hint submission and add it to the database.
Args:
`data` -- expected to have the following keys:
'answer': text of answer
'hint': text of the new hint that the user is adding
Returns a thank-you message.
"""
# Do html escaping. Perhaps in the future do profanity filtering, etc. as well.
hint = escape(data['hint'])
answer = data['answer']
if not self.validate_answer(answer):
log.exception('Failure in hinter submit_hint: Unable to parse answer: {ans}'.format(
ans=answer))
return {'error': 'Could not submit answer'}
# Only allow a student to vote or submit a hint once.
if self.user_voted:
return {'message': 'Sorry, but you have already voted!'}
# Add the new hint to self.hints or self.mod_queue. (Awkward because a direct write
# is necessary.)
if self.moderate == 'True':
temp_dict = self.mod_queue
else:
temp_dict = self.hints
if answer in temp_dict:
temp_dict[answer][str(self.hint_pk)] = [hint, 1] # With one vote (the user himself).
else:
temp_dict[answer] = {str(self.hint_pk): [hint, 1]}
self.hint_pk += 1
if self.moderate == 'True':
self.mod_queue = temp_dict
else:
self.hints = temp_dict
# Mark the user has having voted; reset previous_answers
self.user_voted = True
self.previous_answers = []
self.user_submissions = []
return {'message': 'Thank you for your hint!'}
class CrowdsourceHinterDescriptor(CrowdsourceHinterFields, RawDescriptor):
module_class = CrowdsourceHinterModule
stores_state = True
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing CrowdsourceHinter. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('crowdsource_hinter')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
|
agpl-3.0
|
gromez/Sick-Beard
|
sickbeard/clients/generic.py
|
30
|
7839
|
import re
import time
from hashlib import sha1
import sickbeard
from sickbeard import logger
from sickbeard.exceptions import ex
from sickbeard.clients import http_error_code
from lib.bencode import bencode, bdecode
from lib import requests
class GenericClient(object):
def __init__(self, name, host=None, username=None, password=None, custom_url=None):
self.name = name
self.username = sickbeard.TORRENT_USERNAME if username is None else username
self.password = sickbeard.TORRENT_PASSWORD if password is None else password
self.host = sickbeard.TORRENT_HOST if host is None else host
self.custom_url = sickbeard.TORRENT_CUSTOM_URL if custom_url is None else custom_url
self.url = None
self.response = None
self.auth = None
self.last_time = time.time()
self.session = requests.session(auth=(self.username, self.password),timeout=60)
def _request(self, method='get', params={}, data=None, files=None):
if time.time() > self.last_time + 1800 or not self.auth:
self.last_time = time.time()
self._get_auth()
logger.log(self.name + u': Requested a ' + method.upper() + ' connection to url '+ self.url + ' with Params= ' + str(params) + ' Data=' + str(data if data else 'None')[0:99] + ('...' if len(data if data else 'None') > 100 else ''), logger.DEBUG)
if not self.auth:
            logger.log(self.name + u': Authentication Failed', logger.ERROR)
return False
try:
self.response = self.session.__getattribute__(method)(self.url, params=params, data=data, files=files)
except requests.exceptions.ConnectionError, e:
logger.log(self.name + u': Unable to connect ' +ex(e), logger.ERROR)
return False
except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
logger.log(self.name + u': Invalid Host', logger.ERROR)
return False
except requests.exceptions.HTTPError, e:
logger.log(self.name + u': Invalid HTTP Request ' + ex(e), logger.ERROR)
return False
except Exception, e:
            logger.log(self.name + u': Unknown exception raised when sending torrent to ' + self.name + ': ' + ex(e), logger.ERROR)
return False
if self.response.status_code == 401:
logger.log(self.name + u': Invalid Username or Password, check your config', logger.ERROR)
return False
if self.response.status_code in http_error_code.keys():
logger.log(self.name + u': ' + http_error_code[self.response.status_code], logger.DEBUG)
return False
logger.log(self.name + u': Response to '+ method.upper() + ' request is ' + self.response.text, logger.DEBUG)
return True
def _get_auth(self):
"""
This should be overridden and should return the auth_id needed for the client
"""
return None
def _add_torrent_uri(self, result):
"""
        This should be overridden and should return True/False from the client
when a torrent is added via url (magnet or .torrent link)
"""
return False
def _add_torrent_file(self, result):
"""
        This should be overridden and should return True/False from the client
when a torrent is added via result.content (only .torrent file)
"""
return False
def _set_torrent_label(self, result):
"""
        This should be overridden and should return True/False from the client
when a torrent is set with label
"""
return True
def _set_torrent_ratio(self, result):
"""
        This should be overridden and should return True/False from the client
when a torrent is set with ratio
"""
return True
def _set_torrent_path(self, torrent_path):
"""
        This should be overridden and should return True/False from the client
when a torrent is set with path
"""
return True
def _set_torrent_pause(self, result):
"""
        This should be overridden and should return True/False from the client
when a torrent is set with pause
"""
return True
def _get_torrent_hash(self, result):
if result.url.startswith('magnet'):
torrent_hash = re.findall('urn:btih:([\w]{32,40})', result.url)[0]
else:
            if hasattr(result, 'extraInfo') and len(result.extraInfo) > 0:
                torrent_hash = result.extraInfo[0]
            elif hasattr(result, 'content'):
info = bdecode(result.content)["info"]
torrent_hash = sha1(bencode(info)).hexdigest()
else:
torrent_hash = result.url
return torrent_hash
def sendTORRENT(self, result):
r_code = False
logger.log(u'Calling ' + self.name + ' Client', logger.DEBUG)
if not self._get_auth():
            logger.log(self.name + u': Authentication Failed', logger.ERROR)
return r_code
try:
result.hash = self._get_torrent_hash(result)
if hasattr(result,'content') and not result.url.startswith('magnet'):
r_code = self._add_torrent_file(result)
else:
r_code = self._add_torrent_uri(result)
if not self._set_torrent_pause(result):
logger.log(self.name + u': Unable to set the pause for Torrent', logger.ERROR)
if not self._set_torrent_label(result):
logger.log(self.name + u': Unable to set the label for Torrent', logger.ERROR)
if not self._set_torrent_ratio(result):
logger.log(self.name + u': Unable to set the ratio for Torrent', logger.ERROR)
if not self._set_torrent_path(result):
logger.log(self.name + u': Unable to set the path for Torrent', logger.ERROR)
except Exception, e:
logger.log(self.name + u': Failed Sending Torrent ', logger.DEBUG)
logger.log(self.name + u': Exception raised when sending torrent: ' + ex(e), logger.DEBUG)
return r_code
return r_code
def testAuthentication(self):
try:
self.response = self.session.get(self.url)
except requests.exceptions.ConnectionError:
return False, 'Error: ' + self.name + ' Connection Error'
except (requests.exceptions.MissingSchema, requests.exceptions.InvalidURL):
return False,'Error: Invalid ' + self.name + ' host'
if self.response.status_code == 401:
return False, 'Error: Invalid ' + self.name + ' Username or Password, check your config!'
try:
self._get_auth()
if self.response.status_code == 200 and self.auth:
return True, 'Success: Connected and Authenticated'
else:
return False, 'Error: Unable to get ' + self.name + ' Authentication, check your config!'
except Exception:
return False, 'Error: Unable to connect to '+ self.name
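# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical client, not part of Sick-Beard): a concrete
# client only needs to fill in the hooks documented above; GenericClient handles
# the session, authentication retries and error reporting.
#
#     class ExampleClient(GenericClient):
#
#         def __init__(self, host=None, username=None, password=None):
#             super(ExampleClient, self).__init__('Example', host, username, password)
#             self.url = self.host + 'api'
#
#         def _get_auth(self):
#             # a real client would fetch and store a token or cookie here
#             self.response = self.session.get(self.host + 'api/login')
#             self.auth = (self.response.status_code == 200) or None
#             return self.auth
#
#         def _add_torrent_uri(self, result):
#             return self._request(method='post', data={'url': result.url})
#
#         def _add_torrent_file(self, result):
#             return self._request(method='post', files={'torrent': result.content})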
|
gpl-3.0
|
mlavin/django-lastfm-auth
|
setup.py
|
1
|
1157
|
import os
from setuptools import setup, find_packages
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
setup(
name='django-lastfm-auth',
version=__import__('lastfm_auth').__version__,
author='Mark Lavin',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
url='https://github.com/mlavin/django-lastfm-auth',
license='BSD',
description=u' '.join(__import__('lastfm_auth').__doc__.splitlines()).strip(),
install_requires=['django-social-auth>=0.3.3', ],
classifiers=[
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
],
long_description=read_file('README.rst'),
)
|
bsd-2-clause
|
sinhrks/scikit-learn
|
sklearn/utils/tests/test_shortest_path.py
|
303
|
2841
|
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
|
bsd-3-clause
|
varunarya10/nova_test_latest
|
nova/api/openstack/compute/contrib/server_start_stop.py
|
51
|
3355
|
# Copyright 2012 Midokura Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova import objects
class ServerStartStopActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ServerStartStopActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
return objects.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=attrs)
except exception.NotFound:
msg = _("Instance not found")
raise webob.exc.HTTPNotFound(explanation=msg)
@wsgi.action('os-start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
extensions.check_compute_policy(context, 'start', instance)
try:
self.compute_api.start(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'start', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
extensions.check_compute_policy(context, 'stop', instance)
try:
self.compute_api.stop(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'stop', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
class Server_start_stop(extensions.ExtensionDescriptor):
"""Start/Stop instance compute API support."""
name = "ServerStartStop"
alias = "os-server-start-stop"
namespace = "http://docs.openstack.org/compute/ext/servers/api/v1.1"
updated = "2012-01-23T00:00:00Z"
def get_controller_extensions(self):
controller = ServerStartStopActionController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
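# Illustrative sketch of the requests these handlers serve: the @wsgi.action
# decorators bind the methods above to server action bodies, so starting and
# stopping an instance look roughly like
#   POST /v2/{tenant_id}/servers/{server_id}/action   with body {"os-start": null}
#   POST /v2/{tenant_id}/servers/{server_id}/action   with body {"os-stop": null}
# and both return HTTP 202 on success (see the webob.Response calls above).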
|
apache-2.0
|
rshipp/python-appassure
|
appassure/core/IExchangeManagement.py
|
1
|
2434
|
"""AppAssure 5 Core API"""
from appassure.api import AppAssureAPI
class IExchangeManagement(AppAssureAPI):
"""Full documentation online at
http://docs.appassure.com/display/AA50D/IExchangeManagement
"""
def verifyCredentials(self, data, agentId):
"""Verifies credentials to Exchange instance. Throws
exception on validation failure.
"""
return self.session.request('exchange/agent/%s/verifyExchangeCredentials'
% (agentId), 'PUT',
self.getXML(data, 'baseCredentials'))
def getAgentExchangeServerSettings(self, agentId):
"""Gets the exchange server settings for the agent."""
return self.session.request('exchange/agents/%s/exchangeSettings'
% (agentId))
def setAgentExchangeServerSettings(self, data, agentId):
"""Sets the exchange server settings for the agent."""
return self.session.request('exchange/agents/%s/exchangeSettings'
% (agentId), 'PUT',
self.getXML(data, 'exchangeServerSettings'))
def forceChecksumCheck(self, recoveryPointId):
"""Forces checksum verification for the specified
recovery point.
"""
return self.session.request('exchange/checksumcheck/%s/force'
% (recoveryPointId), 'POST')
def getMountabilityQueueContents(self):
"""Gets the contents of the mountability queue."""
return self.session.request('exchange/entries')
def getMountabilityQueueEntry(self, entryid):
"""Gets the info for a specific moutability queue
entry.
"""
return self.session.request('exchange/entries/%s'
% (entryid))
def forceMountabilityCheck(self, recoveryPointId):
"""Forces mountability verification for the specified
recovery point.
"""
return self.session.request('exchange/mountabilitycheck/%s/force'
% (recoveryPointId), 'POST')
def getMountabilityQueueConfiguration(self):
"""Gets the configuration of the mountability queue."""
return self.session.request('exchange/mountabilityConfig')
def setMountabilityConfiguration(self, data):
"""Sets the configuration of the mountability queue."""
return self.session.request('exchange/mountabilityConfig', 'POST',
self.getXML(data, 'mountabilityConfiguration'))
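# Illustrative usage sketch; the session/constructor details below are
# assumptions for illustration only (they follow the AppAssureAPI base class
# used by the other appassure.core modules):
#   mgmt = IExchangeManagement(session)
#   entries = mgmt.getMountabilityQueueContents()
#   mgmt.forceMountabilityCheck(recovery_point_id)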
|
bsd-3-clause
|
jonasjberg/autonameow
|
autonameow/vendor/unidecode/x0c1.py
|
253
|
4765
|
data = (
'syae', # 0x00
'syaeg', # 0x01
'syaegg', # 0x02
'syaegs', # 0x03
'syaen', # 0x04
'syaenj', # 0x05
'syaenh', # 0x06
'syaed', # 0x07
'syael', # 0x08
'syaelg', # 0x09
'syaelm', # 0x0a
'syaelb', # 0x0b
'syaels', # 0x0c
'syaelt', # 0x0d
'syaelp', # 0x0e
'syaelh', # 0x0f
'syaem', # 0x10
'syaeb', # 0x11
'syaebs', # 0x12
'syaes', # 0x13
'syaess', # 0x14
'syaeng', # 0x15
'syaej', # 0x16
'syaec', # 0x17
'syaek', # 0x18
'syaet', # 0x19
'syaep', # 0x1a
'syaeh', # 0x1b
'seo', # 0x1c
'seog', # 0x1d
'seogg', # 0x1e
'seogs', # 0x1f
'seon', # 0x20
'seonj', # 0x21
'seonh', # 0x22
'seod', # 0x23
'seol', # 0x24
'seolg', # 0x25
'seolm', # 0x26
'seolb', # 0x27
'seols', # 0x28
'seolt', # 0x29
'seolp', # 0x2a
'seolh', # 0x2b
'seom', # 0x2c
'seob', # 0x2d
'seobs', # 0x2e
'seos', # 0x2f
'seoss', # 0x30
'seong', # 0x31
'seoj', # 0x32
'seoc', # 0x33
'seok', # 0x34
'seot', # 0x35
'seop', # 0x36
'seoh', # 0x37
'se', # 0x38
'seg', # 0x39
'segg', # 0x3a
'segs', # 0x3b
'sen', # 0x3c
'senj', # 0x3d
'senh', # 0x3e
'sed', # 0x3f
'sel', # 0x40
'selg', # 0x41
'selm', # 0x42
'selb', # 0x43
'sels', # 0x44
'selt', # 0x45
'selp', # 0x46
'selh', # 0x47
'sem', # 0x48
'seb', # 0x49
'sebs', # 0x4a
'ses', # 0x4b
'sess', # 0x4c
'seng', # 0x4d
'sej', # 0x4e
'sec', # 0x4f
'sek', # 0x50
'set', # 0x51
'sep', # 0x52
'seh', # 0x53
'syeo', # 0x54
'syeog', # 0x55
'syeogg', # 0x56
'syeogs', # 0x57
'syeon', # 0x58
'syeonj', # 0x59
'syeonh', # 0x5a
'syeod', # 0x5b
'syeol', # 0x5c
'syeolg', # 0x5d
'syeolm', # 0x5e
'syeolb', # 0x5f
'syeols', # 0x60
'syeolt', # 0x61
'syeolp', # 0x62
'syeolh', # 0x63
'syeom', # 0x64
'syeob', # 0x65
'syeobs', # 0x66
'syeos', # 0x67
'syeoss', # 0x68
'syeong', # 0x69
'syeoj', # 0x6a
'syeoc', # 0x6b
'syeok', # 0x6c
'syeot', # 0x6d
'syeop', # 0x6e
'syeoh', # 0x6f
'sye', # 0x70
'syeg', # 0x71
'syegg', # 0x72
'syegs', # 0x73
'syen', # 0x74
'syenj', # 0x75
'syenh', # 0x76
'syed', # 0x77
'syel', # 0x78
'syelg', # 0x79
'syelm', # 0x7a
'syelb', # 0x7b
'syels', # 0x7c
'syelt', # 0x7d
'syelp', # 0x7e
'syelh', # 0x7f
'syem', # 0x80
'syeb', # 0x81
'syebs', # 0x82
'syes', # 0x83
'syess', # 0x84
'syeng', # 0x85
'syej', # 0x86
'syec', # 0x87
'syek', # 0x88
'syet', # 0x89
'syep', # 0x8a
'syeh', # 0x8b
'so', # 0x8c
'sog', # 0x8d
'sogg', # 0x8e
'sogs', # 0x8f
'son', # 0x90
'sonj', # 0x91
'sonh', # 0x92
'sod', # 0x93
'sol', # 0x94
'solg', # 0x95
'solm', # 0x96
'solb', # 0x97
'sols', # 0x98
'solt', # 0x99
'solp', # 0x9a
'solh', # 0x9b
'som', # 0x9c
'sob', # 0x9d
'sobs', # 0x9e
'sos', # 0x9f
'soss', # 0xa0
'song', # 0xa1
'soj', # 0xa2
'soc', # 0xa3
'sok', # 0xa4
'sot', # 0xa5
'sop', # 0xa6
'soh', # 0xa7
'swa', # 0xa8
'swag', # 0xa9
'swagg', # 0xaa
'swags', # 0xab
'swan', # 0xac
'swanj', # 0xad
'swanh', # 0xae
'swad', # 0xaf
'swal', # 0xb0
'swalg', # 0xb1
'swalm', # 0xb2
'swalb', # 0xb3
'swals', # 0xb4
'swalt', # 0xb5
'swalp', # 0xb6
'swalh', # 0xb7
'swam', # 0xb8
'swab', # 0xb9
'swabs', # 0xba
'swas', # 0xbb
'swass', # 0xbc
'swang', # 0xbd
'swaj', # 0xbe
'swac', # 0xbf
'swak', # 0xc0
'swat', # 0xc1
'swap', # 0xc2
'swah', # 0xc3
'swae', # 0xc4
'swaeg', # 0xc5
'swaegg', # 0xc6
'swaegs', # 0xc7
'swaen', # 0xc8
'swaenj', # 0xc9
'swaenh', # 0xca
'swaed', # 0xcb
'swael', # 0xcc
'swaelg', # 0xcd
'swaelm', # 0xce
'swaelb', # 0xcf
'swaels', # 0xd0
'swaelt', # 0xd1
'swaelp', # 0xd2
'swaelh', # 0xd3
'swaem', # 0xd4
'swaeb', # 0xd5
'swaebs', # 0xd6
'swaes', # 0xd7
'swaess', # 0xd8
'swaeng', # 0xd9
'swaej', # 0xda
'swaec', # 0xdb
'swaek', # 0xdc
'swaet', # 0xdd
'swaep', # 0xde
'swaeh', # 0xdf
'soe', # 0xe0
'soeg', # 0xe1
'soegg', # 0xe2
'soegs', # 0xe3
'soen', # 0xe4
'soenj', # 0xe5
'soenh', # 0xe6
'soed', # 0xe7
'soel', # 0xe8
'soelg', # 0xe9
'soelm', # 0xea
'soelb', # 0xeb
'soels', # 0xec
'soelt', # 0xed
'soelp', # 0xee
'soelh', # 0xef
'soem', # 0xf0
'soeb', # 0xf1
'soebs', # 0xf2
'soes', # 0xf3
'soess', # 0xf4
'soeng', # 0xf5
'soej', # 0xf6
'soec', # 0xf7
'soek', # 0xf8
'soet', # 0xf9
'soep', # 0xfa
'soeh', # 0xfb
'syo', # 0xfc
'syog', # 0xfd
'syogg', # 0xfe
'syogs', # 0xff
)
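# Illustrative note: unidecode resolves a character by splitting its code
# point into a high byte, which selects a table module (this file, x0c1.py,
# covers U+C100..U+C1FF), and a low byte used as an index into the data tuple
# above. For example, U+C11C (the Hangul syllable 서) has low byte 0x1c, so it
# transliterates to data[0x1c] == 'seo'.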
|
gpl-2.0
|
40223145c2g18/c2g18
|
exts/w2/static/Brython2.0.0-20140209-164925/Lib/xml/dom/expatbuilder.py
|
733
|
35733
|
"""Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The latter is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
_typeinfo_map = {
"CDATA": minidom.TypeInfo(None, "cdata"),
"ENUM": minidom.TypeInfo(None, "enumeration"),
"ENTITY": minidom.TypeInfo(None, "entity"),
"ENTITIES": minidom.TypeInfo(None, "entities"),
"ID": minidom.TypeInfo(None, "id"),
"IDREF": minidom.TypeInfo(None, "idref"),
"IDREFS": minidom.TypeInfo(None, "idrefs"),
"NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
"NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
self.tagName = tagName
self._attr_info = []
self._model = model
def __getstate__(self):
return self._attr_info, self._model, self.tagName
def __setstate__(self, state):
self._attr_info, self._model, self.tagName = state
def getAttributeType(self, aname):
for info in self._attr_info:
if info[1] == aname:
t = info[-2]
if t[0] == "(":
return _typeinfo_map["ENUM"]
else:
return _typeinfo_map[info[-2]]
return minidom._no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return minidom._no_type
def isElementContent(self):
if self._model:
type = self._model[0]
return type not in (expat.model.XML_CTYPE_ANY,
expat.model.XML_CTYPE_MIXED)
else:
return False
def isEmpty(self):
if self._model:
return self._model[0] == expat.model.XML_CTYPE_EMPTY
else:
return False
def isId(self, aname):
for info in self._attr_info:
if info[1] == aname:
return info[-2] == "ID"
return False
def isIdNS(self, euri, ename, auri, aname):
# not sure this is meaningful
return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
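# Illustrative note on the name format handled above: with namespace handling
# enabled (Namespaces.createParser below passes namespace_separator=" " and
# sets namespace_prefixes), expat reports element names such as
# "http://example.com/ns elem pfx". _parse_ns_name splits that into
# uri='http://example.com/ns', localname='elem', prefix='pfx' and
# qname='pfx:elem', while a two-part name "http://example.com/ns elem" yields
# prefix=EMPTY_PREFIX and qname == localname == 'elem'.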
class ExpatBuilder:
"""Document builder that uses Expat to build a ParsedXML.DOM document
instance."""
def __init__(self, options=None):
if options is None:
options = xmlbuilder.Options()
self._options = options
if self._options.filter is not None:
self._filter = FilterVisibilityController(self._options.filter)
else:
self._filter = None
# This *really* doesn't do anything in this case, so
# override it with something fast & minimal.
self._finish_start_element = id
self._parser = None
self.reset()
def createParser(self):
"""Create a new parser object."""
return expat.ParserCreate()
def getParser(self):
"""Return the parser object, creating a new one if needed."""
if not self._parser:
self._parser = self.createParser()
self._intern_setdefault = self._parser.intern.setdefault
self._parser.buffer_text = True
self._parser.ordered_attributes = True
self._parser.specified_attributes = True
self.install(self._parser)
return self._parser
def reset(self):
"""Free all data structures used during DOM construction."""
self.document = theDOMImplementation.createDocument(
EMPTY_NAMESPACE, None, None)
self.curNode = self.document
self._elem_info = self.document._elem_info
self._cdata = False
def install(self, parser):
"""Install the callbacks needed to build the DOM into the parser."""
# This creates circular references!
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.first_element_handler
parser.EndElementHandler = self.end_element_handler
parser.ProcessingInstructionHandler = self.pi_handler
if self._options.entities:
parser.EntityDeclHandler = self.entity_decl_handler
parser.NotationDeclHandler = self.notation_decl_handler
if self._options.comments:
parser.CommentHandler = self.comment_handler
if self._options.cdata_sections:
parser.StartCdataSectionHandler = self.start_cdata_section_handler
parser.EndCdataSectionHandler = self.end_cdata_section_handler
parser.CharacterDataHandler = self.character_data_handler_cdata
else:
parser.CharacterDataHandler = self.character_data_handler
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
parser.XmlDeclHandler = self.xml_decl_handler
parser.ElementDeclHandler = self.element_decl_handler
parser.AttlistDeclHandler = self.attlist_decl_handler
def parseFile(self, file):
"""Parse a document from a file object, returning the document
node."""
parser = self.getParser()
first_buffer = True
try:
while 1:
buffer = file.read(16*1024)
if not buffer:
break
parser.Parse(buffer, 0)
if first_buffer and self.document.documentElement:
self._setup_subset(buffer)
first_buffer = False
parser.Parse("", True)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def parseString(self, string):
"""Parse a document from a string, returning the document node."""
parser = self.getParser()
try:
parser.Parse(string, True)
self._setup_subset(string)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def _setup_subset(self, buffer):
"""Load the internal subset if there might be one."""
if self.document.doctype:
extractor = InternalSubsetExtractor()
extractor.parseString(buffer)
subset = extractor.getSubset()
self.document.doctype.internalSubset = subset
def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
has_internal_subset):
doctype = self.document.implementation.createDocumentType(
doctypeName, publicId, systemId)
doctype.ownerDocument = self.document
_append_child(self.document, doctype)
self.document.doctype = doctype
if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
self.document.doctype = None
del self.document.childNodes[-1]
doctype = None
self._parser.EntityDeclHandler = None
self._parser.NotationDeclHandler = None
if has_internal_subset:
if doctype is not None:
doctype.entities._seq = []
doctype.notations._seq = []
self._parser.CommentHandler = None
self._parser.ProcessingInstructionHandler = None
self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
def end_doctype_decl_handler(self):
if self._options.comments:
self._parser.CommentHandler = self.comment_handler
self._parser.ProcessingInstructionHandler = self.pi_handler
if not (self._elem_info or self._filter):
self._finish_end_element = id
def pi_handler(self, target, data):
node = self.document.createProcessingInstruction(target, data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def character_data_handler_cdata(self, data):
childNodes = self.curNode.childNodes
if self._cdata:
if ( self._cdata_continue
and childNodes[-1].nodeType == CDATA_SECTION_NODE):
childNodes[-1].appendData(data)
return
node = self.document.createCDATASection(data)
self._cdata_continue = True
elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
value = node.data + data
node.data = value
return
else:
node = minidom.Text()
node.data = data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def character_data_handler(self, data):
childNodes = self.curNode.childNodes
if childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
node.data = node.data + data
return
node = minidom.Text()
node.data = node.data + data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def entity_decl_handler(self, entityName, is_parameter_entity, value,
base, systemId, publicId, notationName):
if is_parameter_entity:
# we don't care about parameter entities for the DOM
return
if not self._options.entities:
return
node = self.document._create_entity(entityName, publicId,
systemId, notationName)
if value is not None:
# internal entity
# node *should* be readonly, but we'll cheat
child = self.document.createTextNode(value)
node.childNodes.append(child)
self.document.doctype.entities._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
del self.document.doctype.entities._seq[-1]
def notation_decl_handler(self, notationName, base, systemId, publicId):
node = self.document._create_notation(notationName, publicId, systemId)
self.document.doctype.notations._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT:
del self.document.doctype.notations._seq[-1]
def comment_handler(self, data):
node = self.document.createComment(data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def start_cdata_section_handler(self):
self._cdata = True
self._cdata_continue = False
def end_cdata_section_handler(self):
self._cdata = False
self._cdata_continue = False
def external_entity_ref_handler(self, context, base, systemId, publicId):
return 1
def first_element_handler(self, name, attributes):
if self._filter is None and not self._elem_info:
self._finish_end_element = id
self.getParser().StartElementHandler = self.start_element_handler
self.start_element_handler(name, attributes)
def start_element_handler(self, name, attributes):
node = self.document.createElement(name)
_append_child(self.curNode, node)
self.curNode = node
if attributes:
for i in range(0, len(attributes), 2):
a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
None, EMPTY_PREFIX)
value = attributes[i+1]
a.value = value
a.ownerDocument = self.document
_set_attribute_node(node, a)
if node is not self.document.documentElement:
self._finish_start_element(node)
def _finish_start_element(self, node):
if self._filter:
# To be general, we'd have to call isSameNode(), but this
# is sufficient for minidom:
if node is self.document.documentElement:
return
filt = self._filter.startContainer(node)
if filt == FILTER_REJECT:
# ignore this node & all descendents
Rejecter(self)
elif filt == FILTER_SKIP:
                # ignore this node, but make its children become
# children of the parent node
Skipper(self)
else:
return
self.curNode = node.parentNode
node.parentNode.removeChild(node)
node.unlink()
# If this ever changes, Namespaces.end_element_handler() needs to
# be changed to match.
#
def end_element_handler(self, name):
curNode = self.curNode
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
def _finish_end_element(self, curNode):
info = self._elem_info.get(curNode.tagName)
if info:
self._handle_white_text_nodes(curNode, info)
if self._filter:
if curNode is self.document.documentElement:
return
if self._filter.acceptNode(curNode) == FILTER_REJECT:
self.curNode.removeChild(curNode)
curNode.unlink()
def _handle_white_text_nodes(self, node, info):
if (self._options.whitespace_in_element_content
or not info.isElementContent()):
return
# We have element type information and should remove ignorable
        # whitespace; identify text nodes which contain only
# whitespace.
L = []
for child in node.childNodes:
if child.nodeType == TEXT_NODE and not child.data.strip():
L.append(child)
# Remove ignorable whitespace from the tree.
for child in L:
node.removeChild(child)
def element_decl_handler(self, name, model):
info = self._elem_info.get(name)
if info is None:
self._elem_info[name] = ElementInfo(name, model)
else:
assert info._model is None
info._model = model
def attlist_decl_handler(self, elem, name, type, default, required):
info = self._elem_info.get(elem)
if info is None:
info = ElementInfo(elem)
self._elem_info[elem] = info
info._attr_info.append(
[None, name, None, None, default, 0, type, required])
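    # Illustrative note: each entry appended to _attr_info above is the
    # 8-item list [None, name, None, None, default, 0, type, required];
    # ElementInfo.getAttributeType() matches on info[1] (the attribute name)
    # and looks up info[-2] (the declared type), and isId() checks whether
    # info[-2] == "ID".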
def xml_decl_handler(self, version, encoding, standalone):
self.document.version = version
self.document.encoding = encoding
# This is still a little ugly, thanks to the pyexpat API. ;-(
if standalone >= 0:
if standalone:
self.document.standalone = True
else:
self.document.standalone = False
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
"""Wrapper around a DOMBuilderFilter which implements the checks
to make the whatToShow filter attribute work."""
__slots__ = 'filter',
def __init__(self, filter):
self.filter = filter
def startContainer(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.startContainer(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"startContainer() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
def acceptNode(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.acceptNode(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val == FILTER_SKIP:
# move all child nodes to the parent, and remove this node
parent = node.parentNode
for child in node.childNodes[:]:
parent.appendChild(child)
# node is handled by the caller
return FILTER_REJECT
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"acceptNode() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
_nodetype_mask = {
Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
}
class FilterCrutch(object):
__slots__ = '_builder', '_level', '_old_start', '_old_end'
def __init__(self, builder):
self._level = 0
self._builder = builder
parser = builder._parser
self._old_start = parser.StartElementHandler
self._old_end = parser.EndElementHandler
parser.StartElementHandler = self.start_element_handler
parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
__slots__ = ()
def __init__(self, builder):
FilterCrutch.__init__(self, builder)
parser = builder._parser
for name in ("ProcessingInstructionHandler",
"CommentHandler",
"CharacterDataHandler",
"StartCdataSectionHandler",
"EndCdataSectionHandler",
"ExternalEntityRefHandler",
):
setattr(parser, name, None)
def start_element_handler(self, *args):
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# restore the old handlers
parser = self._builder._parser
self._builder.install(parser)
parser.StartElementHandler = self._old_start
parser.EndElementHandler = self._old_end
else:
self._level = self._level - 1
class Skipper(FilterCrutch):
__slots__ = ()
def start_element_handler(self, *args):
node = self._builder.curNode
self._old_start(*args)
if self._builder.curNode is not node:
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# We're popping back out of the node we're skipping, so we
# shouldn't need to do anything but reset the handlers.
self._builder._parser.StartElementHandler = self._old_start
self._builder._parser.EndElementHandler = self._old_end
self._builder = None
else:
self._level = self._level - 1
self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
"""Builder which constructs document fragments given XML source
text and a context node.
The context node is expected to provide information about the
namespace declarations which are in scope at the start of the
fragment.
"""
def __init__(self, context, options=None):
if context.nodeType == DOCUMENT_NODE:
self.originalDocument = context
self.context = context
else:
self.originalDocument = context.ownerDocument
self.context = context
ExpatBuilder.__init__(self, options)
def reset(self):
ExpatBuilder.reset(self)
self.fragment = None
def parseFile(self, file):
"""Parse a document fragment from a file object, returning the
fragment node."""
return self.parseString(file.read())
def parseString(self, string):
"""Parse a document fragment from a string, returning the
fragment node."""
self._source = string
parser = self.getParser()
doctype = self.originalDocument.doctype
ident = ""
if doctype:
subset = doctype.internalSubset or self._getDeclarations()
if doctype.publicId:
ident = ('PUBLIC "%s" "%s"'
% (doctype.publicId, doctype.systemId))
elif doctype.systemId:
ident = 'SYSTEM "%s"' % doctype.systemId
else:
subset = ""
nsattrs = self._getNSattrs() # get ns decls from node's ancestors
document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
try:
parser.Parse(document, 1)
except:
self.reset()
raise
fragment = self.fragment
self.reset()
## self._parser = None
return fragment
def _getDeclarations(self):
"""Re-create the internal subset from the DocumentType node.
This is only needed if we don't already have the
internalSubset as a string.
"""
doctype = self.context.ownerDocument.doctype
s = ""
if doctype:
for i in range(doctype.notations.length):
notation = doctype.notations.item(i)
if s:
s = s + "\n "
s = "%s<!NOTATION %s" % (s, notation.nodeName)
if notation.publicId:
s = '%s PUBLIC "%s"\n "%s">' \
% (s, notation.publicId, notation.systemId)
else:
s = '%s SYSTEM "%s">' % (s, notation.systemId)
for i in range(doctype.entities.length):
entity = doctype.entities.item(i)
if s:
s = s + "\n "
s = "%s<!ENTITY %s" % (s, entity.nodeName)
if entity.publicId:
s = '%s PUBLIC "%s"\n "%s"' \
% (s, entity.publicId, entity.systemId)
elif entity.systemId:
s = '%s SYSTEM "%s"' % (s, entity.systemId)
else:
s = '%s "%s"' % (s, entity.firstChild.data)
if entity.notationName:
s = "%s NOTATION %s" % (s, entity.notationName)
s = s + ">"
return s
def _getNSattrs(self):
return ""
def external_entity_ref_handler(self, context, base, systemId, publicId):
if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
# this entref is the one that we made to put the subtree
# in; all of our given input is parsed in here.
old_document = self.document
old_cur_node = self.curNode
parser = self._parser.ExternalEntityParserCreate(context)
# put the real document back, parse into the fragment to return
self.document = self.originalDocument
self.fragment = self.document.createDocumentFragment()
self.curNode = self.fragment
try:
parser.Parse(self._source, 1)
finally:
self.curNode = old_cur_node
self.document = old_document
self._source = None
return -1
else:
return ExpatBuilder.external_entity_ref_handler(
self, context, base, systemId, publicId)
class Namespaces:
"""Mix-in class for builders; adds support for namespaces."""
def _initNamespaces(self):
# list of (prefix, uri) ns declarations. Namespace attrs are
# constructed from this and added to the element's attrs.
self._ns_ordered_prefixes = []
def createParser(self):
"""Create a new namespace-handling parser."""
parser = expat.ParserCreate(namespace_separator=" ")
parser.namespace_prefixes = True
return parser
def install(self, parser):
"""Insert the namespace-handlers onto the parser."""
ExpatBuilder.install(self, parser)
if self._options.namespace_declarations:
parser.StartNamespaceDeclHandler = (
self.start_namespace_decl_handler)
def start_namespace_decl_handler(self, prefix, uri):
"""Push this namespace declaration on our storage."""
self._ns_ordered_prefixes.append((prefix, uri))
def start_element_handler(self, name, attributes):
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
else:
uri = EMPTY_NAMESPACE
qname = name
localname = None
prefix = EMPTY_PREFIX
node = minidom.Element(qname, uri, prefix, localname)
node.ownerDocument = self.document
_append_child(self.curNode, node)
self.curNode = node
if self._ns_ordered_prefixes:
for prefix, uri in self._ns_ordered_prefixes:
if prefix:
a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
XMLNS_NAMESPACE, prefix, "xmlns")
else:
a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
"xmlns", EMPTY_PREFIX)
a.value = uri
a.ownerDocument = self.document
_set_attribute_node(node, a)
del self._ns_ordered_prefixes[:]
if attributes:
node._ensure_attributes()
_attrs = node._attrs
_attrsNS = node._attrsNS
for i in range(0, len(attributes), 2):
aname = attributes[i]
value = attributes[i+1]
if ' ' in aname:
uri, localname, prefix, qname = _parse_ns_name(self, aname)
a = minidom.Attr(qname, uri, localname, prefix)
_attrs[qname] = a
_attrsNS[(uri, localname)] = a
else:
a = minidom.Attr(aname, EMPTY_NAMESPACE,
aname, EMPTY_PREFIX)
_attrs[aname] = a
_attrsNS[(EMPTY_NAMESPACE, aname)] = a
a.ownerDocument = self.document
a.value = value
a.ownerElement = node
if __debug__:
# This only adds some asserts to the original
# end_element_handler(), so we only define this when -O is not
# used. If changing one, be sure to check the other to see if
# it needs to be changed as well.
#
def end_element_handler(self, name):
curNode = self.curNode
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
assert (curNode.namespaceURI == uri
and curNode.localName == localname
and curNode.prefix == prefix), \
"element stack messed up! (namespace)"
else:
assert curNode.nodeName == name, \
"element stack messed up - bad nodeName"
assert curNode.namespaceURI == EMPTY_NAMESPACE, \
"element stack messed up - bad namespaceURI"
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
"""Document builder that supports namespaces."""
def reset(self):
ExpatBuilder.reset(self)
self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
"""Fragment builder that supports namespaces."""
def reset(self):
FragmentBuilder.reset(self)
self._initNamespaces()
def _getNSattrs(self):
"""Return string of namespace attributes from this element and
ancestors."""
# XXX This needs to be re-written to walk the ancestors of the
# context to build up the namespace information from
# declarations, elements, and attributes found in context.
# Otherwise we have to store a bunch more data on the DOM
# (though that *might* be more reliable -- not clear).
attrs = ""
context = self.context
L = []
while context:
if hasattr(context, '_ns_prefix_uri'):
for prefix, uri in context._ns_prefix_uri.items():
# add every new NS decl from context to L and attrs string
if prefix in L:
continue
L.append(prefix)
if prefix:
declname = "xmlns:" + prefix
else:
declname = "xmlns"
if attrs:
attrs = "%s\n %s='%s'" % (attrs, declname, uri)
else:
attrs = " %s='%s'" % (declname, uri)
context = context.parentNode
return attrs
class ParseEscape(Exception):
"""Exception raised to short-circuit parsing in InternalSubsetExtractor."""
pass
class InternalSubsetExtractor(ExpatBuilder):
"""XML processor which can rip out the internal document type subset."""
subset = None
def getSubset(self):
"""Return the internal subset as a string."""
return self.subset
def parseFile(self, file):
try:
ExpatBuilder.parseFile(self, file)
except ParseEscape:
pass
def parseString(self, string):
try:
ExpatBuilder.parseString(self, string)
except ParseEscape:
pass
def install(self, parser):
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.start_element_handler
def start_doctype_decl_handler(self, name, publicId, systemId,
has_internal_subset):
if has_internal_subset:
parser = self.getParser()
self.subset = []
parser.DefaultHandler = self.subset.append
parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
else:
raise ParseEscape()
def end_doctype_decl_handler(self):
s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
self.subset = s
raise ParseEscape()
def start_element_handler(self, name, attrs):
raise ParseEscape()
def parse(file, namespaces=True):
"""Parse a document, returning the resulting Document node.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseString(string, namespaces=True):
"""Parse a document from a string, returning the resulting
Document node.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
"""Parse a fragment of a document, given the context from which it
was originally extracted. context should be the parent of the
node(s) which are in the fragment.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseFragmentString(string, context, namespaces=True):
"""Parse a fragment of a document from a string, given the context
from which it was originally extracted. context should be the
parent of the node(s) which are in the fragment.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
return builder.parseString(string)
def makeBuilder(options):
"""Create a builder based on an Options object."""
if options.namespaces:
return ExpatBuilderNS(options)
else:
return ExpatBuilder(options)
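# Minimal usage sketch (illustrative): parse a small document with the
# namespace-aware builder defined above and inspect the result.
if __name__ == "__main__":
    _demo_doc = parseString("<root xmlns='urn:example'><child attr='1'/></root>")
    print(_demo_doc.documentElement.tagName)        # -> root
    print(_demo_doc.documentElement.namespaceURI)   # -> urn:example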
|
gpl-2.0
|
ryandub/skew
|
skew/resources/aws/elasticache.py
|
3
|
1868
|
# Copyright (c) 2014 Scopely, Inc.
# Copyright (c) 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from skew.resources.aws import AWSResource
class Cluster(AWSResource):
class Meta(object):
service = 'elasticache'
type = 'cluster'
enum_spec = ('describe_cache_clusters',
'CacheClusters[]', None)
detail_spec = None
id = 'CacheClusterId'
filter_name = 'CacheClusterId'
filter_type = 'scalar'
name = 'CacheClusterId'
date = 'CacheClusterCreateTime'
dimension = 'CacheClusterId'
class SubnetGroup(AWSResource):
class Meta(object):
service = 'elasticache'
type = 'subnet-group'
enum_spec = ('describe_cache_subnet_groups',
'CacheSubnetGroups', None)
detail_spec = None
id = 'CacheSubnetGroupName'
filter_name = 'CacheSubnetGroupName'
filter_type = 'scalar'
name = 'CacheSubnetGroupName'
date = None
dimension = None
class Snapshot(AWSResource):
class Meta(object):
service = 'elasticache'
type = 'snapshot'
enum_spec = ('describe_snapshots', 'Snapshots', None)
detail_spec = None
id = 'SnapshotName'
filter_name = 'SnapshotName'
filter_type = 'scalar'
name = 'SnapshotName'
date = 'StartTime'
dimension = None
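# Illustrative note: each Meta.enum_spec above is a
# (client_method, jmespath_expression, extra_args) triple telling skew how to
# enumerate the resource; e.g. Cluster instances are listed by calling
# describe_cache_clusters and pulling 'CacheClusters[]' out of the response.
# One typical way to reach these resources is skew's ARN scanner, roughly:
#   import skew
#   for cluster in skew.scan('arn:aws:elasticache:*:*:cluster/*'):
#       print(cluster.id)
# Exact ARN patterns and resource attributes depend on the skew version in use.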
|
apache-2.0
|
waytai/odoo
|
addons/sale_analytic_plans/__openerp__.py
|
262
|
1634
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sales Analytic Distribution',
'version': '1.0',
'category': 'Sales Management',
'description': """
The base module to manage analytic distribution and sales orders.
=================================================================
Using this module you will be able to link analytic accounts to sales orders.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale', 'account_analytic_plans'],
'data': ['sale_analytic_plans_view.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Juniper/contrail-dev-neutron
|
neutron/tests/unit/mlnx/test_defaults.py
|
25
|
1508
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
#NOTE this import loads tests required options
from neutron.plugins.mlnx.common import config # noqa
from neutron.tests import base
class ConfigurationTest(base.BaseTestCase):
def test_defaults(self):
self.assertEqual(2,
cfg.CONF.AGENT.polling_interval)
self.assertEqual('vlan',
cfg.CONF.MLNX.tenant_network_type)
self.assertEqual(1,
len(cfg.CONF.MLNX.network_vlan_ranges))
self.assertEqual('eth',
cfg.CONF.MLNX.physical_network_type)
self.assertFalse(cfg.CONF.MLNX.physical_network_type_mappings)
self.assertEqual(0,
len(cfg.CONF.ESWITCH.
physical_interface_mappings))
self.assertEqual('tcp://127.0.0.1:60001',
cfg.CONF.ESWITCH.daemon_endpoint)
|
apache-2.0
|
underyx/ansible-modules-core
|
cloud/amazon/ec2_facts.py
|
46
|
6431
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_facts
short_description: Gathers facts about remote hosts within ec2 (aws)
version_added: "1.0"
options:
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
description:
- This module fetches data from the metadata servers in ec2 (aws) as per
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.
The module must be called from within the EC2 instance itself.
notes:
- Parameters to filter on ec2_facts may be added later.
author: "Silviu Dicu <[email protected]>"
'''
EXAMPLES = '''
# Conditional example
- name: Gather facts
action: ec2_facts
- name: Conditional
action: debug msg="This instance is a t1.micro"
when: ansible_ec2_instance_type == "t1.micro"
'''
import socket
import re
socket.setdefaulttimeout(5)
class Ec2Metadata(object):
ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
AWS_REGIONS = ('ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2',
'us-gov-west-1'
)
def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
self.module = module
self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
self._data = {}
self._prefix = 'ansible_ec2_%s'
def _fetch(self, url):
(response, info) = fetch_url(self.module, url, force=True)
if response:
data = response.read()
else:
data = None
return data
def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):
new_fields = {}
for key, value in fields.iteritems():
split_fields = key[len(uri):].split('/')
if len(split_fields) > 1 and split_fields[1]:
new_key = "-".join(split_fields)
new_fields[self._prefix % new_key] = value
else:
new_key = "".join(split_fields)
new_fields[self._prefix % new_key] = value
for pattern in filter_patterns:
for key in new_fields.keys():
match = re.search(pattern, key)
if match:
new_fields.pop(key)
return new_fields
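    # Illustrative note: _mangle_fields() rewrites the URI-keyed dict built by
    # fetch() into Ansible fact names, e.g. the key
    # '<uri_meta>placement/availability-zone' becomes
    # 'ansible_ec2_placement-availability-zone'; fix_invalid_varnames() later
    # turns the dashes into underscores, and keys matching the
    # 'public-keys-0' pattern are filtered out here.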
def fetch(self, uri, recurse=True):
raw_subfields = self._fetch(uri)
if not raw_subfields:
return
subfields = raw_subfields.split('\n')
for field in subfields:
if field.endswith('/') and recurse:
self.fetch(uri + field)
if uri.endswith('/'):
new_uri = uri + field
else:
new_uri = uri + '/' + field
if new_uri not in self._data and not new_uri.endswith('/'):
content = self._fetch(new_uri)
if field == 'security-groups':
sg_fields = ",".join(content.split('\n'))
self._data['%s' % (new_uri)] = sg_fields
else:
self._data['%s' % (new_uri)] = content
def fix_invalid_varnames(self, data):
"""Change ':'' and '-' to '_' to ensure valid template variable names"""
for (key, value) in data.items():
if ':' in key or '-' in key:
newkey = key.replace(':','_').replace('-','_')
del data[key]
data[newkey] = value
def add_ec2_region(self, data):
"""Use the 'ansible_ec2_placement_availability_zone' key/value
pair to add 'ansible_ec2_placement_region' key/value pair with
the EC2 region name.
"""
# Only add a 'ansible_ec2_placement_region' key if the
# 'ansible_ec2_placement_availability_zone' exists.
zone = data.get('ansible_ec2_placement_availability_zone')
if zone is not None:
# Use the zone name as the region name unless the zone
# name starts with a known AWS region name.
region = zone
for r in self.AWS_REGIONS:
if zone.startswith(r):
region = r
break
data['ansible_ec2_placement_region'] = region
def run(self):
self.fetch(self.uri_meta) # populate _data
data = self._mangle_fields(self._data, self.uri_meta)
data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
self.fix_invalid_varnames(data)
self.add_ec2_region(data)
return data
def main():
argument_spec = url_argument_spec()
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode = True,
)
ec2_facts = Ec2Metadata(module).run()
ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
gpl-3.0
|
madratman/nuklei-code
|
contrib/scons/scons-local-2.0.1/SCons/compat/_scons_subprocess.py
|
183
|
44500
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 classes basically work as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
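Following those notes, a minimal sketch (not from the original guide) of
replacing the popen2.Popen3 class itself:
inst = popen2.Popen3("somestring", capturestderr=True)
==>
p = Popen("somestring", shell=True,
          stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
where inst.tochild, inst.fromchild and inst.childerr correspond to
p.stdin, p.stdout and p.stderr respectively.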
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
try:
import threading
except ImportError:
# SCons: the threading module is only used by the communicate()
# method, which we don't actually use, so don't worry if we
# can't import it.
pass
import msvcrt
try:
# Try to get _subprocess
from _subprocess import *
class STARTUPINFO(object):
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes(object):
error = IOError
except ImportError:
# If not there, then drop back to requiring pywin32
# TODO: Should this be wrapped in try as well? To notify user to install
# pywin32 ? With URL to it?
import pywintypes
from win32api import GetStdHandle, STD_INPUT_HANDLE, \
STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
from win32api import GetCurrentProcess, DuplicateHandle, \
GetModuleFileName, GetVersion
from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
from win32pipe import CreatePipe
from win32process import CreateProcess, STARTUPINFO, \
GetExitCodeProcess, STARTF_USESTDHANDLES, \
STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
else:
import select
import errno
import fcntl
import pickle
try:
fcntl.F_GETFD
except AttributeError:
fcntl.F_GETFD = 1
try:
fcntl.F_SETFD
except AttributeError:
fcntl.F_SETFD = 2
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
MAXFD = 256
try:
isinstance(1, int)
except TypeError:
def is_int(obj):
return isinstance(obj, type(1))
def is_int_or_long(obj):
return type(obj) in (type(1), type(1L))
else:
def is_int(obj):
return isinstance(obj, int)
def is_int_or_long(obj):
return isinstance(obj, (int, long))
try:
types.StringTypes
except AttributeError:
try:
types.StringTypes = (str, unicode)
except NameError:
types.StringTypes = (str,)
def is_string(obj):
return isinstance(obj, types.StringTypes)
_active = []
def _cleanup():
for inst in _active[:]:
if inst.poll(_deadstate=sys.maxsize) >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return apply(Popen, popenargs, kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
if retcode:
raise CalledProcessError(retcode, cmd)
return retcode
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
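# Illustrative note (not part of the original module): applying the rules
# above, list2cmdline(['ab"c', '\\', 'd e']) is expected to return the string
#   ab\"c \ "d e"
# -- the embedded quote is backslash-escaped, the lone backslash stays
# literal, and the whitespace-containing argument is double-quoted.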
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not is_int_or_long(bufsize):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds:
raise ValueError("close_fds is not supported on Windows "
"platforms")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if p2cwrite:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self.poll(_deadstate=sys.maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
elif self.stderr:
stderr = self.stderr.read()
self.wait()
return (stdout, stderr)
return self._communicate(input)
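# Hedged usage sketch (not part of the original module): with stdin and
# stdout redirected to pipes,
#   p = Popen(["grep", "hda"], stdin=PIPE, stdout=PIPE)
#   out, err = p.communicate("dmesg output as a string\n")
# writes the input, closes stdin and reads stdout to EOF; err is None here
# because stderr was not redirected.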
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
elif stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
# Detach and turn into fd
p2cwrite = p2cwrite.Detach()
p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
elif is_int(stdin):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
elif stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
# Detach and turn into fd
c2pread = c2pread.Detach()
c2pread = msvcrt.open_osfhandle(c2pread, 0)
elif is_int(stdout):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
elif stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
# Detach and turn into fd
errread = errread.Detach()
errread = msvcrt.open_osfhandle(errread, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return DuplicateHandle(GetCurrentProcess(), handle,
GetCurrentProcess(), 0, 1,
DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. The cost is that Ctrl+C won't
# kill children.
creationflags = creationflags | CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
# must inherit handles to pass std
# handles
1,
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
obj = WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif is_int(stdin):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif is_int(stdout):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
for i in range(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if is_string(args):
args = [args]
if shell:
args = ["/bin/sh", "-c"] + args
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
self._set_cloexec_flag(errpipe_write)
self.pid = os.fork()
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite:
os.close(p2cwrite)
if c2pread:
os.close(c2pread)
if errread:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
if p2cread:
os.dup2(p2cread, 0)
if c2pwrite:
os.dup2(c2pwrite, 1)
if errwrite:
os.dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the same
# fd more than once, or standard fds.
try:
set
except NameError:
# Fall-back for earlier Python versions, so epydoc
# can use this module directly to execute things.
if p2cread:
os.close(p2cread)
if c2pwrite and c2pwrite not in (p2cread,):
os.close(c2pwrite)
if errwrite and errwrite not in (p2cread, c2pwrite):
os.close(errwrite)
else:
for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)):
if fd: os.close(fd)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
apply(preexec_fn)
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
os.close(errpipe_write)
if p2cread and p2cwrite:
os.close(p2cread)
if c2pwrite and c2pread:
os.close(c2pwrite)
if errwrite and errread:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
os.close(errpipe_read)
if data != "":
os.waitpid(self.pid, 0)
child_exception = pickle.loads(data)
raise child_exception
def _handle_exitstatus(self, sts):
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
elif os.WIFEXITED(sts):
self.returncode = os.WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except os.error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
pid, sts = os.waitpid(self.pid, 0)
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
rlist, wlist, xlist = select.select(read_set, write_set, [])
if self.stdin in wlist:
# When select has indicated that the file is writable,
# we can write up to PIPE_BUF bytes without risk of
# blocking. POSIX defines PIPE_BUF >= 512
m = memoryview(input)[input_offset:input_offset+512]
bytes_written = os.write(self.stdin.fileno(), m)
input_offset = input_offset + bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
sys.stderr.write( "Gosh. No error.\n" )
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-3.0
|
syphar/django
|
tests/template_tests/filter_tests/test_date.py
|
94
|
3011
|
from datetime import datetime, time
from django.template.defaultfilters import date
from django.test import SimpleTestCase, override_settings
from django.utils import timezone, translation
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class DateTests(TimezoneTestCase):
@setup({'date01': '{{ d|date:"m" }}'})
def test_date01(self):
output = self.engine.render_to_string('date01', {'d': datetime(2008, 1, 1)})
self.assertEqual(output, '01')
@setup({'date02': '{{ d|date }}'})
def test_date02(self):
output = self.engine.render_to_string('date02', {'d': datetime(2008, 1, 1)})
self.assertEqual(output, 'Jan. 1, 2008')
@override_settings(USE_L10N=True)
@setup({'date02_l10n': '{{ d|date }}'})
def test_date02_l10n(self):
"""
Without arg and when USE_L10N is True, the active language's DATE_FORMAT
is used.
"""
with translation.override('fr'):
output = self.engine.render_to_string('date02_l10n', {'d': datetime(2008, 1, 1)})
self.assertEqual(output, '1 janvier 2008')
@setup({'date03': '{{ d|date:"m" }}'})
def test_date03(self):
"""
#9520: Make sure |date doesn't blow up on non-dates
"""
output = self.engine.render_to_string('date03', {'d': 'fail_string'})
self.assertEqual(output, '')
# ISO date formats
@setup({'date04': '{{ d|date:"o" }}'})
def test_date04(self):
output = self.engine.render_to_string('date04', {'d': datetime(2008, 12, 29)})
self.assertEqual(output, '2009')
@setup({'date05': '{{ d|date:"o" }}'})
def test_date05(self):
output = self.engine.render_to_string('date05', {'d': datetime(2010, 1, 3)})
self.assertEqual(output, '2009')
# Timezone name
@setup({'date06': '{{ d|date:"e" }}'})
def test_date06(self):
output = self.engine.render_to_string(
'date06', {'d': datetime(2009, 3, 12, tzinfo=timezone.get_fixed_timezone(30))}
)
self.assertEqual(output, '+0030')
@setup({'date07': '{{ d|date:"e" }}'})
def test_date07(self):
output = self.engine.render_to_string('date07', {'d': datetime(2009, 3, 12)})
self.assertEqual(output, '')
# #19370: Make sure |date doesn't blow up on a midnight time object
@setup({'date08': '{{ t|date:"H:i" }}'})
def test_date08(self):
output = self.engine.render_to_string('date08', {'t': time(0, 1)})
self.assertEqual(output, '00:01')
@setup({'date09': '{{ t|date:"H:i" }}'})
def test_date09(self):
output = self.engine.render_to_string('date09', {'t': time(0, 0)})
self.assertEqual(output, '00:00')
class FunctionTests(SimpleTestCase):
def test_date(self):
self.assertEqual(date(datetime(2005, 12, 29), "d F Y"), '29 December 2005')
def test_escape_characters(self):
self.assertEqual(date(datetime(2005, 12, 29), r'jS \o\f F'), '29th of December')
|
bsd-3-clause
|
laperry1/android_external_chromium_org
|
build/android/pylib/flag_changer.py
|
46
|
5712
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import pylib.android_commands
import pylib.device.device_utils
class FlagChanger(object):
"""Changes the flags Chrome runs with.
There are two different use cases for this file:
* Flags are permanently set by calling Set().
* Flags can be temporarily set for a particular set of unit tests. These
tests should call Restore() to revert the flags to their original state
once the tests have completed.
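A minimal usage sketch (the device object and file path are illustrative
assumptions, not values required by this class):
    changer = FlagChanger(device, '/data/local/tmp/content-shell-command-line')
    changer.AddFlags(['--enable-test-intents'])
    ...  # run the tests
    changer.Restore()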
"""
def __init__(self, device, cmdline_file):
"""Initializes the FlagChanger and records the original arguments.
Args:
device: A DeviceUtils instance.
cmdline_file: Path to the command line file on the device.
"""
# TODO(jbudorick) Remove once telemetry switches over.
if isinstance(device, pylib.android_commands.AndroidCommands):
device = pylib.device.device_utils.DeviceUtils(device)
self._device = device
self._cmdline_file = cmdline_file
# Save the original flags.
self._orig_line = self._device.ReadFile(self._cmdline_file)
if self._orig_line:
self._orig_line = self._orig_line[0].strip()
# Parse out the flags into a list to facilitate adding and removing flags.
self._current_flags = self._TokenizeFlags(self._orig_line)
def Get(self):
"""Returns list of current flags."""
return self._current_flags
def Set(self, flags):
"""Replaces all flags on the current command line with the flags given.
Args:
flags: A list of flags to set, eg. ['--single-process'].
"""
if flags:
assert flags[0] != 'chrome'
self._current_flags = flags
self._UpdateCommandLineFile()
def AddFlags(self, flags):
"""Appends flags to the command line if they aren't already there.
Args:
flags: A list of flags to add on, eg. ['--single-process'].
"""
if flags:
assert flags[0] != 'chrome'
# Avoid appending flags that are already present.
for flag in flags:
if flag not in self._current_flags:
self._current_flags.append(flag)
self._UpdateCommandLineFile()
def RemoveFlags(self, flags):
"""Removes flags from the command line, if they exist.
Args:
flags: A list of flags to remove, eg. ['--single-process']. Note that we
expect a complete match when removing flags; if you want to remove
a switch with a value, you must use the exact string used to add
it in the first place.
"""
if flags:
assert flags[0] != 'chrome'
for flag in flags:
if flag in self._current_flags:
self._current_flags.remove(flag)
self._UpdateCommandLineFile()
def Restore(self):
"""Restores the flags to their original state."""
self._current_flags = self._TokenizeFlags(self._orig_line)
self._UpdateCommandLineFile()
def _UpdateCommandLineFile(self):
"""Writes out the command line to the file, or removes it if empty."""
logging.info('Current flags: %s', self._current_flags)
# Root is not required to write to /data/local/tmp/.
use_root = '/data/local/tmp/' not in self._cmdline_file
if self._current_flags:
# The first command line argument doesn't matter as we are not actually
# launching the chrome executable using this command line.
cmd_line = ' '.join(['_'] + self._current_flags)
self._device.WriteFile(
self._cmdline_file, cmd_line, as_root=use_root)
file_contents = self._device.ReadFile(
self._cmdline_file, as_root=use_root)
assert len(file_contents) == 1 and file_contents[0] == cmd_line, (
'Failed to set the command line file at %s' % self._cmdline_file)
else:
self._device.RunShellCommand('rm ' + self._cmdline_file,
as_root=use_root)
assert not self._device.FileExists(self._cmdline_file), (
'Failed to remove the command line file at %s' % self._cmdline_file)
@staticmethod
def _TokenizeFlags(line):
"""Changes the string containing the command line into a list of flags.
Follows similar logic to CommandLine.java::tokenizeQuotedArguments:
* Flags are split using whitespace, unless the whitespace is within a
pair of quotation marks.
* Unlike the Java version, we keep the quotation marks around switch
values since we need them to re-create the file when new flags are
appended.
Args:
line: A string containing the entire command line. The first token is
assumed to be the program name.
"""
if not line:
return []
tokenized_flags = []
current_flag = ""
within_quotations = False
# Move through the string character by character and build up each flag
# along the way.
for c in line.strip():
if c == '"':
if len(current_flag) > 0 and current_flag[-1] == '\\':
# Last char was a backslash; pop it, and treat this " as a literal.
current_flag = current_flag[0:-1] + '"'
else:
within_quotations = not within_quotations
current_flag += c
elif not within_quotations and (c == ' ' or c == '\t'):
if current_flag != "":
tokenized_flags.append(current_flag)
current_flag = ""
else:
current_flag += c
# Tack on the last flag.
if not current_flag:
if within_quotations:
logging.warn('Unterminated quoted argument: ' + line)
else:
tokenized_flags.append(current_flag)
# Return everything but the program name.
return tokenized_flags[1:]
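# Illustrative note (not part of the original file): _TokenizeFlags() keeps the
# quotes around switch values and drops the leading program name, so
#   FlagChanger._TokenizeFlags('_ --foo --bar="a b"')
# is expected to return ['--foo', '--bar="a b"'].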
|
bsd-3-clause
|
traveloka/ansible
|
contrib/inventory/consul_io.py
|
7
|
17349
|
#!/usr/bin/env python
#
# (c) 2015, Steve Gargan <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Consul.io inventory script (http://consul.io)
======================================
Generates Ansible inventory from nodes in a Consul cluster. This script will
group nodes by:
- datacenter,
- registered service
- service tags
- service status
- values from the k/v store
This script can be run with the switches
--list as expected groups all the nodes in all datacenters
--datacenter, to restrict the nodes to a single datacenter
--host to restrict the inventory to a single named node. (requires datacenter config)
The configuration for this plugin is read from a consul.ini file located in the
same directory as this inventory script. All config options in the config file
are optional except the host and port, which must point to a valid agent or
server running the HTTP API. For more information on enabling the endpoint see:
http://www.consul.io/docs/agent/options.html
Other options include:
'datacenter':
which restricts the included nodes to those from the given datacenter
'url':
the URL of the Consul cluster. host, port and scheme are derived from the
URL. If not specified, connection configuration defaults to http requests
to localhost on port 8500.
'domain':
if specified then the inventory will generate domain names that will resolve
via Consul's inbuilt DNS. The name is derived from the node name, datacenter
and domain <node_name>.node.<datacenter>.<domain>. Note that you will need to
have consul hooked into your DNS server for these to resolve. See the consul
DNS docs for more info.
'servers_suffix':
defines a suffix to add to the service name when creating the service
group, e.g. a service name of 'redis' and a suffix of '_servers' will add
each node's address to the group 'redis_servers'. No suffix is added
if this is not set
'tags':
boolean flag defining if service tags should be used to create Inventory
groups e.g. an nginx service with the tags ['master', 'v1'] will create
groups nginx_master and nginx_v1 to which the node running the service
will be added. No tag groups are created if this is missing.
'token':
ACL token to use to authorize access to the key value store. May be required
to retrieve the kv_groups and kv_metadata based on your consul configuration.
'kv_groups':
This is used to lookup groups for a node in the key value store. It specifies a
path to which each discovered node's name will be added to create a key to query
the key/value store. There it expects to find a comma separated list of group
names to which the node should be added e.g. if the inventory contains node
'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key
'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query
returned 'test,honeypot' then the node address would be added to both groups.
'kv_metadata':
kv_metadata is used to lookup metadata for each discovered node. Like kv_groups
above it is used to build a path to lookup in the kv store where it expects to
find a json dictionary of metadata entries. If found, each key/value pair in the
dictionary is added to the metadata for the node. eg node 'nyc-web-1' in datacenter
'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key
'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"database": "postgres"}'
'availability':
if true then availability groups will be created for each service. The node will
be added to one of the groups based on the health status of the service. The
group name is derived from the service name and the configurable availability
suffixes
'available_suffix':
suffix that should be appended to the service availability groups for available
services e.g. if the suffix is '_up' and the service is nginx, then nodes with
healthy nginx services will be added to the nginx_up group. Defaults to
'_available'
'unavailable_suffix':
as above but for unhealthy services, defaults to '_unavailable'
Note that if the inventory discovers an 'ssh' service running on a node it will
register the port as ansible_ssh_port in the node's metadata and this port will
be used to access the machine.
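An illustrative consul.ini sketch (the values below are assumptions for
illustration only; the option names are those documented above):
[consul]
url = http://localhost:8500
datacenter = nyc-dc1
servers_suffix = _servers
tags = true
kv_groups = ansible/groups
kv_metadata = ansible/metadata
availability = true
available_suffix = _up
unavailable_suffix = _down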
'''
import os
import re
import argparse
import sys
from time import time
import sys
import ConfigParser
import urllib, urllib2, base64
def get_log_filename():
tty_filename = '/dev/tty'
stdout_filename = '/dev/stdout'
if not os.path.exists(tty_filename):
return stdout_filename
if not os.access(tty_filename, os.W_OK):
return stdout_filename
if os.getenv('TEAMCITY_VERSION'):
return stdout_filename
return tty_filename
def setup_logging():
filename = get_log_filename()
import logging.config
logging.config.dictConfig({
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
},
},
'root': {
'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'),
'handlers': ['console'],
},
'handlers': {
'console': {
'class': 'logging.FileHandler',
'filename': filename,
'formatter': 'simple',
},
},
'loggers': {
'iso8601': {
'qualname': 'iso8601',
'level': 'INFO',
},
},
})
logger = logging.getLogger('consul_io.py')
logger.debug('Invoked with %r', sys.argv)
if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'):
setup_logging()
try:
import json
except ImportError:
import simplejson as json
try:
import consul
except ImportError as e:
sys.exit("""failed=True msg='python-consul required for this module.
See http://python-consul.readthedocs.org/en/latest/#installation'""")
from six import iteritems
class ConsulInventory(object):
def __init__(self):
''' Create an inventory based on the catalog of nodes and services
registered in a consul cluster'''
self.node_metadata = {}
self.nodes = {}
self.nodes_by_service = {}
self.nodes_by_tag = {}
self.nodes_by_datacenter = {}
self.nodes_by_kv = {}
self.nodes_by_availability = {}
self.current_dc = None
config = ConsulConfig()
self.config = config
self.consul_api = config.get_consul_api()
if config.has_config('datacenter'):
if config.has_config('host'):
self.load_data_for_node(config.host, config.datacenter)
else:
self.load_data_for_datacenter(config.datacenter)
else:
self.load_all_data_consul()
self.combine_all_results()
print(json.dumps(self.inventory, sort_keys=True, indent=2))
def load_all_data_consul(self):
''' cycle through each of the datacenters in the consul catalog and process
the nodes in each '''
self.datacenters = self.consul_api.catalog.datacenters()
for datacenter in self.datacenters:
self.current_dc = datacenter
self.load_data_for_datacenter(datacenter)
def load_availability_groups(self, node, datacenter):
'''check the health of each service on a node and add the node to either
an 'available' or 'unavailable' grouping. The suffix for each group can be
controlled from the config'''
if self.config.has_config('availability'):
for service_name, service in iteritems(node['Services']):
for node in self.consul_api.health.service(service_name)[1]:
for check in node['Checks']:
if check['ServiceName'] == service_name:
ok = 'passing' == check['Status']
if ok:
suffix = self.config.get_availability_suffix(
'available_suffix', '_available')
else:
suffix = self.config.get_availability_suffix(
'unavailable_suffix', '_unavailable')
self.add_node_to_map(self.nodes_by_availability,
service_name + suffix, node['Node'])
def load_data_for_datacenter(self, datacenter):
'''processes all the nodes in a particular datacenter'''
index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
for node in nodes:
self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
self.load_data_for_node(node['Node'], datacenter)
def load_data_for_node(self, node, datacenter):
'''loads the data for a single node, adding it to various groups based on
metadata retrieved from the kv store and service availability'''
index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
node = node_data['Node']
self.add_node_to_map(self.nodes, 'all', node)
self.add_metadata(node_data, "consul_datacenter", datacenter)
self.add_metadata(node_data, "consul_nodename", node['Node'])
self.load_groups_from_kv(node_data)
self.load_node_metadata_from_kv(node_data)
self.load_availability_groups(node_data, datacenter)
for name, service in node_data['Services'].items():
self.load_data_from_service(name, service, node_data)
def load_node_metadata_from_kv(self, node_data):
''' load the json dict at the metadata path defined by the kv_metadata value
and the node name, and add each entry in the dictionary to the node's
metadata '''
node = node_data['Node']
if self.config.has_config('kv_metadata'):
key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
index, metadata = self.consul_api.kv.get(key)
if metadata and metadata['Value']:
try:
metadata = json.loads(metadata['Value'])
for k,v in metadata.items():
self.add_metadata(node_data, k, v)
except:
pass
def load_groups_from_kv(self, node_data):
''' load the comma separated list of groups at the path defined by the
kv_groups config value and the node name, and add the node address to each
group found '''
node = node_data['Node']
if self.config.has_config('kv_groups'):
key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
index, groups = self.consul_api.kv.get(key)
if groups and groups['Value']:
for group in groups['Value'].split(','):
self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
def load_data_from_service(self, service_name, service, node_data):
'''process a service registered on a node, adding the node to a group with
the service name. Each service tag is extracted and the node is added to a
tag grouping also'''
self.add_metadata(node_data, "consul_services", service_name, True)
if self.is_service("ssh", service_name):
self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
if self.config.has_config('servers_suffix'):
service_name = service_name + self.config.servers_suffix
self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
self.extract_groups_from_tags(service_name, service, node_data)
def is_service(self, target, name):
return name and (name.lower() == target.lower())
def extract_groups_from_tags(self, service_name, service, node_data):
'''iterates each service tag and adds the node to groups derived from the
service and tag names e.g. nginx_master'''
if self.config.has_config('tags') and service['Tags']:
tags = service['Tags']
self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
for tag in service['Tags']:
tagname = service_name +'_'+tag
self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
def combine_all_results(self):
'''prunes and sorts all groupings for combination into the final map'''
self.inventory = {"_meta": { "hostvars" : self.node_metadata}}
groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
for grouping in groupings:
for name, addresses in grouping.items():
self.inventory[name] = sorted(list(set(addresses)))
def add_metadata(self, node_data, key, value, is_list = False):
''' Push an element onto the metadata dict for the node, creating
the dict if it doesn't exist '''
key = self.to_safe(key)
node = self.get_inventory_name(node_data['Node'])
if node in self.node_metadata:
metadata = self.node_metadata[node]
else:
metadata = {}
self.node_metadata[node] = metadata
if is_list:
self.push(metadata, key, value)
else:
metadata[key] = value
def get_inventory_name(self, node_data):
'''return the ip or a node name that can be looked up in consul's dns'''
domain = self.config.domain
if domain:
node_name = node_data['Node']
if self.current_dc:
return '%s.node.%s.%s' % ( node_name, self.current_dc, domain)
else:
return '%s.node.%s' % ( node_name, domain)
else:
return node_data['Address']
def add_node_to_map(self, map, name, node):
self.push(map, name, self.get_inventory_name(node))
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in the
dict '''
key = self.to_safe(key)
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used
as Ansible groups '''
return re.sub('[^A-Za-z0-9\-\.]', '_', word)
def sanitize_dict(self, d):
new_dict = {}
for k, v in d.items():
if v != None:
new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
return new_dict
def sanitize_list(self, seq):
new_seq = []
for d in seq:
new_seq.append(self.sanitize_dict(d))
return new_seq
class ConsulConfig(dict):
def __init__(self):
self.read_settings()
self.read_cli_args()
def has_config(self, name):
if hasattr(self, name):
return getattr(self, name)
else:
return False
def read_settings(self):
''' Reads the settings from the consul.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
config_options = ['host', 'token', 'datacenter', 'servers_suffix',
'tags', 'kv_metadata', 'kv_groups', 'availability',
'unavailable_suffix', 'available_suffix', 'url',
'domain']
for option in config_options:
value = None
if config.has_option('consul', option):
value = config.get('consul', option)
setattr(self, option, value)
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description=
'Produce an Ansible Inventory file based on nodes in a Consul cluster')
parser.add_argument('--list', action='store_true',
help='Get all inventory variables from all nodes in the consul cluster')
parser.add_argument('--host', action='store',
help='Get all inventory variables about a specific consul node, \
requires datacenter set in consul.ini.')
parser.add_argument('--datacenter', action='store',
help='Get all inventory about a specific consul datacenter')
args = parser.parse_args()
arg_names = ['host', 'datacenter']
for arg in arg_names:
if getattr(args, arg):
setattr(self, arg, getattr(args, arg))
def get_availability_suffix(self, suffix, default):
if self.has_config(suffix):
return self.has_config(suffix)
return default
def get_consul_api(self):
'''get an instance of the api based on the supplied configuration'''
host = 'localhost'
port = 8500
token = None
scheme = 'http'
if hasattr(self, 'url'):
from urlparse import urlparse
o = urlparse(self.url)
if o.hostname:
host = o.hostname
if o.port:
port = o.port
if o.scheme:
scheme = o.scheme
if hasattr(self, 'token'):
token = self.token
if not token:
token = 'anonymous'
return consul.Consul(host=host, port=port, token=token, scheme=scheme)
ConsulInventory()
|
gpl-3.0
|
acsone/knowledge
|
document_reindex/models/__init__.py
|
12
|
1051
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import ir_attachment
from . import knowledge_config_settings
|
agpl-3.0
|
slisson/intellij-community
|
python/helpers/pydev/_pydev_getopt.py
|
108
|
4458
|
#=======================================================================================================================
# getopt code copied since gnu_getopt is not available on jython 2.1
#=======================================================================================================================
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
def gnu_getopt(args, shortopts, longopts=[]):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if type('') == type(longopts):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
all_options_first = False
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
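# Hedged usage sketch (not part of the original helper): with GNU-style
# scanning, non-option arguments may precede options, e.g.
#   opts, args = gnu_getopt(['input.txt', '-v', '--port=80'], 'v', ['port='])
# is expected to yield opts == [('-v', ''), ('--port', '80')] and
# args == ['input.txt'].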
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i + 1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i + 1)
raise GetoptError('option -%s not recognized' % opt, opt)
#=======================================================================================================================
# End getopt code
#=======================================================================================================================
|
apache-2.0
|
uiri/pxqz
|
venv/lib/python2.7/site-packages/django/utils/http.py
|
26
|
7878
|
import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_unicode(urllib.unquote(smart_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, unicode)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_unicode(urllib.unquote_plus(smart_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, unicode)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib.urlencode(
[(smart_str(k),
isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
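# Illustrative note (not part of the original module): http_date(0) renders the
# Unix epoch as 'Thu, 01 Jan 1970 00:00:00 GMT'; with no argument the current
# time is formatted the same way.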
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns a floating point number expressed in seconds since the epoch, in
UTC.
"""
# email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int.
if value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if not 0 <= i <= sys.maxint:
raise ValueError("Base36 conversion input too large or incorrect type.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
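# Illustrative usage sketch (added for clarity; not part of the original
# module): base 36 values use lowercase digits and letters, and round-trip
# through base36_to_int().
def _base36_example():
    assert int_to_base36(0) == '0'
    assert int_to_base36(35) == 'z'
    assert int_to_base36(36) == '10'
    assert base36_to_int('10') == 36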
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.decode('string_escape') for e in etags]
return etags
def quote_etag(etag):
"""
    Wraps a string in double quotes, escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
if sys.version_info >= (2, 6):
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
else:
# Python 2.5 compatibility. This actually works for Python 2.6 and above,
# but the above definition is much more obviously correct and so is
# preferred going forward.
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return p1[0:2] == p2[0:2]
|
gpl-3.0
|
Jidgdoi/PacmanPy
|
src/Main.py
|
1
|
4832
|
# -*- coding:utf-8 -*-
# Cyril Fournier
# 20/01/2016
import os,sys
import wx
import threading
import Queue
import time
import UtilsAndGlobal as UAG
from Cell import Cell
from Map import Map
from UI import UI, UICatcher
from GhostAI import GhostAI
from Graphical import Graphical
from Pacman import Pacman, PacmanGame
import Colors as txt
# =======================
# === Main ===
# =======================
def getRootDir():
return os.sep.join(os.path.realpath(sys.argv[0]).split(os.sep)[:-2])
def getMapPath():
if len(sys.argv) == 2: return sys.argv[1]
return "%s%s%s" %(rootDir, os.sep, UAG.DefaultMap)
def askQuestion(question, lChoices):
"""
	Ask the user a question and return the index of the selected choice.
"""
mess = "\n%s\n%s" %(colorQ(question), '\n'.join([ " [%s] %s" %(colorC(i+1), lChoices[i]) for i in range(len(lChoices))]))
mess += "\n [%s] Quit\nChoice: %s" %(colorC('Q'), txt.buildColor(fgColor="Red"))
choice = raw_input(mess)
sys.stdout.write(txt.reset())
sys.stdout.flush()
if choice.lower() == 'q':
sys.exit(0)
if choice not in map(str,range(1,len(lChoices)+1)):
print "Your choice \"%s\" doesn't exist." %choice
return askQuestion(question, lChoices)
return int(choice) -1
def setDifficulty(choice):
"""
Set game parameters for the selected difficulty.
"""
if choice == 0: # Easy
UAG.GhostSpeed = 0.5
UAG.FearTime = 10
UAG.GhostPredator = 0
if choice >= 1: # Medium
UAG.GhostPredator = 1
UAG.GhostSmell = 5
if choice >= 2: # Hard
UAG.GhostSpeed *= (2/3.0)
if choice >= 3: # Nightmare
UAG.GhostSmell += 3
UAG.LifeBonusThresh = 1e10
UAG.StartLife = 1
if choice >= 4: # Doom
        UAG.GhostSpeed = UAG.PacmanDelay
def listMap(myPath):
"""
Ask the user to pick a map, and return the path to this file.
"""
# Get files
lFiles = [f for f in os.listdir(myPath) if os.path.splitext(f)[1] == '.map']
print lFiles
if len(lFiles) == 0:
print "No map to load."
return False
# Ask user
choice = askQuestion("Choose file:", lFiles)
print choice
return ''.join([myPath, lFiles[choice]])
def terminalVersion():
"""
Game in terminal.
"""
# --- New game or load a save
choice = askQuestion("Menu:", ["New game", "Load game"])
if choice == 0:
# New game
objMap = Map(listMap("%s%smap%s" %(rootDir, os.sep, os.sep)))
# Select difficulty
difficulty = askQuestion("Select the difficulty:",
["Easy: slow ghost.",
"Medium: slow ghost, mode predator ON.",
"Hard: fast ghost, mode predator ON.",
"Nightmare: fast ghost, mode boosted predator ON, 1 life, no bonus life.",
"Doom: run."])
setDifficulty(difficulty)
elif choice == 1:
# Load game
objMap = Map(listMap("%s%ssave%s" %(rootDir, os.sep, os.sep)))
# --- Initiate threads and the wx app
lock = threading.Lock()
queue = Queue.Queue(5)
objApp = wx.PySimpleApp()
objUI = UI(1, "Thread-UI", queue, lock, UAG.PacmanDelay)
objCatcher = UICatcher(2, "Thread-UICatcher", objApp, objUI)
objGhostAI = GhostAI(3, "Thread-Ghost", queue, lock, UAG.GhostSpeed, len(objMap.dGhostSpawns))
lThreads = [objUI, objCatcher, objGhostAI]
print "[PacmanGame] Initiate threads"
for t in lThreads:
print "\t%s" %t.threadName
t.start()
# --- Initiate game
game = PacmanGame(objMap, queue, lock, UAG.PacmanDelay, objUI=objUI, objGhostAI=objGhostAI)
game.run()
# --- Wait for all threads to terminate before leaving
print "[PacmanGame] Wait all threads before leaving"
for t in lThreads:
print "\t%s" %t.threadName
t.join()
def asciiArtVersion():
"""
Game in ASCII art.
"""
print "ASCII-art version is currently not available."
def graphicalVersion():
"""
Game in a window.
"""
print "Window version is currently not available."
if __name__=='__main__':
# rows, columns = map(int, os.popen('stty size', 'r').read().split())
print """
.-. .-. .--. |=======================| .--. .-. .-.
| OO| | OO| / _.-' .-. .-. .''. | Welcome | .''. .-. .-. '-._ \ |OO | |OO |
| | | | \ '-. '-' '-' '..' | To the PacmanPy game !| '..' '-' '-' .-' / | | | |
'^^^' '^^^' '--' |=======================| '--' '^^^' '^^^'
"""
rootDir = getRootDir()
colorQ = txt.color(fgColor='Cyan', bold=True)
colorC = txt.color(fgColor='Yellow', bold=True)
# ===========
# === Select the graphical output version
gameVersion = askQuestion("Select the graphical output version you want to play with:", ["Terminal version", "ASCII-art version", "Graphical version"])
mapPath = getMapPath()
if gameVersion == 0:
terminalVersion()
elif gameVersion == 1:
asciiArtVersion()
elif gameVersion == 2:
graphicalVersion()
print "Exit Pacman"
#os.system("gnome-terminal --geometry=60x20+2000+2000")
|
gpl-2.0
|
hivam/doctor_multiroom
|
models/doctor_room.py
|
1
|
2194
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
from datetime import date, datetime, timedelta
class doctor_room(osv.osv):
_name = "doctor.room"
_description = "It allows you to create multiple doctor rooms."
_columns = {
'codigo':fields.char('Código', size=3, required=True),
'name':fields.char('Nombre Consultorio', required='True'),
'multi_paciente': fields.boolean('Multi Paciente'),
'numero_pacientes':fields.integer('Numero de Pacientes',size=2)
}
_defaults={
'numero_pacientes': 1,
}
_sql_constraints = [
('name_unico','unique(name)', 'Ya existe un consultorio con este mismo nombre.'),
('codigo_unico','unique(codigo)', u'Ya existe un consultorio con este mismo código.')
]
    #Saving the room name in uppercase.
def create(self, cr, uid, vals, context=None):
vals.update({'name': vals['name'].upper()})
numero_pacientes=vals['numero_pacientes']
multi_paciente=vals['multi_paciente']
if multi_paciente:
if numero_pacientes <= 1:
raise osv.except_osv(_('Error!'),_('El número de pacientes tiene que ser mayor a 1.'))
return super(doctor_room, self).create(cr, uid, vals, context=context)
|
agpl-3.0
|
xyuanmu/XX-Net
|
python3.8.2/Lib/symtable.py
|
10
|
7568
|
"""Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import (USE, DEF_GLOBAL, DEF_NONLOCAL, DEF_LOCAL, DEF_PARAM,
DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE,
LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL)
import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
top = _symtable.symtable(code, filename, compile_type)
return _newSymbolTable(top, filename)
class SymbolTableFactory:
def __init__(self):
self.__memo = weakref.WeakValueDictionary()
def new(self, table, filename):
if table.type == _symtable.TYPE_FUNCTION:
return Function(table, filename)
if table.type == _symtable.TYPE_CLASS:
return Class(table, filename)
return SymbolTable(table, filename)
def __call__(self, table, filename):
key = table, filename
obj = self.__memo.get(key, None)
if obj is None:
obj = self.__memo[key] = self.new(table, filename)
return obj
_newSymbolTable = SymbolTableFactory()
class SymbolTable(object):
def __init__(self, raw_table, filename):
self._table = raw_table
self._filename = filename
self._symbols = {}
def __repr__(self):
if self.__class__ == SymbolTable:
kind = ""
else:
kind = "%s " % self.__class__.__name__
if self._table.name == "global":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
self._table.name,
self._filename)
def get_type(self):
if self._table.type == _symtable.TYPE_MODULE:
return "module"
if self._table.type == _symtable.TYPE_FUNCTION:
return "function"
if self._table.type == _symtable.TYPE_CLASS:
return "class"
assert self._table.type in (1, 2, 3), \
"unexpected type: {0}".format(self._table.type)
def get_id(self):
return self._table.id
def get_name(self):
return self._table.name
def get_lineno(self):
return self._table.lineno
def is_optimized(self):
return bool(self._table.type == _symtable.TYPE_FUNCTION)
def is_nested(self):
return bool(self._table.nested)
def has_children(self):
return bool(self._table.children)
def has_exec(self):
"""Return true if the scope uses exec. Deprecated method."""
return False
def get_identifiers(self):
return self._table.symbols.keys()
def lookup(self, name):
sym = self._symbols.get(name)
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
sym = self._symbols[name] = Symbol(name, flags, namespaces)
return sym
def get_symbols(self):
return [self.lookup(ident) for ident in self.get_identifiers()]
def __check_children(self, name):
return [_newSymbolTable(st, self._filename)
for st in self._table.children
if st.name == name]
def get_children(self):
return [_newSymbolTable(st, self._filename)
for st in self._table.children]
class Function(SymbolTable):
# Default values for instance variables
__params = None
__locals = None
__frees = None
__globals = None
__nonlocals = None
def __idents_matching(self, test_func):
return tuple(ident for ident in self.get_identifiers()
if test_func(self._table.symbols[ident]))
def get_parameters(self):
if self.__params is None:
self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
return self.__params
def get_locals(self):
if self.__locals is None:
locs = (LOCAL, CELL)
test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
self.__locals = self.__idents_matching(test)
return self.__locals
def get_globals(self):
if self.__globals is None:
glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
self.__globals = self.__idents_matching(test)
return self.__globals
def get_nonlocals(self):
if self.__nonlocals is None:
self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL)
return self.__nonlocals
def get_frees(self):
if self.__frees is None:
is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
self.__frees = self.__idents_matching(is_free)
return self.__frees
class Class(SymbolTable):
__methods = None
def get_methods(self):
if self.__methods is None:
d = {}
for st in self._table.children:
d[st.name] = 1
self.__methods = tuple(d)
return self.__methods
class Symbol(object):
def __init__(self, name, flags, namespaces=None):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
def get_name(self):
return self.__name
def is_referenced(self):
return bool(self.__flags & _symtable.USE)
def is_parameter(self):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
def is_nonlocal(self):
return bool(self.__flags & DEF_NONLOCAL)
def is_declared_global(self):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
return bool(self.__flags & DEF_BOUND)
def is_annotated(self):
return bool(self.__flags & DEF_ANNOT)
def is_free(self):
return bool(self.__scope == FREE)
def is_imported(self):
return bool(self.__flags & DEF_IMPORT)
def is_assigned(self):
return bool(self.__flags & DEF_LOCAL)
def is_namespace(self):
"""Returns true if name binding introduces new namespace.
If the name is used as the target of a function or class
statement, this will be true.
Note that a single name can be bound to multiple objects. If
is_namespace() is true, the name may also be bound to other
objects, like an int or list, that does not introduce a new
namespace.
"""
return bool(self.__namespaces)
def get_namespaces(self):
"""Return a list of namespaces bound to this name"""
return self.__namespaces
def get_namespace(self):
"""Returns the single namespace bound to this name.
Raises ValueError if the name is bound to multiple namespaces.
"""
if len(self.__namespaces) != 1:
raise ValueError("name is bound to multiple namespaces")
return self.__namespaces[0]
if __name__ == "__main__":
import os, sys
with open(sys.argv[0]) as f:
src = f.read()
mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
for ident in mod.get_identifiers():
info = mod.lookup(ident)
print(info, info.is_local(), info.is_namespace())
|
bsd-2-clause
|
Cyberbio-Lab/bcbio-nextgen
|
bcbio/variation/gatk.py
|
2
|
5183
|
"""GATK variant calling -- HaplotypeCaller and UnifiedGenotyper.
"""
from distutils.version import LooseVersion
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.pipeline import datadict as dd
from bcbio.variation import annotation, bamprep, ploidy
def _shared_gatk_call_prep(align_bams, items, ref_file, dbsnp, region, out_file):
"""Shared preparation work for GATK variant calling.
"""
data = items[0]
config = data["config"]
broad_runner = broad.runner_from_path("picard", config)
broad_runner.run_fn("picard_index_ref", ref_file)
for x in align_bams:
bam.index(x, config)
params = ["-R", ref_file]
coverage_depth_min = tz.get_in(["algorithm", "coverage_depth_min"], config)
if coverage_depth_min and coverage_depth_min < 4:
confidence = "4.0"
params += ["--standard_min_confidence_threshold_for_calling", confidence,
"--standard_min_confidence_threshold_for_emitting", confidence]
for a in annotation.get_gatk_annotations(config):
params += ["--annotation", a]
for x in align_bams:
params += ["-I", x]
if dbsnp:
params += ["--dbsnp", dbsnp]
variant_regions = tz.get_in(["algorithm", "variant_regions"], config)
region = subset_variant_regions(variant_regions, region, out_file, items)
if region:
params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule", "INTERSECTION"]
broad_runner = broad.runner_from_config(config)
return broad_runner, params
def unified_genotyper(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Perform SNP genotyping on the given alignment file.
"""
if out_file is None:
out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
if not utils.file_exists(out_file):
broad_runner, params = \
_shared_gatk_call_prep(align_bams, items,
ref_file, assoc_files.get("dbsnp"),
region, out_file)
with file_transaction(items[0], out_file) as tx_out_file:
params += ["-T", "UnifiedGenotyper",
"-o", tx_out_file,
"-ploidy", (str(ploidy.get_ploidy(items, region))
if broad_runner.gatk_type() == "restricted" else "2"),
"--genotype_likelihoods_model", "BOTH"]
broad_runner.run_gatk(params)
return out_file
def _joint_calling(items):
"""Determine if this call feeds downstream into joint calls.
"""
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0])
if jointcaller:
assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples"
assert tz.get_in(("metadata", "batch"), items[0]) is not None, \
"Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0])
return jointcaller
def haplotype_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Call variation with GATK's HaplotypeCaller.
This requires the full non open-source version of GATK.
"""
if out_file is None:
out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
if not utils.file_exists(out_file):
broad_runner, params = \
_shared_gatk_call_prep(align_bams, items,
ref_file, assoc_files.get("dbsnp"),
region, out_file)
assert broad_runner.gatk_type() == "restricted", \
"Require full version of GATK 2.4+ for haplotype calling"
with file_transaction(items[0], out_file) as tx_out_file:
params += ["-T", "HaplotypeCaller",
"-o", tx_out_file,
"--annotation", "ClippingRankSumTest",
"--annotation", "DepthPerSampleHC"]
# Enable hardware based optimizations in GATK 3.1+
if LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.1"):
params += ["--pair_hmm_implementation", "VECTOR_LOGLESS_CACHING"]
# Enable non-diploid calling in GATK 3.3+
if LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.3"):
params += ["-ploidy", str(ploidy.get_ploidy(items, region))]
if _joint_calling(items): # Prepare gVCFs if doing joint calling
params += ["--emitRefConfidence", "GVCF", "--variant_index_type", "LINEAR",
"--variant_index_parameter", "128000"]
resources = config_utils.get_resources("gatk-haplotype", items[0]["config"])
if "options" in resources:
params += [str(x) for x in resources.get("options", [])]
broad_runner.new_resources("gatk-haplotype")
broad_runner.run_gatk(params)
return out_file
|
mit
|
lugia/Python-MyWalk
|
base/utils.py
|
1
|
1085
|
from django.http import HttpResponse
import simplejson as json
def idx(a, id, default = None):
if a.has_key(id):
return a[id]
return default
def printItems(dictObj, indent):
ret = ""
ret = ret + ' '*indent + '<ul>\n'
for k,v in dictObj.iteritems():
if isinstance(v, dict):
ret = ret + ' '*indent + '<li>' + k+ ':'+ '</li>'
ret = ret + printItems(v, indent+1)
else:
ret = ret + ' '*indent + '<li>' + str(k) + ':' + str(v) + '</li>'
ret = ret + ' '*indent + '</ul>\n'
return ret
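# Illustrative example (added for clarity; not part of the original module):
# printItems() renders a nested dict as nested HTML lists, e.g.
#   printItems({'a': 1}, 0)  ->  '<ul>\n<li>a:1</li></ul>\n'
# (leading spaces grow with the indent argument, and nested dicts become
# nested <ul> blocks).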
def renderJsonResponse(msg):
if type(msg) != str:
msg = json.dumps(msg)
#return renderTextResponse(msg, "application/json")
return renderTextResponse(msg, "text/plain")
def renderJsonErrorResponse(msg):
return renderJsonResponse({'error' : msg})
def renderTextResponse(text, mimetype="text/html"):
return HttpResponse(text, mimetype=mimetype)
def getPartsFromRequest(request):
ret = request.get_full_path().split("/")
if ret[0] == '':
ret = ret[1:]
return ret
|
apache-2.0
|
fbradyirl/home-assistant
|
tests/components/history_graph/test_init.py
|
4
|
1171
|
"""The tests the Graph component."""
import unittest
from homeassistant.setup import setup_component
from tests.common import init_recorder_component, get_test_home_assistant
class TestGraph(unittest.TestCase):
"""Test the Google component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Test setup component."""
self.init_recorder()
config = {"history": {}, "history_graph": {"name_1": {"entities": "test.test"}}}
assert setup_component(self.hass, "history_graph", config)
assert dict(self.hass.states.get("history_graph.name_1").attributes) == {
"entity_id": ["test.test"],
"friendly_name": "name_1",
"hours_to_show": 24,
"refresh": 0,
}
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
|
apache-2.0
|
jordanemedlock/psychtruths
|
temboo/core/Library/Facebook/Actions/Fitness/Bikes/ReadBikes.py
|
5
|
5217
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ReadBikes
# Retrieves one or more bike actions.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ReadBikes(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ReadBikes Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ReadBikes, self).__init__(temboo_session, '/Library/Facebook/Actions/Fitness/Bikes/ReadBikes')
def new_input_set(self):
return ReadBikesInputSet()
def _make_result_set(self, result, path):
return ReadBikesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ReadBikesChoreographyExecution(session, exec_id, path)
class ReadBikesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ReadBikes
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved from the final step of the OAuth process.)
"""
super(ReadBikesInputSet, self)._set_input('AccessToken', value)
def set_ActionID(self, value):
"""
Set the value of the ActionID input for this Choreo. ((optional, string) The id of an action to retrieve. If an id is not provided, a list of all bike actions will be returned.)
"""
super(ReadBikesInputSet, self)._set_input('ActionID', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return (i.e. id,name).)
"""
super(ReadBikesInputSet, self)._set_input('Fields', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Used to page through results. Limits the number of records returned in the response.)
"""
super(ReadBikesInputSet, self)._set_input('Limit', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Used to page through results. Returns results starting from the specified number.)
"""
super(ReadBikesInputSet, self)._set_input('Offset', value)
def set_ProfileID(self, value):
"""
Set the value of the ProfileID input for this Choreo. ((optional, string) The id of the user's profile. Defaults to "me" indicating the authenticated user.)
"""
super(ReadBikesInputSet, self)._set_input('ProfileID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
"""
super(ReadBikesInputSet, self)._set_input('ResponseFormat', value)
class ReadBikesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ReadBikes Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Facebook. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
def get_HasNext(self):
"""
Retrieve the value for the "HasNext" output from this Choreo execution. ((boolean) A boolean flag indicating that a next page exists.)
"""
return self._output.get('HasNext', None)
def get_HasPrevious(self):
"""
Retrieve the value for the "HasPrevious" output from this Choreo execution. ((boolean) A boolean flag indicating that a previous page exists.)
"""
return self._output.get('HasPrevious', None)
class ReadBikesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ReadBikesResultSet(response, path)
|
apache-2.0
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/gui/ag.py
|
2
|
5192
|
#!/usr/bin/env python
# Copyright 2008, 2009
# CAMd (see accompanying license files for details).
import os
from optparse import OptionParser, SUPPRESS_HELP
import ase.gui.i18n
from gettext import gettext as _
# Grrr, older versions (pre-python2.7) of optparse have a bug
# which prevents non-ascii descriptions. How do we circumvent this?
# For now, we'll have to use English in the command line options then.
def build_parser():
parser = OptionParser(usage='%prog [options] [file[, file2, ...]]',
version='%prog 0.1',
description='See the online manual ' +
'(https://wiki.fysik.dtu.dk/ase/ase/gui.html) ' +
'for more information.')
parser.add_option('-n', '--image-number',
default=':', metavar='NUMBER',
help='Pick image(s) from trajectory. NUMBER can be a '
'single number (use a negative number to count from '
'the back) or a range: start:stop:step, where the '
'":step" part can be left out - default values are '
'0:nimages:1.')
parser.add_option('-u', '--show-unit-cell', type='int',
default=1, metavar='I',
help="0: Don't show unit cell. 1: Show unit cell. "
'2: Show all of unit cell.')
parser.add_option('-r', '--repeat',
default='1',
help='Repeat unit cell. Use "-r 2" or "-r 2,3,1".')
parser.add_option('-R', '--rotations', default='',
help='Examples: "-R -90x", "-R 90z,-30x".')
parser.add_option('-o', '--output', metavar='FILE',
help='Write configurations to FILE.')
parser.add_option('-g', '--graph',
# TRANSLATORS: EXPR abbreviates 'expression'
metavar='EXPR',
help='Plot x,y1,y2,... graph from configurations or '
'write data to sdtout in terminal mode. Use the '
'symbols: i, s, d, fmax, e, ekin, A, R, E and F. See '
'https://wiki.fysik.dtu.dk/ase/ase/gui.html'
'#plotting-data for more details.')
parser.add_option('-t', '--terminal',
action='store_true',
default=False,
help='Run in terminal window - no GUI.')
parser.add_option('--aneb',
action='store_true',
default=False,
help='Read ANEB data.')
parser.add_option('--interpolate',
type='int', metavar='N',
help='Interpolate N images between 2 given images.')
parser.add_option('-b', '--bonds',
action='store_true',
default=False,
help='Draw bonds between atoms.')
parser.add_option('-s', '--scale', dest='radii_scale', metavar='FLOAT',
default=None, type=float,
help='Scale covalent radii.')
return parser
def main():
parser = build_parser()
opt, args = parser.parse_args()
try:
import ase
except ImportError:
import sys
from os.path import dirname, join, pardir
sys.path.append(join(dirname(__file__), pardir))
from ase.gui.images import Images
from ase.atoms import Atoms
def run(opt, args):
images = Images()
if opt.aneb:
opt.image_number = '-1'
if len(args) > 0:
from ase.io import string2index
images.read(args, string2index(opt.image_number))
else:
images.initialize([Atoms()])
if opt.interpolate:
images.interpolate(opt.interpolate)
if opt.aneb:
images.aneb()
if opt.repeat != '1':
r = opt.repeat.split(',')
if len(r) == 1:
r = 3 * r
images.repeat_images([int(c) for c in r])
if opt.radii_scale:
images.set_radii(opt.radii_scale)
if opt.output is not None:
images.write(opt.output, rotations=opt.rotations,
show_unit_cell=opt.show_unit_cell)
opt.terminal = True
if opt.terminal:
if opt.graph is not None:
data = images.graph(opt.graph)
for line in data.T:
for x in line:
print x,
print
else:
from ase.gui.gui import GUI
import ase.gui.gtkexcepthook
gui = GUI(images, opt.rotations, opt.show_unit_cell, opt.bonds)
gui.run(opt.graph)
import traceback
try:
run(opt, args)
except KeyboardInterrupt:
pass
except Exception:
traceback.print_exc()
print(_("""
An exception occurred! Please report the issue to
[email protected] - thanks! Please also report this if
it was a user error, so that a better error message can be provided
next time."""))
|
gpl-2.0
|
deployed/django
|
tests/dates/tests.py
|
32
|
2509
|
from __future__ import unicode_literals
import datetime
from django.test import TestCase
from .models import Article, Comment, Category
class DatesTests(TestCase):
def test_related_model_traverse(self):
a1 = Article.objects.create(
title="First one",
pub_date=datetime.date(2005, 7, 28),
)
a2 = Article.objects.create(
title="Another one",
pub_date=datetime.date(2010, 7, 28),
)
a3 = Article.objects.create(
title="Third one, in the first day",
pub_date=datetime.date(2005, 7, 28),
)
a1.comments.create(
text="Im the HULK!",
pub_date=datetime.date(2005, 7, 28),
)
a1.comments.create(
text="HULK SMASH!",
pub_date=datetime.date(2005, 7, 29),
)
a2.comments.create(
text="LMAO",
pub_date=datetime.date(2010, 7, 28),
)
a3.comments.create(
text="+1",
pub_date=datetime.date(2005, 8, 29),
)
c = Category.objects.create(name="serious-news")
c.articles.add(a1, a3)
self.assertQuerysetEqual(
Comment.objects.dates("article__pub_date", "year"), [
datetime.date(2005, 1, 1),
datetime.date(2010, 1, 1),
],
lambda d: d,
)
self.assertQuerysetEqual(
Comment.objects.dates("article__pub_date", "month"), [
datetime.date(2005, 7, 1),
datetime.date(2010, 7, 1),
],
lambda d: d
)
self.assertQuerysetEqual(
Comment.objects.dates("article__pub_date", "day"), [
datetime.date(2005, 7, 28),
datetime.date(2010, 7, 28),
],
lambda d: d
)
self.assertQuerysetEqual(
Article.objects.dates("comments__pub_date", "day"), [
datetime.date(2005, 7, 28),
datetime.date(2005, 7, 29),
datetime.date(2005, 8, 29),
datetime.date(2010, 7, 28),
],
lambda d: d
)
self.assertQuerysetEqual(
Article.objects.dates("comments__approval_date", "day"), []
)
self.assertQuerysetEqual(
Category.objects.dates("articles__pub_date", "day"), [
datetime.date(2005, 7, 28),
],
lambda d: d,
)
|
bsd-3-clause
|
sdague/home-assistant
|
tests/components/homekit_controller/test_sensor.py
|
14
|
6430
|
"""Basic checks for HomeKit sensor."""
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
)
from tests.components.homekit_controller.common import setup_test_component
TEMPERATURE = ("temperature", "temperature.current")
HUMIDITY = ("humidity", "relative-humidity.current")
LIGHT_LEVEL = ("light", "light-level.current")
CARBON_DIOXIDE_LEVEL = ("carbon-dioxide", "carbon-dioxide.level")
BATTERY_LEVEL = ("battery", "battery-level")
CHARGING_STATE = ("battery", "charging-state")
LO_BATT = ("battery", "status-lo-batt")
def create_temperature_sensor_service(accessory):
"""Define temperature characteristics."""
service = accessory.add_service(ServicesTypes.TEMPERATURE_SENSOR)
cur_state = service.add_char(CharacteristicsTypes.TEMPERATURE_CURRENT)
cur_state.value = 0
def create_humidity_sensor_service(accessory):
"""Define humidity characteristics."""
service = accessory.add_service(ServicesTypes.HUMIDITY_SENSOR)
cur_state = service.add_char(CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT)
cur_state.value = 0
def create_light_level_sensor_service(accessory):
"""Define light level characteristics."""
service = accessory.add_service(ServicesTypes.LIGHT_SENSOR)
cur_state = service.add_char(CharacteristicsTypes.LIGHT_LEVEL_CURRENT)
cur_state.value = 0
def create_carbon_dioxide_level_sensor_service(accessory):
"""Define carbon dioxide level characteristics."""
service = accessory.add_service(ServicesTypes.CARBON_DIOXIDE_SENSOR)
cur_state = service.add_char(CharacteristicsTypes.CARBON_DIOXIDE_LEVEL)
cur_state.value = 0
def create_battery_level_sensor(accessory):
"""Define battery level characteristics."""
service = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
cur_state = service.add_char(CharacteristicsTypes.BATTERY_LEVEL)
cur_state.value = 100
low_battery = service.add_char(CharacteristicsTypes.STATUS_LO_BATT)
low_battery.value = 0
charging_state = service.add_char(CharacteristicsTypes.CHARGING_STATE)
charging_state.value = 0
return service
async def test_temperature_sensor_read_state(hass, utcnow):
"""Test reading the state of a HomeKit temperature sensor accessory."""
helper = await setup_test_component(
hass, create_temperature_sensor_service, suffix="temperature"
)
helper.characteristics[TEMPERATURE].value = 10
state = await helper.poll_and_get_state()
assert state.state == "10"
helper.characteristics[TEMPERATURE].value = 20
state = await helper.poll_and_get_state()
assert state.state == "20"
assert state.attributes["device_class"] == DEVICE_CLASS_TEMPERATURE
async def test_humidity_sensor_read_state(hass, utcnow):
"""Test reading the state of a HomeKit humidity sensor accessory."""
helper = await setup_test_component(
hass, create_humidity_sensor_service, suffix="humidity"
)
helper.characteristics[HUMIDITY].value = 10
state = await helper.poll_and_get_state()
assert state.state == "10"
helper.characteristics[HUMIDITY].value = 20
state = await helper.poll_and_get_state()
assert state.state == "20"
assert state.attributes["device_class"] == DEVICE_CLASS_HUMIDITY
async def test_light_level_sensor_read_state(hass, utcnow):
"""Test reading the state of a HomeKit temperature sensor accessory."""
helper = await setup_test_component(
hass, create_light_level_sensor_service, suffix="light_level"
)
helper.characteristics[LIGHT_LEVEL].value = 10
state = await helper.poll_and_get_state()
assert state.state == "10"
helper.characteristics[LIGHT_LEVEL].value = 20
state = await helper.poll_and_get_state()
assert state.state == "20"
assert state.attributes["device_class"] == DEVICE_CLASS_ILLUMINANCE
async def test_carbon_dioxide_level_sensor_read_state(hass, utcnow):
"""Test reading the state of a HomeKit carbon dioxide sensor accessory."""
helper = await setup_test_component(
hass, create_carbon_dioxide_level_sensor_service, suffix="co2"
)
helper.characteristics[CARBON_DIOXIDE_LEVEL].value = 10
state = await helper.poll_and_get_state()
assert state.state == "10"
helper.characteristics[CARBON_DIOXIDE_LEVEL].value = 20
state = await helper.poll_and_get_state()
assert state.state == "20"
async def test_battery_level_sensor(hass, utcnow):
"""Test reading the state of a HomeKit battery level sensor."""
helper = await setup_test_component(
hass, create_battery_level_sensor, suffix="battery"
)
helper.characteristics[BATTERY_LEVEL].value = 100
state = await helper.poll_and_get_state()
assert state.state == "100"
assert state.attributes["icon"] == "mdi:battery"
helper.characteristics[BATTERY_LEVEL].value = 20
state = await helper.poll_and_get_state()
assert state.state == "20"
assert state.attributes["icon"] == "mdi:battery-20"
assert state.attributes["device_class"] == DEVICE_CLASS_BATTERY
async def test_battery_charging(hass, utcnow):
"""Test reading the state of a HomeKit battery's charging state."""
helper = await setup_test_component(
hass, create_battery_level_sensor, suffix="battery"
)
helper.characteristics[BATTERY_LEVEL].value = 0
helper.characteristics[CHARGING_STATE].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["icon"] == "mdi:battery-outline"
helper.characteristics[BATTERY_LEVEL].value = 20
state = await helper.poll_and_get_state()
assert state.attributes["icon"] == "mdi:battery-charging-20"
async def test_battery_low(hass, utcnow):
"""Test reading the state of a HomeKit battery's low state."""
helper = await setup_test_component(
hass, create_battery_level_sensor, suffix="battery"
)
helper.characteristics[LO_BATT].value = 0
helper.characteristics[BATTERY_LEVEL].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["icon"] == "mdi:battery-10"
helper.characteristics[LO_BATT].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["icon"] == "mdi:battery-alert"
|
apache-2.0
|
Glottotopia/aagd
|
moin/local/moin/MoinMoin/support/pygments/styles/vs.py
|
3
|
1111
|
# -*- coding: utf-8 -*-
"""
pygments.styles.vs
~~~~~~~~~~~~~~~~~~
Simple style with MS Visual Studio colors.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Operator, Generic
class VisualStudioStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "#008000",
Comment.Preproc: "#0000ff",
Keyword: "#0000ff",
Operator.Word: "#0000ff",
Keyword.Type: "#2b91af",
Name.Class: "#2b91af",
String: "#a31515",
Generic.Heading: "bold",
Generic.Subheading: "bold",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold",
Error: "border:#FF0000"
}
|
mit
|
huntxu/fuel-web
|
nailgun/nailgun/rpc/threaded.py
|
9
|
2323
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import traceback
from kombu import Connection
from kombu.mixins import ConsumerMixin
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
import nailgun.rpc as rpc
from nailgun.rpc.receiver import NailgunReceiver
class RPCConsumer(ConsumerMixin):
def __init__(self, connection, receiver):
self.connection = connection
self.receiver = receiver
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[rpc.nailgun_queue],
callbacks=[self.consume_msg])]
def consume_msg(self, body, msg):
callback = getattr(self.receiver, body["method"])
try:
callback(**body["args"])
db().commit()
except errors.CannotFindTask as e:
logger.warn(str(e))
db().rollback()
except Exception:
logger.error(traceback.format_exc())
db().rollback()
finally:
msg.ack()
db().expire_all()
class RPCKombuThread(threading.Thread):
def __init__(self, rcvr_class=NailgunReceiver):
super(RPCKombuThread, self).__init__()
self.stoprequest = threading.Event()
self.receiver = rcvr_class
self.connection = None
def join(self, timeout=None):
self.stoprequest.set()
# this should interrupt inner kombu event loop
# actually, it doesn't
self.consumer.should_stop = True
super(RPCKombuThread, self).join(timeout)
def run(self):
with Connection(rpc.conn_str) as conn:
self.consumer = RPCConsumer(conn, self.receiver)
self.consumer.run()
|
apache-2.0
|
sociateru/fabtools
|
fabtools/tests/functional_tests/test_nodejs.py
|
4
|
3575
|
import functools
try:
import json
except ImportError:
import simplejson as json
import pytest
from fabric.api import cd, run
from fabtools.files import is_file
from fabtools.require import directory as require_directory
from fabtools.require import file as require_file
pytestmark = pytest.mark.network
@pytest.fixture(scope='module')
def nodejs():
from fabtools.nodejs import install_from_source, version, DEFAULT_VERSION
if version() != DEFAULT_VERSION:
install_from_source()
def test_nodejs_is_installed(nodejs):
from fabtools.nodejs import version, DEFAULT_VERSION
assert is_file('/usr/local/bin/node')
assert version() == DEFAULT_VERSION
def test_install_and_uninstall_global_package(nodejs):
from fabtools.nodejs import install_package, package_version, uninstall_package
if not package_version('underscore'):
install_package('underscore', version='1.4.2')
assert package_version('underscore') == '1.4.2'
assert is_file('/usr/local/lib/node_modules/underscore/underscore.js')
uninstall_package('underscore')
assert package_version('underscore') is None
assert not is_file('/usr/local/lib/node_modules/underscore/underscore.js')
def test_install_and_uninstall_local_package(nodejs):
from fabtools.nodejs import install_package, package_version, uninstall_package
if not package_version('underscore', local=True):
install_package('underscore', version='1.4.2', local=True)
assert is_file('node_modules/underscore/underscore.js')
assert package_version('underscore', local=True) == '1.4.2'
uninstall_package('underscore', local=True)
assert package_version('underscore', local=True) is None
assert not is_file('node_modules/underscore/underscore.js')
@pytest.fixture
def testdir(request):
require_directory('nodetest')
request.addfinalizer(functools.partial(run, 'rm -rf nodetest'))
return 'nodetest'
def test_install_dependencies_from_package_json_file(nodejs, testdir):
from fabtools.nodejs import install_dependencies, package_version, uninstall_package
with cd(testdir):
require_file('package.json', contents=json.dumps({
'name': 'nodetest',
'version': '1.0.0',
'dependencies': {
'underscore': '1.4.2'
}
}))
install_dependencies()
assert is_file('node_modules/underscore/underscore.js')
assert package_version('underscore', local=True) == '1.4.2'
uninstall_package('underscore', local=True)
def test_require_global_package(nodejs):
from fabtools.require.nodejs import package as require_package
from fabtools.nodejs import package_version, uninstall_package
try:
# Require specific version
require_package('underscore', version='1.4.1')
assert package_version('underscore') == '1.4.1'
# Downgrade
require_package('underscore', version='1.4.0')
assert package_version('underscore') == '1.4.0'
# Upgrade
require_package('underscore', version='1.4.2')
assert package_version('underscore') == '1.4.2'
finally:
uninstall_package('underscore')
def test_require_local_package(nodejs):
from fabtools.require.nodejs import package as require_package
from fabtools.nodejs import package_version, uninstall_package
require_package('underscore', version='1.4.2', local=True)
assert package_version('underscore', local=True) == '1.4.2'
uninstall_package('underscore', local=True)
|
bsd-2-clause
|
vFense/vFenseAgent-nix
|
agent/deps/rpm6/Python-2.7.5/lib/python2.7/ctypes/test/test_bitfields.py
|
44
|
9293
|
from ctypes import *
import unittest
import os
import ctypes
import _ctypes_test
class BITS(Structure):
_fields_ = [("A", c_int, 1),
("B", c_int, 2),
("C", c_int, 3),
("D", c_int, 4),
("E", c_int, 5),
("F", c_int, 6),
("G", c_int, 7),
("H", c_int, 8),
("I", c_int, 9),
("M", c_short, 1),
("N", c_short, 2),
("O", c_short, 3),
("P", c_short, 4),
("Q", c_short, 5),
("R", c_short, 6),
("S", c_short, 7)]
func = CDLL(_ctypes_test.__file__).unpack_bitfields
func.argtypes = POINTER(BITS), c_char
##for n in "ABCDEFGHIMNOPQRS":
## print n, hex(getattr(BITS, n).size), getattr(BITS, n).offset
class C_Test(unittest.TestCase):
def test_ints(self):
for i in range(512):
for name in "ABCDEFGHI":
b = BITS()
setattr(b, name, i)
self.assertEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
def test_shorts(self):
for i in range(256):
for name in "MNOPQRS":
b = BITS()
setattr(b, name, i)
self.assertEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types
class BitFieldTest(unittest.TestCase):
def test_longlong(self):
class X(Structure):
_fields_ = [("a", c_longlong, 1),
("b", c_longlong, 62),
("c", c_longlong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
x.a, x.b, x.c = -1, 7, -1
self.assertEqual((x.a, x.b, x.c), (-1, 7, -1))
def test_ulonglong(self):
class X(Structure):
_fields_ = [("a", c_ulonglong, 1),
("b", c_ulonglong, 62),
("c", c_ulonglong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
self.assertEqual((x.a, x.b, x.c), (0, 0, 0))
x.a, x.b, x.c = 7, 7, 7
self.assertEqual((x.a, x.b, x.c), (1, 7, 1))
def test_signed(self):
for c_typ in signed_int_types:
class X(Structure):
_fields_ = [("dummy", c_typ),
("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ)*2)
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, -1, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, -1, 0))
def test_unsigned(self):
for c_typ in unsigned_int_types:
class X(Structure):
_fields_ = [("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 7, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 7, 0))
def fail_fields(self, *fields):
return self.get_except(type(Structure), "X", (),
{"_fields_": fields})
def test_nonint_types(self):
# bit fields are not allowed on non-integer types.
result = self.fail_fields(("a", c_char_p, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char_p'))
result = self.fail_fields(("a", c_void_p, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_void_p'))
if c_int != c_long:
result = self.fail_fields(("a", POINTER(c_int), 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type LP_c_int'))
result = self.fail_fields(("a", c_char, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char'))
try:
c_wchar
except NameError:
pass
else:
result = self.fail_fields(("a", c_wchar, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_wchar'))
class Dummy(Structure):
_fields_ = []
result = self.fail_fields(("a", Dummy, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type Dummy'))
def test_single_bitfield_size(self):
for c_typ in int_types:
result = self.fail_fields(("a", c_typ, -1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
result = self.fail_fields(("a", c_typ, 0))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
class X(Structure):
_fields_ = [("a", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
class X(Structure):
_fields_ = [("a", c_typ, sizeof(c_typ)*8)]
self.assertEqual(sizeof(X), sizeof(c_typ))
result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
def test_multi_bitfields_size(self):
class X(Structure):
_fields_ = [("a", c_short, 1),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short))
class X(Structure):
_fields_ = [("a", c_short, 1),
("a1", c_short),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, 0)
self.assertEqual(X.a1.offset, sizeof(c_short))
self.assertEqual(X.b.offset, sizeof(c_short)*2)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
class X(Structure):
_fields_ = [("a", c_short, 3),
("b", c_short, 14),
("c", c_short, 14)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, sizeof(c_short)*0)
self.assertEqual(X.b.offset, sizeof(c_short)*1)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
def get_except(self, func, *args, **kw):
try:
func(*args, **kw)
except Exception, detail:
return detail.__class__, str(detail)
def test_mixed_1(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 4)]
if os.name in ("nt", "ce"):
self.assertEqual(sizeof(X), sizeof(c_int)*2)
else:
self.assertEqual(sizeof(X), sizeof(c_int))
def test_mixed_2(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 32)]
self.assertEqual(sizeof(X), sizeof(c_int)*2)
def test_mixed_3(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
self.assertEqual(sizeof(X), sizeof(c_byte))
def test_mixed_4(self):
class X(Structure):
_fields_ = [("a", c_short, 4),
("b", c_short, 4),
("c", c_int, 24),
("d", c_short, 4),
("e", c_short, 4),
("f", c_int, 24)]
# MSVC does NOT combine c_short and c_int into one field, GCC
# does (unless GCC is run with '-mms-bitfields' which
# produces code compatible with MSVC).
if os.name in ("nt", "ce"):
self.assertEqual(sizeof(X), sizeof(c_int) * 4)
else:
self.assertEqual(sizeof(X), sizeof(c_int) * 2)
def test_anon_bitfields(self):
# anonymous bit-fields gave a strange error message
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
class Y(Structure):
_anonymous_ = ["_"]
_fields_ = [("_", X)]
@unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required")
def test_uint32(self):
class X(Structure):
_fields_ = [("a", c_uint32, 32)]
x = X()
x.a = 10
self.assertEqual(x.a, 10)
x.a = 0xFDCBA987
self.assertEqual(x.a, 0xFDCBA987)
@unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required")
def test_uint64(self):
class X(Structure):
_fields_ = [("a", c_uint64, 64)]
x = X()
x.a = 10
self.assertEqual(x.a, 10)
x.a = 0xFEDCBA9876543211
self.assertEqual(x.a, 0xFEDCBA9876543211)
if __name__ == "__main__":
unittest.main()
|
lgpl-3.0
|
RaumZeit/gdesklets-core
|
utils/dialog.py
|
2
|
6662
|
import gtk
import gobject
import sys
import traceback
from cStringIO import StringIO
from HIGDialog import HIGDialog
from LogView import LogView
# define some Dialog icons
_ERROR = gtk.STOCK_DIALOG_ERROR
_INFO = gtk.STOCK_DIALOG_INFO
_QUESTION = gtk.STOCK_DIALOG_QUESTION
_WARNING = gtk.STOCK_DIALOG_WARNING
# we only want to display one dialog at a time, so let's queue them
_dialog_queue = []
# IDs which are to skip
_skip_ids = []
# remember the previous message to avoid displaying the same message twice
# in a sequence
_last_message = None
#
# Adds a details button to the given dialog.
#
def _set_details(dialog, details):
vbox1 = gtk.VBox()
vbox2 = dialog.vbox
vbox2.pack_start(vbox1)
align1 = gtk.Alignment(0.0, 0.0, 0.0, 0.0)
align1.set_property("border-width", 6)
align1.show()
vbox2.pack_start(align1)
align2 = gtk.Alignment(0.0, 0.0, 0.0, 0.0)
align2.set_property("border-width", 6)
align2.show()
details = details.rstrip()
expander = gtk.expander_new_with_mnemonic(
_("_Details (%d lines)") % len(details.splitlines()))
expander.show()
viewport = gtk.ScrolledWindow()
viewport.set_policy(gtk.POLICY_NEVER, gtk.POLICY_NEVER)
viewport.show()
lbl = LogView()
lbl.append(details)
lbl.show()
nil, height = lbl.get_size_request()
width, nil = vbox2.get_size_request()
viewport.set_size_request(width, min(height, 480))
viewport.add_with_viewport(lbl)
expander.add(viewport)
align2.add(expander)
vbox1.show()
vbox1.pack_start(align2)
#
# Queues the given dialog for displaying.
#
def _queue_dialog(ident, dialog):
def proceed(*args):
if (not _dialog_queue): return
_dialog_queue.pop(0)
if (not _dialog_queue): return
ident, dialog = _dialog_queue[0]
if (not ident in _skip_ids):
dialog.present()
else:
dialog.destroy()
proceed()
dialog.connect("destroy", proceed)
# display the dialog immediately if there are no others in the queue
_dialog_queue.append((ident, dialog))
if (len(_dialog_queue) == 1):
dialog.present()
#
# Removes all dialogs associated with the given ID from the queue.
#
def forget(ident_to_forget):
q = []
for ident, dialog in _dialog_queue:
if (ident != ident_to_forget): q.append((ident, dialog))
_dialog_queue[:] = q
if (ident_to_forget in _skip_ids): _skip_ids.remove(ident_to_forget)
#
# Displays an error dialog. Errors are critical and the program terminates
# afterwards.
#
def error(primary, secondary):
dialog = HIGDialog((gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE),
_ERROR, primary, secondary)
gtk.threads_enter()
dialog.run()
gtk.threads_leave()
sys.exit(1337)
def _configurable(icon, primary, secondary, *buttons):
def responder(src, response):
callback = buttons[response][1]
# Before calling back, check to see if it's callable
if (callback and hasattr(callback, '__call__')): callback()
response = 0
btns = []
for label, callback in buttons:
btns.append(label)
btns.append(response)
response += 1
dialog = HIGDialog(tuple(btns), icon, primary, secondary)
dialog.connect("response", responder)
return dialog
#
# Displays an information dialog.
#
def info(primary, secondary, *buttons):
if not buttons:
buttons = [(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)]
dialog = _configurable(_INFO, primary, secondary, *buttons)
_queue_dialog(0, dialog)
#
# Displays a question dialog.
#
def question(primary, secondary, *buttons):
dialog = _configurable(_QUESTION, primary, secondary, *buttons)
dialog.show()
#
# Displays a warning dialog.
#
def warning(primary, secondary, details = "", force = False):
global _last_message
# don't show the same dialog twice in a sequence
if (force): _last_message = ""
if (_last_message == (primary, secondary, details)): return
else: _last_message = (primary, secondary, details)
dialog = HIGDialog((gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE),
_WARNING, primary, secondary)
if (details):
_set_details(dialog, details)
_queue_dialog(0, dialog)
#
# Displays a user error dialog. This dialog is for highlighting invalid lines
# of code and is associated with a display instance.
#
def user_error(ident, primary, secondary, details = ""):
if (ident in _skip_ids): return
dialog = HIGDialog((gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE),
_WARNING, primary, secondary)
if (details):
_set_details(dialog, details)
def f(src, ident):
if (src.get_active() and not ident in _skip_ids):
_skip_ids.append(ident)
elif (not src.get_active() and ident in _skip_ids):
_skip_ids.remove(ident)
vbox = dialog.vbox
chkbtn = gtk.CheckButton(_("_Ignore errors from this desklet"))
chkbtn.connect("toggled", f, ident)
chkbtn.show()
vbox.pack_start(chkbtn)
_queue_dialog(ident, dialog)
#
# Use the new filechoose if possible, or fallback to the old one
#
def fileselector(title, callback_ok, callback_cancel, *args):
def handler(src, response):
if (response == gtk.RESPONSE_OK):
if (callback_ok): callback_ok(src, *args)
else:
if (callback_cancel): callback_cancel(src, *args)
else: src.destroy()
# do we have FileChooserDialog available?
try:
fsel = gtk.FileChooserDialog(title, None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
fsel.connect("response", handler)
# no, then use the old FileSelection
except:
def f(btn, fsel, response): handler(fsel, response)
fsel = gtk.FileSelection()
if (title): fsel.set_title(title)
fsel.ok_button.connect("clicked", f, fsel, gtk.RESPONSE_OK)
fsel.cancel_button.connect("clicked", f, fsel, gtk.RESPONSE_CANCEL)
fsel.show()
#
# Wrap URLs uniformly for use in a GtkLabel
# If url isn't set, assume text is the href
#
if gtk.gtk_version >= (2, 18, 0):
def urlwrap(text, url=None):
if not url:
url = text
return "<a href=\"%s\">%s</a>" % (url, text)
else:
def urlwrap(text, url=None):
if url:
return "%s (<i>%s</i>)" % (text, url)
else:
return "<i>%s</i>" % (text)
|
gpl-2.0
|
leorochael/odoo
|
openerp/addons/base/__init__.py
|
379
|
1134
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir
import workflow
import module
import res
import report
import tests
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
dungtn/babi_dialog_system
|
data/data_utils.py
|
1
|
8344
|
DATA_SOURCE = 'data/dialog-bAbI-tasks/dialog-babi-candidates.txt'
DATA_SOURCE_TASK6 = 'data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-candidates.txt'
DATA_DIR = 'dialog-bAbI-tasks/dialog-babi-candidates.txt'
STOP_WORDS=set(["a","an","the"])
import re
import os
from itertools import chain
from six.moves import range, reduce
import numpy as np
import tensorflow as tf
def tokenize(sent):
    '''Return the tokens of a sentence, including punctuation.
    Tokens are lower-cased, the stop words "a", "an" and "the" are dropped,
    and a trailing '.', '?' or '!' token is removed.
    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['bob', 'dropped', 'apple', '.', 'where', 'is', 'apple']
    '''
sent=sent.lower()
if sent=='<silence>':
return [sent]
    result=[x.strip() for x in re.split(r'(\W+)?', sent) if x.strip() and x.strip() not in STOP_WORDS]
if not result:
result=['<silence>']
if result[-1]=='.' or result[-1]=='?' or result[-1]=='!':
result=result[:-1]
return result
def load_candidates(task_id, candidates_f=DATA_SOURCE):
# containers
candidates, candid2idx, idx2candid = [], {}, {}
# update data source file based on task id
candidates_f = DATA_SOURCE_TASK6 if task_id==6 else candidates_f
# read from file
with open(candidates_f) as f:
# iterate through lines
for i, line in enumerate(f):
# tokenize each line into... well.. tokens!
candid2idx[line.strip().split(' ',1)[1]] = i
candidates.append(tokenize(line.strip()))
idx2candid[i] = line.strip().split(' ',1)[1]
return candidates, candid2idx, idx2candid
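# Illustrative note (assumption, not taken from the data files): each line of the
# candidates file is expected to look like "<line-id> <candidate response>";
# candid2idx maps the response text (everything after the first space) to its
# 0-based line number and idx2candid is the inverse, e.g.:
#
#     candidates, candid2idx, idx2candid = load_candidates(task_id=1)
#     idx2candid[0]    # text of the first candidate line; value depends on the file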
def parse_dialogs_per_response(lines,candid_dic):
'''
Parse dialogs provided in the babi tasks format
'''
data=[]
context=[]
u=None
r=None
for line in lines:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
nid = int(nid)
if '\t' in line:
u, r = line.split('\t')
a = candid_dic[r]
u = tokenize(u)
r = tokenize(r)
# temporal encoding, and utterance/response encoding
# data.append((context[:],u[:],candid_dic[' '.join(r)]))
data.append((context[:],u[:],a))
u.append('$u')
u.append('#'+str(nid))
r.append('$r')
r.append('#'+str(nid))
context.append(u)
context.append(r)
else:
r=tokenize(line)
r.append('$r')
r.append('#'+str(nid))
context.append(r)
else:
# clear context
context=[]
return data
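# Illustrative sketch (assumption, not taken from the data files): for an input
# line such as "1 hi\thello what can i help you with today", the parser emits one
# (context, utterance, answer-index) triple and then grows the running context
# with speaker/position markers:
#
#     data[0] == ([], ['hi'], candid_dic['hello what can i help you with today'])
#     context == [['hi', '$u', '#1'],
#                 ['hello', 'what', 'can', 'i', 'help', 'you', 'with', 'today', '$r', '#1']]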
def get_dialogs(f,candid_dic):
    '''Given a file name, read the file, retrieve the dialogs and parse them
    into (context, utterance, answer) triples via parse_dialogs_per_response.
    '''
with open(f) as f:
return parse_dialogs_per_response(f.readlines(),candid_dic)
def load_dialog_task(data_dir, task_id, candid_dic, isOOV=False):
'''Load the nth task.
Returns a tuple containing the training and testing data for the task.
'''
assert task_id > 0 and task_id < 7
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = 'dialog-babi-task{}-'.format(task_id)
train_file = [f for f in files if s in f and 'trn' in f][0]
if isOOV:
test_file = [f for f in files if s in f and 'tst-OOV' in f][0]
else:
test_file = [f for f in files if s in f and 'tst.' in f][0]
val_file = [f for f in files if s in f and 'dev' in f][0]
train_data = get_dialogs(train_file,candid_dic)
test_data = get_dialogs(test_file,candid_dic)
val_data = get_dialogs(val_file,candid_dic)
return train_data, test_data, val_data
def build_vocab(data, candidates, memory_size=50):
vocab = reduce(lambda x, y: x | y, (set(list(chain.from_iterable(s)) + q) for s, q, a in data))
vocab |= reduce(lambda x,y: x|y, (set(candidate) for candidate in candidates) )
vocab=sorted(vocab)
w2idx = dict((c, i + 1) for i, c in enumerate(vocab))
max_story_size = max(map(len, (s for s, _, _ in data)))
mean_story_size = int(np.mean([ len(s) for s, _, _ in data ]))
sentence_size = max(map(len, chain.from_iterable(s for s, _, _ in data)))
candidate_sentence_size=max(map(len,candidates))
query_size = max(map(len, (q for _, q, _ in data)))
memory_size = min(memory_size, max_story_size)
vocab_size = len(w2idx) + 1 # +1 for nil word
sentence_size = max(query_size, sentence_size) # for the position
return {
'w2idx' : w2idx,
'idx2w' : vocab,
'sentence_size' : sentence_size,
'candidate_sentence_size' : candidate_sentence_size,
'memory_size' : memory_size,
'vocab_size' : vocab_size,
'n_cand' : len(candidates)
} # metadata
def vectorize_candidates(candidates, word_idx, sentence_size):
shape=(len(candidates),sentence_size)
C=[]
for i,candidate in enumerate(candidates):
lc=max(0,sentence_size-len(candidate))
C.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc)
return tf.constant(C,shape=shape)
def vectorize_data(data, word_idx, sentence_size, batch_size, candidates_size, max_memory_size):
"""
Vectorize stories and queries.
If a sentence length < sentence_size, the sentence will be padded with 0's.
If a story length < memory_size, the story will be padded with empty memories.
Empty memories are 1-D arrays of length sentence_size filled with 0's.
The answer array is returned as a one-hot encoding.
"""
S = []
Q = []
A = []
data.sort(key=lambda x:len(x[0]),reverse=True)
for i, (story, query, answer) in enumerate(data):
ss = []
for i, sentence in enumerate(story, 1):
ls = max(0, sentence_size - len(sentence))
ss.append([word_idx[w] if w in word_idx else 0 for w in sentence] + [0] * ls)
# take only the most recent sentences that fit in memory
ss = ss[::-1][:max_memory_size][::-1]
# pad to memory_size
lm = max(0, max_memory_size - len(ss))
for _ in range(lm):
ss.append([0] * sentence_size)
lq = max(0, sentence_size - len(query))
q = [word_idx[w] if w in word_idx else 0 for w in query] + [0] * lq
S.append(np.array(ss))
Q.append(np.array(q))
A.append(np.array(answer))
return S, Q, A
def get_batches(train_data, val_data, test_data, metadata, batch_size):
'''
input : train data, valid data
metadata : {batch_size, w2idx, sentence_size, num_cand, memory_size}
output : batch indices ([start, end]); train, val split into stories, ques, answers
'''
w2idx = metadata['w2idx']
sentence_size = metadata['sentence_size']
memory_size = metadata['memory_size']
n_cand = metadata['n_cand']
trainS, trainQ, trainA = vectorize_data(train_data, w2idx, sentence_size, batch_size, n_cand, memory_size)
valS, valQ, valA = vectorize_data(val_data, w2idx, sentence_size, batch_size, n_cand, memory_size)
testS, testQ, testA = vectorize_data(test_data, w2idx, sentence_size, batch_size, n_cand, memory_size)
n_train = len(trainS)
n_val = len(valS)
n_test = len(testS)
print("Training Size",n_train)
print("Validation Size", n_val)
print("Test Size", n_test)
batches = zip(range(0, n_train-batch_size, batch_size), range(batch_size, n_train, batch_size))
last_train_ix = n_train % batch_size
last_val_ix = n_val % batch_size
last_test_ix = n_test % batch_size
    # package train/val/test sets, dropping the ragged tail that doesn't fill a
    # whole batch; "or None" keeps the full list when the size divides evenly
    # (a plain [:-0] slice would otherwise return an empty list)
    train = { 's' : trainS[:-last_train_ix or None], 'q' : trainQ[:-last_train_ix or None], 'a' : trainA[:-last_train_ix or None] }
    val = { 's' : valS[:-last_val_ix or None], 'q' : valQ[:-last_val_ix or None], 'a' : valA[:-last_val_ix or None] }
    test = { 's' : testS[:-last_test_ix or None], 'q' : testQ[:-last_test_ix or None], 'a' : testA[:-last_test_ix or None] }
return train, val, test, [(start, end) for start, end in batches]
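# Illustrative end-to-end sketch (assumptions: the bAbI dialog data has been
# extracted under data/dialog-bAbI-tasks/ and batch_size=32 is arbitrary):
#
#     candidates, candid2idx, idx2candid = load_candidates(task_id=1)
#     train_data, test_data, val_data = load_dialog_task('data/dialog-bAbI-tasks/', 1, candid2idx)
#     metadata = build_vocab(train_data + test_data + val_data, candidates)
#     train, val, test, batches = get_batches(train_data, val_data, test_data, metadata, batch_size=32)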
if __name__ == '__main__':
candidates, candid2idx, idx2candid = load_candidates(task_id=1)
|
gpl-3.0
|
PeRDy/django-audit-tools
|
docs/source/conf.py
|
1
|
9176
|
# -*- coding: utf-8 -*-
#
# EbAudit documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 7 13:00:34 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
PROJECT_DIR = os.path.realpath(os.path.join(os.path.realpath(os.path.dirname(__file__)), '..', '..'))
sys.path.insert(0, PROJECT_DIR)
from django.conf import settings
settings.configure(
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:"
}
},
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"audit_tools",
],
)
import django
django.setup()
import audit_tools
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
#'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ebury-audit'
copyright = u'2014, Jose Antonio Perdiguero Lopez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = audit_tools.__version__
# The full version, including alpha/beta/rc tags.
release = audit_tools.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'EburyAuditdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ebury-audit.tex', u'Ebury Audit Documentation',
audit_tools.__author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Ebury Audit', u'Ebury Audit Documentation',
[audit_tools.__author__], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ebury-audit', u'Ebury Audit Documentation',
audit_tools.__author__, 'ebury-audit', audit_tools.__description__,
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
|
gpl-2.0
|
eBay/restcommander
|
play-1.2.4/python/Lib/socket.py
|
9
|
17974
|
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
ssl() -- secure socket layer support (only available if configured)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
try:
import _ssl
except ImportError:
# no SSL support
pass
else:
def ssl(sock, keyfile=None, certfile=None):
# we do an internal import here because the ssl
# module imports the socket module
import ssl as _realssl
warnings.warn("socket.ssl() is deprecated. Use ssl.wrap_socket() instead.",
DeprecationWarning, stacklevel=2)
return _realssl.sslwrap_simple(sock, keyfile, certfile)
# we need to import the same constants we used to...
from _ssl import SSLError as sslerror
from _ssl import \
RAND_add, \
RAND_egd, \
RAND_status, \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
import os, sys, warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from errno import EBADF
except ImportError:
EBADF = 9
__all__ = ["getfqdn"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_socketmethods = (
'bind', 'connect', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'sendall', 'setblocking',
'settimeout', 'gettimeout', 'shutdown')
if os.name == "nt":
_socketmethods = _socketmethods + ('ioctl',)
if sys.platform == "riscos":
_socketmethods = _socketmethods + ('sleeptaskw',)
# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
"send", "sendto")
class _closedsocket(object):
__slots__ = []
def _dummy(*args):
raise error(EBADF, 'Bad file descriptor')
# All _delegate_methods must also be initialized here.
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
__getattr__ = _dummy
# Wrapper around platform socket objects. This implements
# a platform-independent dup() functionality. The
# implementation currently relies on reference counting
# to close the underlying socket object.
class _socketobject(object):
__doc__ = _realsocket.__doc__
__slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
if _sock is None:
_sock = _realsocket(family, type, proto)
self._sock = _sock
for method in _delegate_methods:
setattr(self, method, getattr(_sock, method))
def close(self):
self._sock = _closedsocket()
dummy = self._sock._dummy
for method in _delegate_methods:
setattr(self, method, dummy)
close.__doc__ = _realsocket.close.__doc__
def accept(self):
sock, addr = self._sock.accept()
return _socketobject(_sock=sock), addr
accept.__doc__ = _realsocket.accept.__doc__
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource."""
return _socketobject(_sock=self._sock)
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
return _fileobject(self._sock, mode, bufsize)
family = property(lambda self: self._sock.family, doc="the socket family")
type = property(lambda self: self._sock.type, doc="the socket type")
proto = property(lambda self: self._sock.proto, doc="the socket protocol")
_s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
"%s.__doc__ = _realsocket.%s.__doc__\n")
for _m in _socketmethods:
exec _s % (_m, _m, _m, _m)
del _m, _s
socket = SocketType = _socketobject
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ["mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf",
"_close"]
def __init__(self, sock, mode='rb', bufsize=-1, close=False):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
# _rbufsize is the suggested recv buffer size. It is *strictly*
# obeyed within readline() for recv calls. If it is larger than
# default_bufsize it will be used for recv calls within read().
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
# We use StringIO for the read buffer to avoid holding a list
# of variously sized string objects which have been known to
# fragment the heap due to how they are malloc()ed and often
# realloc()ed down much smaller than their original allocation.
self._rbuf = StringIO()
self._wbuf = [] # A list of strings
self._close = close
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
self.flush()
finally:
if self._close:
self._sock.close()
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self._sock.sendall(buffer)
def fileno(self):
return self._sock.fileno()
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self._sock.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self._sock.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
msg = "getaddrinfo returns an empty list"
host, port = address
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except error, msg:
if sock is not None:
sock.close()
raise error, msg
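# Illustrative sketch (not part of the stdlib module): typical use of
# create_connection() with an explicit timeout. The host, port and request
# below are assumptions made only for this example.
#
#     s = create_connection(("www.example.com", 80), timeout=5.0)
#     s.sendall("HEAD / HTTP/1.0\r\nHost: www.example.com\r\n\r\n")
#     print s.recv(1024)
#     s.close()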
|
apache-2.0
|
doduytrung/odoo-8.0
|
addons/l10n_es/__openerp__.py
|
314
|
2772
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008-2010 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <[email protected]>
# Copyright (c) 2012-2013, Grupo OPENTIA (<http://opentia.com>) Registered EU Trademark.
# Dpto. Consultoría <[email protected]>
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro Manuel Baeza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Spanish Charts of Accounts (PGCE 2008)",
"version" : "4.0",
"author" : "Spanish Localization Team",
'website' : 'https://launchpad.net/openerp-spain',
"category" : "Localization/Account Charts",
"description": """
Spanish charts of accounts (PGCE 2008).
========================================
* Defines the following chart of account templates:
* Spanish general chart of accounts 2008
* Spanish general chart of accounts 2008 for small and medium companies
* Spanish general chart of accounts 2008 for associations
* Defines templates for sale and purchase VAT
* Defines tax code templates
    * Defines fiscal positions for Spanish fiscal legislation
""",
"license" : "AGPL-3",
"depends" : ["account", "base_vat", "base_iban"],
"data" : [
"account_type.xml",
"account_chart_template.xml",
"account_account_common.xml",
"account_account_full.xml",
"account_account_pymes.xml",
"account_account_assoc.xml",
"tax_codes_common.xml",
"taxes_common.xml",
"fiscal_templates_common.xml",
"account_chart_template_post.xml",
"l10n_es_wizard.xml",
],
"demo" : [],
'auto_install': False,
"installable": True,
'images': ['images/config_chart_l10n_es.png', 'images/l10n_es_chart.png'],
}
|
agpl-3.0
|
sparkslabs/kamaelia_
|
Code/Python/Kamaelia/Kamaelia/Device/DVB/EIT.py
|
3
|
14456
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# parse EIT now & next information from DVB-T streams
from Kamaelia.Device.DVB.Core import DVB_Multiplex, DVB_Demuxer
from Axon.Component import component
import struct
from Axon.Ipc import shutdownMicroprocess,producerFinished
class PSIPacketReconstructor(component):
"""\
Takes DVB Transport stream packets for a given PID and reconstructs the
PSI packets from within the stream.
Will only handle stream from a single PID.
"""
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
buffer = ""
nextCont = None
# XXX assuming for the moment that this can only handle one PID at a time
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
byte = ord(data[1])
start_indicator = (byte & 0x40) != 0
byte = ord(data[3])
adaption = (byte & 0x30) >> 4
contcount = byte & 0x0f
# check continuity counter is okay (otherwise ignore packet)
# or that its the start of a new packet and we've not started receiving yet
if (nextCont == None and start_indicator) or nextCont == contcount:
# determine start of payload offset
if adaption == 1:
payload_start = 4
elif adaption == 3:
payload_start = 4+1+ord(data[4]) # skip past adaption data
else: # adaption == 0 or adaption == 2
# ignore if adaption field==0 or no payload
continue
# if start of new payload present, flush previous, now complete, packet
if start_indicator:
prevstart = payload_start
payload_start = prevstart + ord(data[prevstart]) + 1
buffer = buffer + data[prevstart+1:payload_start]
if len(buffer) and nextCont != None: # don't flush through dregs if this is the first time
self.send( buffer, "outbox" )
buffer = ""
buffer = buffer + data[payload_start:]
nextCont = (contcount + 1) & 0xf
else:
# reset for crash relock
nextCont = None
buffer= ""
self.pause()
yield 1
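# Illustrative sketch (not part of the original component): how the two header
# bytes inspected in PSIPacketReconstructor.main() decode for a hypothetical TS
# packet whose bytes 1 and 3 are 0x40 and 0x1A:
#
#     byte = 0x40                 # data[1]
#     (byte & 0x40) != 0          # True -> payload unit starts in this packet
#     byte = 0x1A                 # data[3]
#     (byte & 0x30) >> 4          # 1    -> payload only, no adaptation field
#     byte & 0x0f                 # 10   -> continuity counter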
class EITPacketParser(component):
"""\
Parses EIT packets and extracts NOW & NEXT short event descriptions for
channels within this transport stream.
(Ignores events belonging to other multiplexes)
"""
Inboxes = { "inbox" : "PES packets",
"control" : "NOT USED",
}
Outboxes = { "outbox" : "Parsed NOW and NEXT EIT events",
"signal" : "NOT USED",
}
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
msg = {}
                # parse the EIT section header (the CRC itself is checked further below)
s = struct.unpack(">BHHBBBHHBB", data[:14])
table_id = s[0]
                syntax = s[1] & 0x8000
section_length = s[1] & 0x0fff
service_id = s[2]
version = (s[3] >>1) & 0x1f
current_next = s[3] & 0x01
section_num = s[4]
last_section = s[5]
ts_id = s[6]
net_id = s[7]
seg_last_sect = s[8]
last_table_id = s[9]
data=data[:3+section_length] # remove any padding at end of table
if table_id != 0x4e: # only interested in present/following data for this TS
continue
if not syntax:
print ("wrong syntax")
continue
if not current_next: # subtable not yet applicable
continue
# which subtable (uniquely identified by table_id, service(channel), TS and network)
subtable_id = (table_id, service_id, ts_id, net_id)
# print ("EIT table_id=",hex(table_id))
                # print (subtable_id)
# print (section_num,last_section,seg_last_sect)
if crc32(data): # fail on non-zero result
print ("EIT packet CRC error")
continue
msg['service'] = service_id
msg['transportstream'] = ts_id
# go through events
pos = 14
while pos < len(data) - 4: # 4 bytes for final checksum
e = struct.unpack(">HHBBBBBBH",data[pos:pos+12])
event_id = e[0]
date = parseMJD(e[1]) # Y, M, D
time = unBCD(e[2]), unBCD(e[3]), unBCD(e[4]) # HH, MM, SS
duration = unBCD(e[5]), unBCD(e[6]), unBCD(e[7]) # HH, MM, SS
running_status = (e[8] & 0xe000) >> 13
free_CA_mode = e[8] & 0x1000
descriptors_len = e[8] & 0x0fff
if running_status in [1,2]:
msg['when'] = "NEXT"
elif running_status in [3,4]:
msg['when'] = "NOW"
msg['startdate'] = date
msg['starttime'] = time
msg['duration'] = duration
pos = pos + 12
descriptors_end = pos + descriptors_len
# go through descriptors
while pos < descriptors_end:
desc_tag = ord(data[pos])
desc_len = ord(data[pos+1])
if desc_tag == 0x4d: # only interested in Short Event Descriptor
lang = data[pos+2:pos+5]
namelen = ord(data[pos+5])
name = data[pos+6:pos+6+namelen]
textlen = ord(data[pos+6+namelen])
text = data[pos+7+namelen:pos+7+namelen+textlen]
msg['name'] = name
msg['description'] = text
pos = pos + 2 + desc_len
self.send(msg, "outbox")
self.pause()
yield 1
def crc32(data):
poly = 0x4c11db7
crc = 0xffffffff
for byte in data:
byte = ord(byte)
for bit in range(7,-1,-1): # MSB to LSB
z32 = crc>>31 # top bit
crc = crc << 1
if ((byte>>bit)&1) ^ z32:
crc = crc ^ poly
crc = crc & 0xffffffff
return crc
def parseMJD(MJD):
"""Parse 16 bit unsigned int containing Modified Julian Date, as per DVB-SI spec
returning year,month,day"""
YY = int( (MJD - 15078.2) / 365.25 )
MM = int( (MJD - 14956.1 - int(YY*365.25) ) / 30.6001 )
D = MJD - 14956 - int(YY*365.25) - int(MM * 30.6001)
K=0
if MM == 14 or MM == 15:
K=1
return (1900 + YY+K), (MM-1-K*12), D
def unBCD(byte):
return (byte>>4)*10 + (byte & 0xf)
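# Worked example (illustrative; MJD 45218 is the conversion example used in the
# DVB-SI specification):
#
#     >>> parseMJD(45218)     # 6 September 1982
#     (1982, 9, 6)
#     >>> unBCD(0x25)         # BCD byte 0x25 encodes decimal 25
#     25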
class NowNextChanges(component):
"""\
Simple attempt to filter DVB now and next info for multiple services,
such that we only send output when the data changes.
"""
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
current = {}
while not self.shutdown():
while self.dataReady("inbox"):
event = self.recv("inbox")
# only interested in 'now' events, not 'next' events
if event['when'] != "NOW":
continue
uid = event['service'], event['transportstream']
if current.get(uid,None) != event:
current[uid] = event
self.send(current[uid],"outbox")
self.pause()
yield 1
class NowNextServiceFilter(component):
"""\
Filters now/next event data for only specified services.
"""
def __init__(self, *services):
super(NowNextServiceFilter,self).__init__()
self.services = services
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
event = self.recv("inbox")
if event['service'] in self.services:
self.send(event,"outbox")
self.pause()
yield 1
class TimeAndDatePacketParser(component):
"""\
Parses "Time and Date" packets.
"""
Inboxes = { "inbox" : "PES packets",
"control" : "NOT USED",
}
Outboxes = { "outbox" : "Parsed date and time",
"signal" : "NOT USED",
}
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
msg = {}
s = struct.unpack(">BHHBBB", data[:8])
table_id = s[0]
                syntax = s[1] & 0x8000
                section_length = s[1] & 0x0fff
                data=data[:3+section_length] # remove any padding at end of table
                if table_id != 0x70: # only interested in Date & Time packets
continue
if syntax:
print ("wrong syntax")
continue
date = parseMJD(s[2]) # Y, M, D
time = unBCD(s[3]), unBCD(s[4]), unBCD(s[5]) # HH, MM, SS
msg['date'] = date
msg['time'] = time
self.send(msg, "outbox")
self.pause()
yield 1
__kamaelia_components__ = ( PSIPacketReconstructor, EITPacketParser, NowNextChanges, NowNextServiceFilter, TimeAndDatePacketParser, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Writing import SimpleFileWriter
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Util.Console import ConsoleEchoer
import dvb3.frontend
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
Graphline(
SOURCE=DVB_Multiplex(505833330.0/1000000.0, [18,20,600,601], feparams),
DEMUX=DVB_Demuxer({ 18: ["_EIT_"], 20:["_DATETIME_"] }),
EIT = Pipeline( PSIPacketReconstructor(),
EITPacketParser(),
NowNextServiceFilter(4164, 4228), # BBC ONE & BBC TWO
NowNextChanges(),
ConsoleEchoer(),
),
DATETIME = Pipeline( PSIPacketReconstructor(),
TimeAndDatePacketParser(),
ConsoleEchoer(),
),
linkages={ ("SOURCE", "outbox"):("DEMUX","inbox"),
("DEMUX", "_EIT_"): ("EIT", "inbox"),
("DEMUX", "_DATETIME_"): ("DATETIME", "inbox"),
}
).run()
# RELEASE: MH, MPS
|
apache-2.0
|
cysuncn/python
|
spark/crm/PROC_F_CI_CUST_SIGN.py
|
1
|
49022
|
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_F_CI_CUST_SIGN').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# dates used throughout this job
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
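# Worked example (illustrative): with etl_date = '20160307' the derived dates are
#   V_DT     = '20160307'
#   V_DT_LD  = '20160306'
#   V_DT_FMD = '20160301'
#   V_DT_LMD = '20160229'   # 2016 is a leap year
#   V_DT10   = '2016-03-07'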
# clear existing data
ret = os.system("hdfs dfs -rm -r /"+dbname+"/OCRM_F_CI_CUST_SIGN/*.parquet")
# restore yesterday's backup as today's data file
ret = os.system("hdfs dfs -cp -f /"+dbname+"/OCRM_F_CI_CUST_SIGN_BK/"+V_DT_LD+".parquet /"+dbname+"/OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet")
F_CSP_WIRESIGNINFOHIST = sqlContext.read.parquet(hdfs+'/F_CSP_WIRESIGNINFOHIST/*')
F_CSP_WIRESIGNINFOHIST.registerTempTable("F_CSP_WIRESIGNINFOHIST")
F_DP_CBOD_SAACNACN = sqlContext.read.parquet(hdfs+'/F_DP_CBOD_SAACNACN/*')
F_DP_CBOD_SAACNACN.registerTempTable("F_DP_CBOD_SAACNACN")
F_CSP_PERNETBANKCIFINFOHIST = sqlContext.read.parquet(hdfs+'/F_CSP_PERNETBANKCIFINFOHIST/*')
F_CSP_PERNETBANKCIFINFOHIST.registerTempTable("F_CSP_PERNETBANKCIFINFOHIST")
F_CI_AFA_CUSTINFO = sqlContext.read.parquet(hdfs+'/F_CI_AFA_CUSTINFO/*')
F_CI_AFA_CUSTINFO.registerTempTable("F_CI_AFA_CUSTINFO")
F_CSP_SMSSIGNINFOHIST = sqlContext.read.parquet(hdfs+'/F_CSP_SMSSIGNINFOHIST/*')
F_CSP_SMSSIGNINFOHIST.registerTempTable("F_CSP_SMSSIGNINFOHIST")
F_NI_AFA_ELEC_DKGX = sqlContext.read.parquet(hdfs+'/F_NI_AFA_ELEC_DKGX/*')
F_NI_AFA_ELEC_DKGX.registerTempTable("F_NI_AFA_ELEC_DKGX")
OCRM_F_DP_CARD_INFO = sqlContext.read.parquet(hdfs+'/OCRM_F_DP_CARD_INFO/*')
OCRM_F_DP_CARD_INFO.registerTempTable("OCRM_F_DP_CARD_INFO")
OCRM_F_CI_CUST_DESC = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_DESC/*')
OCRM_F_CI_CUST_DESC.registerTempTable("OCRM_F_CI_CUST_DESC")
F_CSP_MOBILEBANKCIFINFOHIST = sqlContext.read.parquet(hdfs+'/F_CSP_MOBILEBANKCIFINFOHIST/*')
F_CSP_MOBILEBANKCIFINFOHIST.registerTempTable("F_CSP_MOBILEBANKCIFINFOHIST")
F_CSP_ENTBANKCIFINFOHIST = sqlContext.read.parquet(hdfs+'/F_CSP_ENTBANKCIFINFOHIST/*')
F_CSP_ENTBANKCIFINFOHIST.registerTempTable("F_CSP_ENTBANKCIFINFOHIST")
F_CSP_TVSIGNINFOHIST = sqlContext.read.parquet(hdfs+'/F_CSP_TVSIGNINFOHIST/*')
F_CSP_TVSIGNINFOHIST.registerTempTable("F_CSP_TVSIGNINFOHIST")
# Task[21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT COALESCE(B.SA_CUST_NO, C.CR_CUST_NO) AS CUST_ID
,CASE WHEN A.ZT='0' THEN '1' ELSE '0' END AS STATE
,A.FR_ID AS FR_ID
   FROM F_NI_AFA_ELEC_DKGX A                                   --provincial electricity-fee auto-debit relationship table
  LEFT JOIN F_DP_CBOD_SAACNACN B                               --demand-deposit master file
ON A.YHZH = B.SA_ACCT_NO
AND B.FR_ID = A.FR_ID
  LEFT JOIN OCRM_F_DP_CARD_INFO C                              --card master file
ON A.YHZH = C.CR_CRD_NO
AND C.FR_ID = A.FR_ID
WHERE A.SYSID = '800012'
AND A.ODS_ST_DATE = V_DT
GROUP BY B.SA_CUST_NO
,C.CR_CUST_NO
,A.ZT
,A.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_OCRM_F_CI_CUST_SIGN_01 = sqlContext.sql(sql)
TMP_OCRM_F_CI_CUST_SIGN_01.registerTempTable("TMP_OCRM_F_CI_CUST_SIGN_01")
dfn="TMP_OCRM_F_CI_CUST_SIGN_01/"+V_DT+".parquet"
TMP_OCRM_F_CI_CUST_SIGN_01.cache()
nrows = TMP_OCRM_F_CI_CUST_SIGN_01.count()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_OCRM_F_CI_CUST_SIGN_01/*.parquet")
TMP_OCRM_F_CI_CUST_SIGN_01.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_OCRM_F_CI_CUST_SIGN_01.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_OCRM_F_CI_CUST_SIGN_01 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
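# Illustrative sketch (not part of the original job): the cache / count /
# remove-old-parquet / write / unpersist sequence above recurs throughout this
# script; a small helper along these lines could factor it out. The name save_df
# is an assumption made only for this example.
#
#     def save_df(df, table_name):
#         df.cache()
#         n = df.count()
#         os.system("hdfs dfs -rm -r /" + dbname + "/" + table_name + "/*.parquet")
#         df.write.save(path=hdfs + '/' + table_name + '/' + V_DT + '.parquet', mode='overwrite')
#         df.unpersist()
#         return n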
# Task[21] 001-02::
V_STEP = V_STEP + 1
sql = """
SELECT DISTINCT COALESCE(B.SA_CUST_NO, C.CR_CUST_NO) AS CUST_ID
,CASE WHEN SYSID = '800235' THEN 'Water'
WHEN SYSID = '800037' THEN 'Gas'
END AS TYPE
,CASE WHEN A.SIGNSTATE = '0' THEN '1' ELSE '0' END AS STATE
,A.FR_ID AS FR_ID
   FROM F_CI_AFA_CUSTINFO A                                    --agency-unit customer sign-up information table
  LEFT JOIN F_DP_CBOD_SAACNACN B                               --demand-deposit master file
ON A.ACCOUNT = B.SA_ACCT_NO
AND B.FR_ID = A.FR_ID
  LEFT JOIN OCRM_F_DP_CARD_INFO C                              --card master file
ON A.ACCOUNT = C.CR_CRD_NO
AND C.FR_ID = A.FR_ID
WHERE A.SYSID IN('800235', '800037')
AND A.ODS_ST_DATE = V_DT
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_OCRM_F_CI_CUST_SIGN_02 = sqlContext.sql(sql)
TMP_OCRM_F_CI_CUST_SIGN_02.registerTempTable("TMP_OCRM_F_CI_CUST_SIGN_02")
dfn="TMP_OCRM_F_CI_CUST_SIGN_02/"+V_DT+".parquet"
TMP_OCRM_F_CI_CUST_SIGN_02.cache()
nrows = TMP_OCRM_F_CI_CUST_SIGN_02.count()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_OCRM_F_CI_CUST_SIGN_02/*.parquet")
TMP_OCRM_F_CI_CUST_SIGN_02.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_OCRM_F_CI_CUST_SIGN_02.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_OCRM_F_CI_CUST_SIGN_02 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
# Task[12] 001-03::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
TMP_OCRM_F_CI_CUST_SIGN_01 = sqlContext.read.parquet(hdfs+'/TMP_OCRM_F_CI_CUST_SIGN_01/*')
TMP_OCRM_F_CI_CUST_SIGN_01.registerTempTable("TMP_OCRM_F_CI_CUST_SIGN_01")
sql = """
SELECT A.CUST_ID AS CUST_ID
,A.STATE AS IF_ELEC
,B.IF_WATER AS IF_WATER
,B.IF_TV AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,B.IF_WY AS IF_WY
,B.IF_MSG AS IF_MSG
,B.IF_GAS AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT FR_ID,CUST_ID,STATE,
ROW_NUMBER() OVER(PARTITION BY FR_ID,CUST_ID ORDER BY STATE DESC ) RN
           FROM TMP_OCRM_F_CI_CUST_SIGN_01) A                  --customer sign-up temp table 01 (electricity fee)
   LEFT JOIN OCRM_F_CI_CUST_SIGN B                             --customer sign-up temp table
ON A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID
WHERE A.CUST_ID IS NOT NULL
AND RN = '1' """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
 SELECT DST.CUST_ID                                          --customer ID:src.CUST_ID
       ,DST.IF_ELEC                                          --electricity-fee sign-up flag:src.IF_ELEC
       ,DST.IF_WATER                                         --water-fee sign-up flag:src.IF_WATER
       ,DST.IF_TV                                            --cable-TV sign-up flag:src.IF_TV
       ,DST.IF_MOBILE                                        --mobile-banking sign-up flag:src.IF_MOBILE
       ,DST.IF_WY                                            --online-banking sign-up flag:src.IF_WY
       ,DST.IF_MSG                                           --SMS sign-up flag:src.IF_MSG
       ,DST.IF_GAS                                           --gas bill-payment sign-up flag:src.IF_GAS
       ,DST.IF_WIRE                                          --telecom bill-payment sign-up flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                        --sign-up summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                            --legal-entity (branch) ID:src.FR_ID
       ,DST.ST_DATE                                          --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
# Task[12] 001-04::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
TMP_OCRM_F_CI_CUST_SIGN_02 = sqlContext.read.parquet(hdfs+'/TMP_OCRM_F_CI_CUST_SIGN_02/*')
TMP_OCRM_F_CI_CUST_SIGN_02.registerTempTable("TMP_OCRM_F_CI_CUST_SIGN_02")
sql = """
SELECT A.CUST_ID AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,A.STATE AS IF_WATER
,B.IF_TV AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,B.IF_WY AS IF_WY
,B.IF_MSG AS IF_MSG
,B.IF_GAS AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT FR_ID,CUST_ID,STATE, ROW_NUMBER()OVER(PARTITION BY FR_ID,CUST_ID ORDER BY STATE DESC) RN
FROM TMP_OCRM_F_CI_CUST_SIGN_02
WHERE TYPE = 'Water' AND CUST_ID IS NOT NULL
         ) A                                                  --customer sign-up temp table 02 (water / gas fees)
   LEFT JOIN OCRM_F_CI_CUST_SIGN B                            --customer sign-up temp table
ON A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID
WHERE RN = '1' """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
 SELECT DST.CUST_ID                                          --customer ID:src.CUST_ID
       ,DST.IF_ELEC                                          --electricity-fee sign-up flag:src.IF_ELEC
       ,DST.IF_WATER                                         --water-fee sign-up flag:src.IF_WATER
       ,DST.IF_TV                                            --cable-TV sign-up flag:src.IF_TV
       ,DST.IF_MOBILE                                        --mobile-banking sign-up flag:src.IF_MOBILE
       ,DST.IF_WY                                            --online-banking sign-up flag:src.IF_WY
       ,DST.IF_MSG                                           --SMS sign-up flag:src.IF_MSG
       ,DST.IF_GAS                                           --gas bill-payment sign-up flag:src.IF_GAS
       ,DST.IF_WIRE                                          --telecom bill-payment sign-up flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                        --sign-up summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                            --legal-entity (branch) ID:src.FR_ID
       ,DST.ST_DATE                                          --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
# Task[12] 001-05::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CIFNO AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,B.IF_WATER AS IF_WATER
,'1' AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,B.IF_WY AS IF_WY
,B.IF_MSG AS IF_MSG
,B.IF_GAS AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT CIFNO,FR_ID
FROM F_CSP_TVSIGNINFOHIST
WHERE ODS_ST_DATE = V_DT
AND STATE = 'N'
AND MAINTCODE IN('A', 'U')
         ) A                                                  --cable-TV bill-payment sign-up history table
   LEFT JOIN OCRM_F_CI_CUST_SIGN B                            --customer sign-up temp table
ON A.CIFNO = B.CUST_ID
AND A.FR_ID = B.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
 SELECT DST.CUST_ID                                          --customer ID:src.CUST_ID
       ,DST.IF_ELEC                                          --electricity-fee sign-up flag:src.IF_ELEC
       ,DST.IF_WATER                                         --water-fee sign-up flag:src.IF_WATER
       ,DST.IF_TV                                            --cable-TV sign-up flag:src.IF_TV
       ,DST.IF_MOBILE                                        --mobile-banking sign-up flag:src.IF_MOBILE
       ,DST.IF_WY                                            --online-banking sign-up flag:src.IF_WY
       ,DST.IF_MSG                                           --SMS sign-up flag:src.IF_MSG
       ,DST.IF_GAS                                           --gas bill-payment sign-up flag:src.IF_GAS
       ,DST.IF_WIRE                                          --telecom bill-payment sign-up flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                        --sign-up summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                            --legal-entity (branch) ID:src.FR_ID
       ,DST.ST_DATE                                          --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
# Task[12] 001-06::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CUST_ID AS CUST_ID
,C.IF_ELEC AS IF_ELEC
,C.IF_WATER AS IF_WATER
,C.IF_TV AS IF_TV
,'1' AS IF_MOBILE
,C.IF_WY AS IF_WY
,C.IF_MSG AS IF_MSG
,C.IF_GAS AS IF_GAS
,C.IF_WIRE AS IF_WIRE
,C.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT A.FR_ID,B.CIFNO AS CUST_ID
FROM F_CSP_MOBILEBANKCIFINFOHIST A
JOIN F_CSP_ENTBANKCIFINFOHIST B ON A.CIFSEQ = B.MAINTJNLNO AND B.FR_ID = A.FR_ID
WHERE A.ODS_ST_DATE = V_DT
            AND A.MAINTCODE IN ('A','U')) A                   --mobile-banking activation history table
   LEFT JOIN OCRM_F_CI_CUST_SIGN C                            --customer sign-up temp table
ON A.CUST_ID = C.CUST_ID
AND A.FR_ID = C.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
 SELECT DST.CUST_ID                                          --customer ID:src.CUST_ID
       ,DST.IF_ELEC                                          --electricity-fee sign-up flag:src.IF_ELEC
       ,DST.IF_WATER                                         --water-fee sign-up flag:src.IF_WATER
       ,DST.IF_TV                                            --cable-TV sign-up flag:src.IF_TV
       ,DST.IF_MOBILE                                        --mobile-banking sign-up flag:src.IF_MOBILE
       ,DST.IF_WY                                            --online-banking sign-up flag:src.IF_WY
       ,DST.IF_MSG                                           --SMS sign-up flag:src.IF_MSG
       ,DST.IF_GAS                                           --gas bill-payment sign-up flag:src.IF_GAS
       ,DST.IF_WIRE                                          --telecom bill-payment sign-up flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                        --sign-up summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                            --legal-entity (branch) ID:src.FR_ID
       ,DST.ST_DATE                                          --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-07::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CUST_ID AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,B.IF_WATER AS IF_WATER
,B.IF_TV AS IF_TV
,'1' AS IF_MOBILE
,B.IF_WY AS IF_WY
,B.IF_MSG AS IF_MSG
,B.IF_GAS AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT CUST_ID ,FR_ID
FROM OCRM_F_CI_CUST_DESC A
WHERE SUBSTR(A.ODS_SYS_ID, 11, 1) = '1'
AND A.CRM_DT = V_DT) A                 --Unified customer information table
INNER JOIN OCRM_F_CI_CUST_SIGN B       --Customer signing temp table
ON A.FR_ID = B.FR_ID
AND A.CUST_ID = B.CUST_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-08::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CIFNO AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,B.IF_WATER AS IF_WATER
,B.IF_TV AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,B.IF_WY AS IF_WY
,'1' AS IF_MSG
,B.IF_GAS AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT CIFNO,FR_ID
FROM F_CSP_SMSSIGNINFOHIST             --SMS platform signing info history table
WHERE ODS_ST_DATE = V_DT
AND MAINTCODE IN ('A', 'U')
AND STATE = 'N' ) A
LEFT JOIN OCRM_F_CI_CUST_SIGN B        --Customer signing temp table
ON A.CIFNO = B.CUST_ID
AND A.FR_ID = B.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-09::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CUST_ID AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,B.IF_WATER AS IF_WATER
,B.IF_TV AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,B.IF_WY AS IF_WY
,'1' AS IF_MSG
,B.IF_GAS AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT CUST_ID ,FR_ID
FROM OCRM_F_CI_CUST_DESC A
WHERE SUBSTR(A.ODS_SYS_ID, 9, 1) = '1'
AND A.CRM_DT = V_DT) A                 --Unified customer information table
LEFT JOIN OCRM_F_CI_CUST_SIGN B        --Customer signing temp table
ON A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-10::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CUST_ID AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,B.IF_WATER AS IF_WATER
,B.IF_TV AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,B.IF_WY AS IF_WY
,B.IF_MSG AS IF_MSG
,A.STATE AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT CUST_ID,STATE,FR_ID,
ROW_NUMBER()OVER(PARTITION BY CUST_ID ORDER BY STATE DESC) RN
FROM TMP_OCRM_F_CI_CUST_SIGN_02
WHERE TYPE = 'Gas' AND CUST_ID IS NOT NULL
) A                                    --Customer signing temp table 02 (water and gas fees)
LEFT JOIN OCRM_F_CI_CUST_SIGN B        --Customer signing temp table
ON A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID
WHERE RN = '1' """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-11::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CUST_ID AS CUST_ID
,C.IF_ELEC AS IF_ELEC
,C.IF_WATER AS IF_WATER
,C.IF_TV AS IF_TV
,C.IF_MOBILE AS IF_MOBILE
,'1' AS IF_WY
,C.IF_MSG AS IF_MSG
,C.IF_GAS AS IF_GAS
,C.IF_WIRE AS IF_WIRE
,C.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT B.CIFNO AS CUST_ID,A.FR_ID
FROM F_CSP_PERNETBANKCIFINFOHIST A
JOIN F_CSP_ENTBANKCIFINFOHIST B ON A.CIFSEQ = B.MAINTJNLNO AND B.FR_ID = A.FR_ID
WHERE A.ODS_ST_DATE = V_DT
AND A.MAINTCODE IN ('A','U')           --A: sign up; U: change; D: terminate
) A                                    --Personal online-banking activation info history table
LEFT JOIN OCRM_F_CI_CUST_SIGN C        --Customer signing temp table
ON A.CUST_ID = C.CUST_ID
AND A.FR_ID = C.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-12::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CUST_ID AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,B.IF_WATER AS IF_WATER
,B.IF_TV AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,'1' AS IF_WY
,B.IF_MSG AS IF_MSG
,B.IF_GAS AS IF_GAS
,B.IF_WIRE AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT CUST_ID ,FR_ID
FROM OCRM_F_CI_CUST_DESC A
WHERE SUBSTR(A.ODS_SYS_ID, 3, 1) = '1'
AND A.CRM_DT = V_DT) A                 --Unified customer information table
INNER JOIN OCRM_F_CI_CUST_SIGN B       --Customer signing temp table
ON A.FR_ID = B.FR_ID
AND A.CUST_ID = B.CUST_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-13::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT A.CUST_ID AS CUST_ID
,B.IF_ELEC AS IF_ELEC
,B.IF_WATER AS IF_WATER
,B.IF_TV AS IF_TV
,B.IF_MOBILE AS IF_MOBILE
,B.IF_WY AS IF_WY
,B.IF_MSG AS IF_MSG
,B.IF_GAS AS IF_GAS
,'1' AS IF_WIRE
,B.SIGN_FLAG AS SIGN_FLAG
,A.FR_ID AS FR_ID
,V_DT AS ST_DATE
FROM (SELECT DISTINCT FR_ID,CIFNO AS CUST_ID
FROM F_CSP_WIRESIGNINFOHIST
WHERE ODS_ST_DATE = V_DT
AND MAINTCODE IN ('A','U')             --A: sign up; U: change; D: terminate
AND STATE = 'N'                        --N: normal; C: closed
) A                                    --Telecom agency-payment signing info history table
LEFT JOIN OCRM_F_CI_CUST_SIGN B        --Customer signing temp table
ON A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
#Task [12] 001-14::
V_STEP = V_STEP + 1
OCRM_F_CI_CUST_SIGN = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_SIGN/*')
OCRM_F_CI_CUST_SIGN.registerTempTable("OCRM_F_CI_CUST_SIGN")
sql = """
SELECT CUST_ID AS CUST_ID
,IF_ELEC AS IF_ELEC
,IF_WATER AS IF_WATER
,IF_TV AS IF_TV
,IF_MOBILE AS IF_MOBILE
,IF_WY AS IF_WY
,IF_MSG AS IF_MSG
,IF_GAS AS IF_GAS
,IF_WIRE AS IF_WIRE
,CONCAT(IF_WY , IF_MOBILE , IF_MSG , IF_ELEC , IF_WATER , IF_GAS , IF_TV , IF_WIRE) AS SIGN_FLAG
,FR_ID AS FR_ID
,ST_DATE AS ST_DATE
FROM OCRM_F_CI_CUST_SIGN A             --Customer signing temp table
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP1 = sqlContext.sql(sql)
OCRM_F_CI_CUST_SIGN_INNTMP1.registerTempTable("OCRM_F_CI_CUST_SIGN_INNTMP1")
sql = """
SELECT DST.CUST_ID                                       --Customer ID:src.CUST_ID
       ,DST.IF_ELEC                                      --Electricity-bill signing flag:src.IF_ELEC
       ,DST.IF_WATER                                     --Water-bill signing flag:src.IF_WATER
       ,DST.IF_TV                                        --Cable-TV signing flag:src.IF_TV
       ,DST.IF_MOBILE                                    --Mobile-banking signing flag:src.IF_MOBILE
       ,DST.IF_WY                                        --Online-banking signing flag:src.IF_WY
       ,DST.IF_MSG                                       --SMS signing flag:src.IF_MSG
       ,DST.IF_GAS                                       --Gas agency-payment signing flag:src.IF_GAS
       ,DST.IF_WIRE                                      --Telecom agency-payment signing flag:src.IF_WIRE
       ,DST.SIGN_FLAG                                    --Signing summary (online banking-mobile banking-SMS-electricity-water-gas-cable TV-telecom):src.SIGN_FLAG
       ,DST.FR_ID                                        --Legal-person (institution) ID:src.FR_ID
       ,DST.ST_DATE                                      --ETL date:src.ST_DATE
FROM OCRM_F_CI_CUST_SIGN DST
LEFT JOIN OCRM_F_CI_CUST_SIGN_INNTMP1 SRC
ON SRC.FR_ID = DST.FR_ID
AND SRC.CUST_ID = DST.CUST_ID
WHERE SRC.FR_ID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
OCRM_F_CI_CUST_SIGN_INNTMP2 = sqlContext.sql(sql)
dfn="OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet"
OCRM_F_CI_CUST_SIGN_INNTMP2=OCRM_F_CI_CUST_SIGN_INNTMP2.unionAll(OCRM_F_CI_CUST_SIGN_INNTMP1)
OCRM_F_CI_CUST_SIGN_INNTMP1.cache()
OCRM_F_CI_CUST_SIGN_INNTMP2.cache()
nrowsi = OCRM_F_CI_CUST_SIGN_INNTMP1.count()
nrowsa = OCRM_F_CI_CUST_SIGN_INNTMP2.count()
#Load data
OCRM_F_CI_CUST_SIGN_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
#Delete the previous backup
ret = os.system("hdfs dfs -rm -r /"+dbname+"/OCRM_F_CI_CUST_SIGN_BK/"+V_DT+".parquet ")
#Back up the latest data
ret = os.system("hdfs dfs -cp -f /"+dbname+"/OCRM_F_CI_CUST_SIGN/"+V_DT+".parquet /"+dbname+"/OCRM_F_CI_CUST_SIGN_BK/"+V_DT+".parquet")
OCRM_F_CI_CUST_SIGN_INNTMP1.unpersist()
OCRM_F_CI_CUST_SIGN_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert OCRM_F_CI_CUST_SIGN lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
|
gpl-3.0
|
GeoscienceAustralia/geodesy-domain-model
|
aws/amazonia/test/sys_tests/test_sys_elb.py
|
2
|
2306
|
#!/usr/bin/python3
from amazonia.classes.elb import Elb
from amazonia.classes.elb_config import ElbConfig, ElbListenersConfig
from network_setup import get_network_config
def main():
network_config, template = get_network_config()
elb_listeners_config = [
ElbListenersConfig(
instance_port='80',
loadbalancer_port='80',
loadbalancer_protocol='HTTP',
instance_protocol='HTTP',
sticky_app_cookie='JSESSION'
),
ElbListenersConfig(
instance_port='8080',
loadbalancer_port='8080',
loadbalancer_protocol='HTTP',
instance_protocol='HTTP',
sticky_app_cookie='SESSIONTOKEN'
)
]
elb_config1 = ElbConfig(
elb_listeners_config=elb_listeners_config,
elb_health_check='HTTP:80/index.html',
elb_log_bucket='my-s3-bucket',
public_unit=False,
ssl_certificate_id=None,
healthy_threshold=10,
unhealthy_threshold=2,
interval=300,
timeout=30
)
elb_config2 = ElbConfig(
elb_listeners_config=elb_listeners_config,
elb_health_check='HTTP:80/index.html',
elb_log_bucket='my-s3-bucket',
public_unit=True,
ssl_certificate_id='arn:aws:acm::tester',
healthy_threshold=10,
unhealthy_threshold=2,
interval=300,
timeout=30
)
elb_config3 = ElbConfig(
elb_listeners_config=elb_listeners_config,
elb_health_check='HTTP:80/index.html',
elb_log_bucket='my-s3-bucket',
public_unit=True,
ssl_certificate_id=None,
healthy_threshold=10,
unhealthy_threshold=2,
interval=300,
timeout=30
)
Elb(title='MyUnit1',
network_config=network_config,
elb_config=elb_config1,
template=template
)
Elb(title='MyUnit2',
network_config=network_config,
elb_config=elb_config2,
template=template
)
network_config.public_hosted_zone_name = None
Elb(title='MyUnit3',
network_config=network_config,
elb_config=elb_config3,
template=template
)
print(template.to_json(indent=2, separators=(',', ': ')))
if __name__ == '__main__':
main()
|
bsd-3-clause
|
mushtaqak/edx-platform
|
openedx/core/djangoapps/profile_images/tests/test_images.py
|
111
|
7026
|
"""
Test cases for image processing functions in the profile image package.
"""
from contextlib import closing
from itertools import product
import os
from tempfile import NamedTemporaryFile
import unittest
from django.conf import settings
from django.core.files.uploadedfile import UploadedFile
from django.test import TestCase
from django.test.utils import override_settings
import ddt
import mock
from PIL import Image
from ..images import (
FILE_UPLOAD_TOO_LARGE,
FILE_UPLOAD_TOO_SMALL,
FILE_UPLOAD_BAD_TYPE,
FILE_UPLOAD_BAD_EXT,
FILE_UPLOAD_BAD_MIMETYPE,
create_profile_images,
ImageValidationError,
remove_profile_images,
validate_uploaded_image,
)
from .helpers import make_image_file, make_uploaded_file
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
class TestValidateUploadedImage(TestCase):
"""
Test validate_uploaded_image
"""
def check_validation_result(self, uploaded_file, expected_failure_message):
"""
Internal DRY helper.
"""
if expected_failure_message is not None:
with self.assertRaises(ImageValidationError) as ctx:
validate_uploaded_image(uploaded_file)
self.assertEqual(ctx.exception.message, expected_failure_message)
else:
validate_uploaded_image(uploaded_file)
self.assertEqual(uploaded_file.tell(), 0)
@ddt.data(
(99, FILE_UPLOAD_TOO_SMALL),
(100, ),
(1024, ),
(1025, FILE_UPLOAD_TOO_LARGE),
)
@ddt.unpack
@override_settings(PROFILE_IMAGE_MIN_BYTES=100, PROFILE_IMAGE_MAX_BYTES=1024)
def test_file_size(self, upload_size, expected_failure_message=None):
"""
Ensure that files outside the accepted size range fail validation.
"""
with make_uploaded_file(
dimensions=(1, 1), extension=".png", content_type="image/png", force_size=upload_size
) as uploaded_file:
self.check_validation_result(uploaded_file, expected_failure_message)
@ddt.data(
(".gif", "image/gif"),
(".jpg", "image/jpeg"),
(".jpeg", "image/jpeg"),
(".png", "image/png"),
(".bmp", "image/bmp", FILE_UPLOAD_BAD_TYPE),
(".tif", "image/tiff", FILE_UPLOAD_BAD_TYPE),
)
@ddt.unpack
def test_extension(self, extension, content_type, expected_failure_message=None):
"""
Ensure that files whose extension is not supported fail validation.
"""
with make_uploaded_file(extension=extension, content_type=content_type) as uploaded_file:
self.check_validation_result(uploaded_file, expected_failure_message)
def test_extension_mismatch(self):
"""
Ensure that validation fails when the file extension does not match the
file data.
"""
# make a bmp, try to fool the function into thinking it's a jpeg
with make_image_file(extension=".bmp") as bmp_file:
with closing(NamedTemporaryFile(suffix=".jpeg")) as fake_jpeg_file:
fake_jpeg_file.write(bmp_file.read())
fake_jpeg_file.seek(0)
uploaded_file = UploadedFile(
fake_jpeg_file,
content_type="image/jpeg",
size=os.path.getsize(fake_jpeg_file.name)
)
with self.assertRaises(ImageValidationError) as ctx:
validate_uploaded_image(uploaded_file)
self.assertEqual(ctx.exception.message, FILE_UPLOAD_BAD_EXT)
def test_content_type(self):
"""
Ensure that validation fails when the content_type header and file
extension do not match
"""
with make_uploaded_file(extension=".jpeg", content_type="image/gif") as uploaded_file:
with self.assertRaises(ImageValidationError) as ctx:
validate_uploaded_image(uploaded_file)
self.assertEqual(ctx.exception.message, FILE_UPLOAD_BAD_MIMETYPE)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
class TestGenerateProfileImages(TestCase):
"""
Test create_profile_images
"""
@ddt.data(
*product(
["gif", "jpg", "png"],
[(1, 1), (10, 10), (100, 100), (1000, 1000), (1, 10), (10, 100), (100, 1000), (1000, 999)],
)
)
@ddt.unpack
def test_generation(self, image_type, dimensions):
"""
Ensure that regardless of the input format or dimensions, the outcome
of calling the function is square jpeg files with explicitly-requested
dimensions being saved to the profile image storage backend.
"""
extension = "." + image_type
content_type = "image/" + image_type
requested_sizes = {
10: "ten.jpg",
100: "hundred.jpg",
1000: "thousand.jpg",
}
mock_storage = mock.Mock()
with make_uploaded_file(dimensions=dimensions, extension=extension, content_type=content_type) as uploaded_file:
with mock.patch(
"openedx.core.djangoapps.profile_images.images.get_profile_image_storage",
return_value=mock_storage,
):
create_profile_images(uploaded_file, requested_sizes)
names_and_files = [v[0] for v in mock_storage.save.call_args_list]
actual_sizes = {}
for name, file_ in names_and_files:
# get the size of the image file and ensure it's square jpeg
with closing(Image.open(file_)) as image_obj:
width, height = image_obj.size
self.assertEqual(width, height)
self.assertEqual(image_obj.format, 'JPEG')
actual_sizes[width] = name
self.assertEqual(requested_sizes, actual_sizes)
mock_storage.save.reset_mock()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
class TestRemoveProfileImages(TestCase):
"""
Test remove_profile_images
"""
def test_remove(self):
"""
Ensure that the outcome of calling the function is that the named images
are deleted from the profile image storage backend.
"""
requested_sizes = {
10: "ten.jpg",
100: "hundred.jpg",
1000: "thousand.jpg",
}
mock_storage = mock.Mock()
with mock.patch(
"openedx.core.djangoapps.profile_images.images.get_profile_image_storage",
return_value=mock_storage,
):
remove_profile_images(requested_sizes)
deleted_names = [v[0][0] for v in mock_storage.delete.call_args_list]
self.assertEqual(requested_sizes.values(), deleted_names)
mock_storage.save.reset_mock()
|
agpl-3.0
|
evilpie/servo
|
python/mach/mach/test/test_entry_point.py
|
121
|
1886
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import imp
import os
import sys
from mach.base import MachError
from mach.test.common import TestBase
from mock import patch
from mozunit import main
here = os.path.abspath(os.path.dirname(__file__))
class Entry():
"""Stub replacement for pkg_resources.EntryPoint"""
def __init__(self, providers):
self.providers = providers
def load(self):
def _providers():
return self.providers
return _providers
class TestEntryPoints(TestBase):
"""Test integrating with setuptools entry points"""
provider_dir = os.path.join(here, 'providers')
def _run_mach(self):
return TestBase._run_mach(self, ['help'], entry_point='mach.providers')
@patch('pkg_resources.iter_entry_points')
def test_load_entry_point_from_directory(self, mock):
# Ensure parent module is present otherwise we'll (likely) get
# an error due to unknown parent.
if b'mach.commands' not in sys.modules:
mod = imp.new_module(b'mach.commands')
sys.modules[b'mach.commands'] = mod
mock.return_value = [Entry(['providers'])]
# Mach error raised due to conditions_invalid.py
with self.assertRaises(MachError):
self._run_mach()
@patch('pkg_resources.iter_entry_points')
def test_load_entry_point_from_file(self, mock):
mock.return_value = [Entry([os.path.join('providers', 'basic.py')])]
result, stdout, stderr = self._run_mach()
self.assertIsNone(result)
self.assertIn('cmd_foo', stdout)
# Not enabled in automation because tests are failing.
#if __name__ == '__main__':
# main()
|
mpl-2.0
|
alphapapa/youtube-dl
|
youtube_dl/extractor/yourupload.py
|
142
|
1537
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class YourUploadIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?
(?:yourupload\.com/watch|
embed\.yourupload\.com|
embed\.yucache\.net
)/(?P<id>[A-Za-z0-9]+)
'''
_TESTS = [
{
'url': 'http://yourupload.com/watch/14i14h',
'md5': '5e2c63385454c557f97c4c4131a393cd',
'info_dict': {
'id': '14i14h',
'ext': 'mp4',
'title': 'BigBuckBunny_320x180.mp4',
'thumbnail': 're:^https?://.*\.jpe?g',
}
},
{
'url': 'http://embed.yourupload.com/14i14h',
'only_matching': True,
},
{
'url': 'http://embed.yucache.net/14i14h?client_file_id=803349',
'only_matching': True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
embed_url = 'http://embed.yucache.net/{0:}'.format(video_id)
webpage = self._download_webpage(embed_url, video_id)
title = self._og_search_title(webpage)
video_url = self._og_search_video_url(webpage)
thumbnail = self._og_search_thumbnail(webpage, default=None)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
'http_headers': {
'Referer': embed_url,
},
}
|
unlicense
|
andreadelprete/pinocchio_inv_dyn
|
python/pinocchio_inv_dyn/multi_contact/bezier/bezier_0_step_capturability.py
|
1
|
21165
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 16:54:39 2016
@author: stonneau
"""
from pinocchio_inv_dyn.optimization.solver_LP_abstract import LP_status, LP_status_string
from pinocchio_inv_dyn.multi_contact.stability_criterion import Bunch
from pinocchio_inv_dyn.optimization.solver_LP_abstract import getNewSolver
from pinocchio_inv_dyn.abstract_solver import AbstractSolver as qp_solver
from spline import bezier, bezier6, polynom, bernstein
from numpy import array, vstack, zeros, ones, sqrt, matrix, asmatrix, asarray, identity
from numpy import cross as X
from numpy.linalg import norm
import numpy as np
from math import atan, pi, sqrt
import cProfile
np.set_printoptions(precision=2, suppress=True, linewidth=100);
from centroidal_dynamics import *
__EPS = 1e-8;
def skew(x):
res = zeros([3,3])
res[0,0] = 0; res[0,1] = -x[2]; res[0,2] = x[1];
res[1,0] = x[2]; res[1,1] = 0 ; res[1,2] = -x[0];
res[2,0] = -x[1]; res[2,1] = x[0]; res[2,2] = 0 ;
return res
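# Note: skew(x) is the cross-product matrix of x, i.e. skew(x).dot(y) equals cross(x, y).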
def splitId(v):
dim = v.shape[0]
res = zeros([dim,dim]);
for i in range(dim):
res[i,i] = v[i]
return res
def __init_6D():
return zeros([6,3]), zeros(6)
def normalize(A,b=None):
    null_rows = []
    for i in range (A.shape[0]):
        n_A = norm(A[i,:])
        if(n_A != 0.):
            A[i,:] = A[i,:] / n_A
            if b is not None:
                b[i] = b[i] / n_A
    if b is None:
        return A
    return A, b
##
# Given a list of contact points
# as well as a list of associated normals
# compute the gravito inertial wrench cone
# \param p array of 3d contact positions
# \param N array of 3d contact normals
# \param mass mass of the robot
# \param mu friction coefficient
# \return the CWC H, H w <= 0, where w is the wrench. [WARNING!] TODO: The H matrix is such that
# the wrench w is the one present in the ICRA paper 15 of del prete et al., contrary to the current c++ implementation
def compute_CWC(p, N, mass, mu, T=None):
__cg = 4; #num generators per contact
eq = Equilibrium("dyn_eq2", mass, __cg)
if T == None:
eq.setNewContacts(asmatrix(p),asmatrix(N),mu,EquilibriumAlgorithm.EQUILIBRIUM_ALGORITHM_PP)
else:
eq.setNewContactsWithTangents(asmatrix(p),asmatrix(N),asmatrix(T),mu,EquilibriumAlgorithm.EQUILIBRIUM_ALGORITHM_PP)
H, h = eq.getPolytopeInequalities()
assert(norm(h) < __EPS), "h is not equal to zero"
return normalize(np.squeeze(np.asarray(-H)))
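# Example usage (a sketch with made-up numbers): two coplanar contact points
# with vertical normals, a 54 kg robot and a friction coefficient of 0.5.
#   p = array([[ 0.1,  0.2, 0.], [-0.1,  0.2, 0.]])
#   N = array([[ 0.,   0.,  1.], [ 0.,   0.,  1.]])
#   H = compute_CWC(p, N, mass=54., mu=0.5)
# A centroidal wrench w is then considered feasible iff H.dot(w) <= 0 row-wise.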
#################################################
# global constant bezier variables and methods ##
#################################################
def w0(p0, p1, g, p0X, p1X, gX, alpha):
wx, ws = __init_6D()
wx[:3,:] = 6*alpha*identity(3); wx[3:,:] = 6*alpha*p0X;
ws[:3] = 6*alpha*(p0 - 2*p1)
ws[3:] = X(-p0, 12*alpha*p1 + g )
return (wx, ws)
def w1(p0, p1, g, p0X, p1X, gX, alpha):
wx, ws = __init_6D()
wx[:3,:] = 3*alpha*identity(3);
wx[3:,:] = skew(1.5 * (3*p1 - p0))*alpha
ws[:3] = 1.5 *alpha* (3*p0 - 5*p1);
ws[3:] = X(3*alpha*p0, -p1) + 0.25 * (gX.dot(3*p1 + p0))
return (wx, ws)
def w2(p0, p1, g, p0X, p1X, gX, alpha):
wx, ws = __init_6D()
#~ wx[:3,:] = 0;
wx[3:,:] = skew(0.5*g - 3*alpha* p0 + 3*alpha*p1)
ws[:3] = 3*alpha*(p0 - p1);
ws[3:] = 0.5 * gX.dot(p1)
return (wx, ws)
def w3(p0, p1, g, p0X, p1X, gX, alpha):
wx, ws = __init_6D()
wx[:3,:] = -3*alpha* identity(3);
wx[3:,:] = skew(g - 1.5 *alpha* (p1 + p0))
ws[:3] = 1.5*alpha * (p1 + p0)
#~ ws[3:] = 0
return (wx, ws)
def w4(p0, p1, g, p0X, p1X, gX, alpha):
wx, ws = __init_6D()
wx[:3,:] = -6*alpha *identity(3);
wx[3:,:] = skew(g - 6*alpha* p1)
ws[:3] = 6*alpha*p1
#~ ws[3:] = 0
return (wx, ws)
#angular momentum waypoints
def u0(l0, alpha):
ux, us = __init_6D()
ux[3:] = identity(3)* 3 * alpha
us[3:] = -3*alpha*l0[:]
return (ux, us)
def u1(l0, alpha):
ux, us = __init_6D()
us[3:] = -1.5*l0*alpha
return (ux, us)
def u2(l0, alpha):
ux, us = __init_6D()
ux[3:] = identity(3)* (-1.5) * alpha
us[3:] = -l0 / 2. * alpha
return (ux, us)
def u3(l0, alpha):
ux, us = __init_6D()
ux[3:] = identity(3)* (-1.5) * alpha
return (ux, us)
def u4(l0, alpha):
ux, us = __init_6D()
return (ux, us)
wis = [w0,w1,w2,w3,w4]
uis = [u0,u1,u2,u3,u4]
b4 = [bernstein(4,i) for i in range(5)]
def c_of_t(curve, T):
def _eval(t):
return asarray(curve(t/T)).flatten()
return _eval
def dc_of_t(curve, T):
def _eval(t):
return 1/T * asarray(curve(t/T)).flatten()
return _eval
def ddc_of_t(curve, T):
def _eval(t):
return 1/(T*T) * asarray(curve(t/T)).flatten()
return _eval
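# The three helpers above convert a bezier curve defined on s in [0, 1] into a
# time-parameterized callable on [0, T]: with c(t) = curve(t/T), the chain rule
# gives dc/dt = (1/T) * curve'(t/T) and d^2c/dt^2 = (1/T^2) * curve''(t/T),
# which is why dc_of_t and ddc_of_t scale by 1/T and 1/(T*T) respectively.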
#################################################
# BezierZeroStepCapturability ##
#################################################
class BezierZeroStepCapturability(object):
_name = ""
_maxIter = 0;
_verb = 0;
_com_acc_solver = None;
_c0 = None;
_dc0 = None;
_computationTime = 0.0;
_outerIterations = 0;
_innerIterations = 0;
def __init__ (self, name, c0, dc0, contact_points, contact_normals, mu, g, mass, kinematic_constraints = None, angular_momentum_constraints = None,
contactTangents = None, maxIter=1000, verb=0, regularization=1e-5, solver='qpoases'):
''' Constructor
@param c0 Initial CoM position
@param dc0 Initial CoM velocity
@param contact points A matrix containing the contact points
@param contact normals A matrix containing the contact normals
@param mu Friction coefficient (either a scalar or an array)
@param g Gravity vector
@param mass The robot mass
@param kinematic constraints couple [A,b] such that the com is constrained by A x <= b
@param regularization Weight of the force minimization, the higher this value, the sparser the solution
'''
assert mass>0.0, "Mass is not positive"
assert mu>0.0, "Friction coefficient is not positive"
assert np.asarray(c0).squeeze().shape[0]==3, "Com position vector has not size 3"
assert np.asarray(dc0).squeeze().shape[0]==3, "Com velocity vector has not size 3"
assert np.asarray(contact_points).shape[1]==3, "Contact points have not size 3"
assert np.asarray(contact_normals).shape[1]==3, "Contact normals have not size 3"
        assert np.asarray(contact_points).shape[0]==np.asarray(contact_normals).shape[0], "The number of contact points does not match the number of contact normals"
self._name = name;
self._maxIter = maxIter;
self._verb = verb;
self._c0 = np.asarray(c0).squeeze().copy();
self._dc0 = np.asarray(dc0).squeeze().copy();
self._mass = mass;
self._g = np.asarray(g).squeeze().copy();
self._gX = skew(self._g )
# self._regularization = regularization;
self.set_contacts(contact_points, contact_normals, mu, contactTangents)
if kinematic_constraints != None:
self._kinematic_constraints = kinematic_constraints[:]
else:
self._kinematic_constraints = None
if angular_momentum_constraints != None:
self._angular_momentum_constraints = angular_momentum_constraints[:]
else:
self._angular_momentum_constraints = None
self._lp_solver = getNewSolver('qpoases', "name", useWarmStart=False, verb=0)
self._qp_solver = qp_solver(3, 0, solver='qpoases', accuracy=1e-6, maxIter=100, verb=0)
def init_bezier(self, c0, dc0, n, T =1.):
self._n = n
self._p0 = c0[:]
self._p1 = dc0 * T / n + self._p0
self._p0X = skew(c0)
self._p1X = skew(self._p1)
def set_contacts(self, contact_points, contact_normals, mu, contactTangents):
self._contact_points = np.asarray(contact_points).copy();
self._contact_normals = np.asarray(contact_normals).copy();
self._mu = mu;
self._H = compute_CWC(self._contact_points, self._contact_normals, self._mass, mu, contactTangents)#CWC inequality matrix
def __compute_wixs(self, T, num_step = -1):
alpha = 1. / (T*T)
wps = [wi(self._p0, self._p1, self._g, self._p0X, self._p1X, self._gX, alpha) for wi in wis]
if num_step > 0:
dt = (1./float(num_step))
wps_bern = [ [ (b(i*dt)*wps[idx][0], b(i*dt)*wps[idx][1]) for idx,b in enumerate(b4)] for i in range(num_step + 1) ]
wps = [reduce(lambda a, b : (a[0] + b[0], a[1] + b[1]), wps_bern_i) for wps_bern_i in wps_bern]
return wps
#angular momentum waypoints
def __compute_uixs(self, l0, T, num_step = -1):
alpha = 1. / (T)
wps = [ui(l0, alpha) for ui in uis]
if num_step > 0:
dt = (1./float(num_step))
wps_bern = [ [ (b(i*dt)*wps[idx][0], b(i*dt)*wps[idx][1]) for idx,b in enumerate(b4)] for i in range(num_step + 1) ]
wps = [reduce(lambda a, b : (a[0] + b[0], a[1] + b[1]), wps_bern_i) for wps_bern_i in wps_bern]
return wps
def _init_matrices_AL_bL(self, ups, A, b):
dimL = 0
if self._angular_momentum_constraints != None:
dimL = self._angular_momentum_constraints[0].shape[0]
AL = zeros([A.shape[0]+dimL, 6]);
bL = zeros([A.shape[0]+dimL ]);
AL[:A.shape[0],:3] = A[:]
bL[:b.shape[0] ] = b[:]
return AL,bL
def __add_angular_momentum(self,A,b,l0, T, num_steps):
ups = self.__compute_uixs(l0, T ,num_steps)
AL, bL = self._init_matrices_AL_bL(ups, A, b)
dimH = self._H.shape[0]
#final matrix has num rows equal to initial matrix rows + angular momentum constraints
# the angular momentum constraints are added AFTER the eventual kinematic ones
for i, (uxi, usi) in enumerate(ups):
AL[i*dimH : (i+1)*dimH, 3:] = self._H.dot(uxi) #constant part of A, Ac = Ac * wxi
bL[i*dimH : (i+1)*dimH ] += self._H.dot(-usi)
if self._angular_momentum_constraints != None:
dimL = self._angular_momentum_constraints[0].shape[0]
AL[-dimL:,3:] = self._angular_momentum_constraints[0][:]
bL[-dimL: ] = self._angular_momentum_constraints[1][:]
AL, bL = normalize(AL,bL)
return AL, bL
def __add_kinematic_and_normalize(self,A,b, norm = True):
if self._kinematic_constraints != None:
dim_kin = self._kinematic_constraints[0].shape[0]
A[-dim_kin:,:] = self._kinematic_constraints[0][:]
b[-dim_kin:] = self._kinematic_constraints[1][:]
if(norm):
A, b = normalize(A,b)
return A, b
def _compute_num_steps(self, T, time_step):
num_steps = -1
if(time_step > 0.):
num_steps = int(T / time_step)
return num_steps
def _init_matrices_A_b(self, wps):
dim_kin = 0
dimH = self._H.shape[0]
if self._kinematic_constraints != None:
dim_kin = self._kinematic_constraints[0].shape[0]
A = zeros([dimH * len(wps)+dim_kin,3])
b = zeros(dimH * len(wps)+ dim_kin)
return A,b
def compute_6d_control_point_inequalities(self, T, time_step = -1., l0 = None):
        ''' Compute the inequality matrices that determine the 6D bezier curve w(t)
        as a function of the variable waypoint of the 3D COM trajectory.
        The initial curve is of degree 3 (init pos and velocity, 0 end-velocity constraints + one free waypoint).
        The 6d curve is of degree 2*n-2 = 4, thus 5 control points are to be computed.
        Each control point yields a 6 x 3 inequality matrix wxi and a 6 x 1 right-hand-side vector wsi.
        Premultiplying by H gives mH wxi * x <= mH wsi, where m is the mass.
        Stacking all of these gives a big inequality matrix Ain and a right-hand-side vector Aub
        that constrain the free waypoint x of the 6d curve: Ain x <= Aub
'''
self.init_bezier(self._c0, self._dc0, 3, T)
dimH = self._H.shape[0]
mH = self._mass *self._H
num_steps = self._compute_num_steps(T, time_step)
wps = self.__compute_wixs(T ,num_steps)
A,b = self._init_matrices_A_b(wps)
bc = np.concatenate([self._g,zeros(3)]) #constant part of Aub, Aubi = mH * (bc - wsi)
for i, (wxi, wsi) in enumerate(wps):
A[i*dimH : (i+1)*dimH, : ] = mH.dot(wxi) #constant part of A, Ac = Ac * wxi
b[i*dimH : (i+1)*dimH ] = mH.dot(bc - wsi)
use_angular_momentum = l0 != None
A,b = self.__add_kinematic_and_normalize(A,b, not use_angular_momentum)
if use_angular_momentum:
A,b = self.__add_angular_momentum(A,b, l0, T, num_steps)
self.__Ain = A[:]; self.__Aub = b[:]
def _solve(self, dim_pb, l0, asLp = False, guess = None ):
cost = 0
if asLp:
c = zeros(dim_pb); c[2] = -1
(status, x, y) = self._lp_solver.solve(c, lb= -100. * ones(dim_pb), ub = 100. * ones(dim_pb),
A_in=self.__Ain, Alb=-100000.* ones(self.__Ain.shape[0]), Aub=self.__Aub,
A_eq=None, b=None)
return status, x, cost, self._lp_solver.getLpTime()
else:
#~ self._qp_solver = qp_solver(dim_pb, self.__Ain.shape[0], solver='qpoases', accuracy=1e-6, maxIter=100, verb=0)
self._qp_solver.changeInequalityNumber(self.__Ain.shape[0], dim_pb)
#weight_dist_or = 0.001
weight_dist_or = 0
D = identity(dim_pb);
alpha = sqrt(12./5.)
for i in range(3):
D[i,i] = weight_dist_or
d = zeros(dim_pb);
d[:3]= self._p0 * weight_dist_or
if(l0 != None):
# minimizing integral of angular momentum
for i in range(3,6):
D[i,i] = alpha
d[3:]= (9.* l0) / (5. * alpha)
D = (D[:]); d = (d[:]); A = (self.__Ain[:]);
lbA = (-100000.* ones(self.__Ain.shape[0]))[:]; ubA=(self.__Aub);
lb = (-100. * ones(dim_pb))[:]; ub = (100. * ones(dim_pb))[:];
self._qp_solver.setProblemData(D = D , d = d, A=A, lbA=lbA, ubA=ubA, lb = lb, ub = ub, x0=None)
(x, imode) = self._qp_solver.solve(D = D , d = d, A=A, lbA=lbA, ubA=ubA, lb = lb, ub = ub, x0=None)
if l0 == None:
cost = norm(self._p0 - x)
else:
cost = (1./5.)*(9.*l0.dot(l0) - 9.*l0.dot(x[3:]) + 6.*x[3:].dot(x[3:]))
return imode, x, cost , self._qp_solver.qpTime
def can_I_stop(self, c0=None, dc0=None, T=1., MAX_ITER=None, time_step = -1, l0 = None, asLp = False):
''' Determine whether the system can come to a stop without changing contacts.
Keyword arguments:
c0 -- initial CoM position
dc0 -- initial CoM velocity
T -- the EXACT given time to stop
        time_step -- if negative, a continuous formulation is used
        to guarantee that the trajectory is feasible. If > 0, a discretized
        approach is used to validate the trajectory. This allows control
        points to lie outside the cone, which is supposed to enlarge the
        solution space.
        l0 -- if None, angular momentum is not considered and is set to 0. Otherwise
        it becomes a variable of the problem and l0 is the initial angular momentum.
        asLp -- if True, the problem is solved as an LP. If False, it is solved as a QP that
        minimizes the distance to the original point (weight 0.001) and the angular momentum if applicable (weight 1.)
Output: An object containing the following member variables:
is_stable -- boolean value
c -- final com position
dc -- final com velocity. [WARNING] if is_stable is False, not used
ddc_min -- [WARNING] Not relevant (used)
t -- always T (Bezier curve)
computation_time -- time taken to solve all the LPs
        c_of_t, dc_of_t, ddc_of_t -- trajectory and its derivatives as functions of time
        dL_of_t -- trajectory of the angular momentum derivative along time
        wps -- waypoints of the solution bezier curve c*(s)
        wpsL -- waypoints of the solution angular momentum curve L*(s); zero if no angular momentum
        wpsdL -- waypoints of the solution angular momentum derivative curve dL*(s); zero if no angular momentum
'''
if T <=0.:
            raise ValueError('T must be strictly positive')
print "\n *** [WARNING] In bezier step capturability: you set a T_0 or MAX_ITER value, but they are not used by the algorithm"
if MAX_ITER !=None:
print "\n *** [WARNING] In bezier step capturability: you set a T_0 or MAX_ITER value, but they are not used by the algorithm"
if(c0 is not None):
assert np.asarray(c0).squeeze().shape[0]==3, "CoM has not size 3"
self._c0 = np.asarray(c0).squeeze().copy();
if(dc0 is not None):
assert np.asarray(dc0).squeeze().shape[0]==3, "CoM velocity has not size 3"
self._dc0 = np.asarray(dc0).squeeze().copy();
if((c0 is not None) or (dc0 is not None)):
            self.init_bezier(self._c0, self._dc0, self._n)
''' Solve the linear program
minimize c' x
subject to Alb <= A_in x <= Aub
A_eq x = b
lb <= x <= ub
Return a tuple containing:
status flag
primal solution
dual solution
'''
use_angular_momentum = l0 != None
# for the moment c is random stuff.
dim_pb = 6 if use_angular_momentum else 3
c = zeros(dim_pb); c[2] = -1
wps = self.compute_6d_control_point_inequalities(T, time_step, l0)
status, x, cost, comp_time = self._solve(dim_pb, l0, asLp)
is_stable=status==LP_status.LP_STATUS_OPTIMAL
wps = [self._p0,self._p1,x[:3],x[:3]];
wpsL = [zeros(3) if not use_angular_momentum else l0[:], zeros(3) if not use_angular_momentum else x[-3:] ,zeros(3),zeros(3)];
wpsdL = [3*(wpsL[1] - wpsL[0]) ,3*(- wpsL[1]), zeros(3)];
c_of_s = bezier(matrix([pi.tolist() for pi in wps]).transpose())
dc_of_s = c_of_s.compute_derivate(1)
ddc_of_s = c_of_s.compute_derivate(2)
dL_of_s = bezier(matrix([pi.tolist() for pi in wpsdL]).transpose())
L_of_s = bezier(matrix([pi.tolist() for pi in wpsL]).transpose())
return Bunch(is_stable=is_stable, c=x[:3], dc=zeros(3),
computation_time = comp_time, ddc_min=0.0, t = T,
c_of_t = c_of_t(c_of_s, T), dc_of_t = dc_of_t(dc_of_s, T), ddc_of_t = c_of_t(ddc_of_s, T), dL_of_t = dc_of_t(dL_of_s, T), L_of_t = c_of_t(L_of_s, T),
cost = cost, wps = wps, wpsL = wpsL, wpsdL = wpsdL);
def predict_future_state(self, t_pred, c0=None, dc0=None, MAX_ITER=1000):
''' Compute what the CoM state will be at the specified time instant if the system
applies maximum CoM deceleration parallel to the current CoM velocity
Keyword arguments:
t_pred -- Future time at which the prediction is made
c0 -- initial CoM position
dc0 -- initial CoM velocity
Output: An object with the following member variables:
t -- time at which the integration has stopped (equal to t_pred, unless something went wrong)
c -- final com position
dc -- final com velocity
'''
        raise NotImplementedError('predict_future_state is not implemented yet.')
|
gpl-2.0
|
Diti24/python-ivi
|
ivi/chroma/chroma62012p8060.py
|
1
|
1825
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .chroma62000p import *
class chroma62012p8060(chroma62000p):
"Chroma ATE 62012P-80-60 series IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '62012P-80-60')
super(chroma62012p8060, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P80V': (80.0, 60.0)
},
'ovp_max': 88.0, # 1.1 x max voltage
'ocp_max': 63.0, # 1.05 x max current
'voltage_max': 80.0,
'current_max': 60.0
}
]
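# Example usage (a sketch; the VISA resource string is made up and the attribute
# names assume the generic python-ivi DC power supply interface inherited from
# chroma62000p):
#   import ivi
#   psu = ivi.chroma.chroma62012p8060("TCPIP0::192.168.1.5::INSTR")
#   psu.outputs[0].voltage_level = 12.0
#   psu.outputs[0].enabled = True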
|
mit
|
iamthebest77/openx
|
enabler_connection.py
|
4
|
2500
|
import socket
import threading
import json
class EnablerConnection():
def __init__(self):
self.connections = []
self.stopped = False
self.enabler_listening_port = 50001
self.local_ip = socket.gethostbyname(socket.gethostname())
t = threading.Thread(target=self.listen_loop, name='0.0.0.0',
args=('0.0.0.0',))
t.setDaemon(True)
t.start()
def send(self, outString):
for socket_handler in self.connections:
try:
socket_handler.send(outString)
except Exception as e:
# TODO: Isolate dropped connection, recover from other things.
# For now, no recovery. If ANYTHING goes wrong, drop the
# connection.
print("Exception while sending data: %s" % e)
self.connections.remove(socket_handler)
print("Connection dropped.")
def listen_loop(self, this_ip):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((this_ip, self.enabler_listening_port))
s.listen(1)
print("Listening for OpenXC Enabler on " + this_ip + ":" +
str(self.enabler_listening_port))
while True:
conn, addr = s.accept()
print("New connection to " + this_ip + " from " + str(addr))
handler = SocketHandler(conn, addr)
handler.start()
self.connections.append(handler)
def send_measurement(self, name, value, event=None):
data = {'name':name,'value':value}
if event is not None and event != '':
data['event'] = event
self.send(json.dumps(data) + '\x00')
def received_messages(self):
all_received_data = ''.join(handler.received_command_data for handler in
self.connections)
return all_received_data.split('\0')
class SocketHandler(threading.Thread):
def __init__(self, connection, address):
super(SocketHandler, self).__init__()
self.daemon = True
self.connection = connection
self.address = address
self.received_command_data = ""
def send(self, data):
self.connection.sendall(data)
def run(self):
while True:
data = self.connection.recv(1024)
if not data:
break
else:
self.received_command_data += data
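# Example usage (a sketch; assumes an OpenXC Enabler app connects to port 50001):
#   connection = EnablerConnection()
#   connection.send_measurement('vehicle_speed', 42.0)   # one JSON object + '\x00'
#   commands = connection.received_messages()            # NUL-separated strings received so far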
|
bsd-3-clause
|
dimagi/rapidsms-core
|
lib/rapidsms/parsers/keyworder.py
|
2
|
3099
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
class Keyworder(object):
TOKEN_MAP = [
("slug", "([a-z0-9\-]+)"),
("letters", "([a-z]+)"),
("numbers", "(\d+)"),
("whatever", "(.+)")]
def __init__(self):
self.regexen = []
self.prefix = ""
self.pattern = "^%s$"
def prepare(self, prefix, suffix):
# no prefix is defined, so match
# only the suffix (so simple!)
if prefix == "":
str = suffix
# we have a prefix, but no suffix,
# so accept JUST the prefix
elif suffix == "":
str = prefix
# the most common case; we have both a
        # prefix and suffix, so simply join
# them with a space
else: str = prefix + " " + suffix
# also assume that one space means
# "any amount of whitespace"
str = str.replace(" ", "\s+")
# replace friendly tokens with real chunks
# of regex, to make the patterns more readable
for token, regex in self.TOKEN_MAP:
str = str.replace("(%s)" % token, regex)
return re.compile(self.pattern % str, re.IGNORECASE)
def __call__(self, *regex_strs):
def decorator(func):
# make the current prefix into something
# iterable (so multiple prefixes can be
# specified as list, or single as string)
prefixen = self.prefix
if not hasattr(self.prefix, "__iter__"):
prefixen = [self.prefix]
# store all of the regular expressions which
# will match this function, as attributes on
# the function itself
if not hasattr(func, "regexen"):
setattr(func, "regexen", [])
# iterate and add all combinations of
# prefix and regex for this keyword
for prefix in prefixen:
for rstr in regex_strs:
regex = self.prepare(prefix, rstr)
getattr(func, "regexen").append(regex)
#print "Handler: %s" % regex.pattern
self.regexen.append((regex, func))
return func
return decorator
def match(self, sself, str):
print "\n\nMATCHING\n\n: %s" % str
for pat, func in self.regexen:
match = pat.match(str)
if match:
# clean up leading and trailing whitespace
# note: match groups can be None, hence the and/or business
groups = map(lambda x: x and x.strip() or x, match.groups())
return (func, groups)
# TODO proper logging??
#print "No method called %s" % (str)
# a semantic way to add a default
# handler (when nothing else is matched)
def blank(self):
return self.__call__("")
# another semantic way to add a catch-all
# most useful with a prefix for catching
# invalid syntax and responding with help
def invalid(self):
return self.__call__("(whatever)")
|
lgpl-3.0
|
zmeda/web-summit-2015-recap-zalando
|
node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py
|
388
|
47235
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
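# A few illustrative cases (shown as Python string literals):
#   QuoteForRspFile('foo bar')   -> '"foo bar"'
#   QuoteForRspFile('100%')      -> '"100%%"'
#   QuoteForRspFile('say "hi"')  -> '"say \\"hi\\""'
#   EncodeRspFileList(['cl', '/Fo:out dir\\x.obj']) -> 'cl "/Fo:out dir\\x.obj"'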
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
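# For example, _GenericRetrieve({'a': {'b': 3}}, 0, ['a', 'b']) returns 3, while
# _GenericRetrieve({'a': {}}, 0, ['a', 'b']) falls back to the default 0.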
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
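# Illustrative example (editor's sketch):
#   AdjustLibraries(['-lfoo', 'bar.lib', 'baz'])  ->  ['foo.lib', 'bar.lib', 'baz.lib']
# -lfoo is stripped of its -l prefix, and anything without a .lib suffix gets one.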
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There are two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
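# Illustrative example (editor's sketch): with msvs_target_platform set to
# 'x64' for the 'Debug' configuration, _TargetConfig('Debug') returns
# 'Debug_x64'; conversely, an x86 target seen via 'Release_x64' is mapped
# back to 'Release'.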
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overridden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
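# Illustrative example (editor's sketch): a config whose VCCLCompilerTool
# settings contain {'Optimization': '2', 'WarningLevel': '4',
# 'WarnAsError': 'true', 'RuntimeLibrary': '2'} would contribute the flags
# /O2, /W4, /WX and /MD (among others), plus any /wd flags from
# msvs_disabled_warnings and /FS when targeting VS 2013/2015.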
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
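# Illustrative example (editor's sketch, hypothetical paths): for
# args=['./do_thing.sh', 'out\\file.txt'] and path_to_base='..\\..', the
# generated command resembles
#   call "<cygwin_dir>\setup_env.bat" && set CYGWIN=nontsec && bash -c "cd ../.. ; './do_thing.sh' 'out/file.txt'"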
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset or set to 1, we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
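# Illustrative example (editor's sketch): with no VCMIDLTool overrides, the
# implicit outputs are ${root}.h, dlldata.c, ${root}_i.c and ${root}_p.c,
# where ${root} is later expanded to the idl file's basename (the .tlb is
# intentionally excluded, as noted above); the flags for an x86 config are
# /char signed /env win32 /Oicf.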
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
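# Illustrative example (editor's sketch):
#   ExpandMacros('$(ProjectName)_lib', {'$(ProjectName)': 'base'})  ->  'base_lib'
# Strings that contain no '$' are returned unchanged.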
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
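# Illustrative example (editor's sketch): {'SYSTEMROOT': 'C:\\Windows'} is
# formatted as 'SYSTEMROOT=C:\\Windows\0\0' -- each key=value pair is
# terminated by a NUL and the whole block ends with one extra NUL.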
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
meet your requirements (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to gyp to suppress file
generation and instead supply environment files you have prepared yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
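# Illustrative example (editor's sketch): on a 64-bit Windows host running a
# 32-bit Python, PROCESSOR_ARCHITECTURE is 'x86' but PROCESSOR_ARCHITEW6432
# is 'AMD64', so MSVS_OS_BITS ends up as 64.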
|
mit
|
OaklandPeters/recursor
|
recursor/devspace/predicate_idea.py
|
1
|
2552
|
"""
Desired syntax:
guardian = MaxDepth(10) & HasAttr('__dict__')
if guardian(current, path):
# ....
walker(obj, guard=MaxDepth(10) & HasAttr('__dict__'))
@note: I put example wrapper decorators at the bottom.
@todo: Come up with simple-ish logical structure alternative to the wrapper decorators
... the point is that they be iterable/viewable AND callable
@todo: ~a partial application class that lets you iterate over the arguments
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Callable
class PredicateInterface(Callable):
__metaclass__ = ABCMeta
# def __init__(self, checker):
# assert isinstance(checker, Callable)
# self.checker = checker
@abstractmethod
def checker(self):
return NotImplemented
def __call__(self, *args, **kwargs):
return self.checker(*args, **kwargs)
def __or__(self, other):
assert isinstance(other, Callable)
# replace this inner function with a wrapper
call_or = lambda *args, **kwargs: self(*args, **kwargs) | other(*args, **kwargs)
# def call_or(*args, **kwargs):
# return self(*args, **kwargs) | other(*args, **kwargs)
return Predicate(call_or)
def __and__(self, other):
assert isinstance(other, Callable)
call_and = lambda *args, **kwargs: self(*args, **kwargs) & other(*args, **kwargs)
# ... I don't think the lambda binds the closure properly, so have to use a function....
# def call_and(*args, **kwargs):
# return self(*args, **kwargs) & other(*args, **kwargs)
return Predicate(call_and)
def __invert__(self):
call_not = lambda *args, **kwargs: not self(*args, **kwargs)
return Predicate(call_not)
class Predicate(PredicateInterface):
def __init__(self, checker):
assert isinstance(checker, Callable)
self._checker = checker
def checker(self, *args, **kwargs):
return self._checker(*args, **kwargs)
# wrapping these is slightly inferior to a logic tree structure
# ... since I can't view the structure without calling it
def wrap_or(left, right):
def call_or(*args, **kwargs):
return left(*args, **kwargs) | right(*args, **kwargs)
call_or.__iter__ = iter([left, right])
return call_or
def wrap_and(left, right):
def call_and(*args, **kwargs):
return left(*args, **kwargs) & right(*args, **kwargs)
call_and.__iter__ = iter([left, right])
return call_and
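# --- Editor's usage sketch (not part of the original file) ---
# A minimal illustration, assuming the Predicate class above, of how the
# overloaded &, | and ~ operators compose checkers:
is_positive = Predicate(lambda x: x > 0)
is_small = Predicate(lambda x: x < 10)
both = is_positive & is_small    # Predicate wrapping call_and
either = is_positive | is_small  # Predicate wrapping call_or
neither = ~both                  # Predicate wrapping call_not
assert both(5) and not both(50)
assert either(-3)
assert not neither(5)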
|
mit
|
kcompher/topik
|
topik/models.py
|
1
|
2641
|
from __future__ import absolute_import
import logging
import gensim
import pandas as pd
# imports used only for doctests
from topik.readers import read_input
from topik.tests import test_data_path
from topik.preprocessing import preprocess
class LDA(object):
"""A high-level interface for an LDA (Latent Dirichlet Allocation) model.
Parameters
----------
corpus_file: string
Location of the corpus serialized in Matrix Market format
dict_file: string
Location of the dictionary
>>> raw_data = read_input(
'{}/test_data_json_stream.json'.format(test_data_path),
content_field="abstract")
>>> processed_data = preprocess(raw_data)
>>> my_lda = LDA(processed_data)
"""
def __init__(self, corpus_file, dict_file, ntopics=10, **kwargs):
self.corpus = gensim.corpora.MmCorpus(corpus_file)
self.dictionary = gensim.corpora.Dictionary.load(dict_file)
self.model = gensim.models.LdaModel(self.corpus, num_topics=ntopics, id2word=self.dictionary, **kwargs)
def save(self, filename):
self.model.save(filename)
def get_top_words(self, topn):
top_words = [self.model.show_topic(topicno, topn) for topicno in range(self.model.num_topics)]
return top_words
def termite_data(self, filename="termite.csv", topn_words=15):
"""Generate the csv file input for the termite plot.
Parameters
----------
filename: string
Desired name for the generated csv file
>>> raw_data = read_input(
'{}/test_data_json_stream.json'.format(test_data_path),
content_field="text")
>>> processed_data = preprocess(raw_data)
>>> my_lda = LDA(processed_data)
>>> my_lda.termite_data('termite.csv', 15)
"""
logging.info("generating termite plot input from %s " % self.corpus)
top_words = self.get_top_words(topn_words)
count = 1
for topic in top_words:
if count == 1:
df_temp = pd.DataFrame(topic, columns=['weight', 'word'])
df_temp['topic'] = pd.Series(count, index=df_temp.index)
df = df_temp
else:
df_temp = pd.DataFrame(topic, columns=['weight', 'word'])
df_temp['topic'] = pd.Series(count, index=df_temp.index)
df = df.append(df_temp, ignore_index=True)
count += 1
logging.info("saving termite plot input csv file to %s " % filename)
df.to_csv(filename, index=False, encoding='utf-8')
return df
|
bsd-3-clause
|
B-MOOC/edx-platform
|
lms/djangoapps/bulk_email/migrations/0001_initial.py
|
182
|
6854
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseEmail'
db.create_table('bulk_email_courseemail', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('sender', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User'], null=True, blank=True)),
('hash', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('html_message', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('to', self.gf('django.db.models.fields.CharField')(default='myself', max_length=64)),
))
db.send_create_signal('bulk_email', ['CourseEmail'])
# Adding model 'Optout'
db.create_table('bulk_email_optout', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
))
db.send_create_signal('bulk_email', ['Optout'])
# Adding unique constraint on 'Optout', fields ['email', 'course_id']
db.create_unique('bulk_email_optout', ['email', 'course_id'])
def backwards(self, orm):
# Removing unique constraint on 'Optout', fields ['email', 'course_id']
db.delete_unique('bulk_email_optout', ['email', 'course_id'])
# Deleting model 'CourseEmail'
db.delete_table('bulk_email_courseemail')
# Deleting model 'Optout'
db.delete_table('bulk_email_optout')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'to': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email']
|
agpl-3.0
|
eepalms/gem5-newcache
|
tests/configs/tgen-simple-mem.py
|
4
|
3305
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
import m5
from m5.objects import *
# both traffic generator and communication monitor are only available
# if we have protobuf support, so potentially skip this test
require_sim_object("TrafficGen")
require_sim_object("CommMonitor")
# even if this is only a traffic generator, call it cpu to make sure
# the scripts are happy
cpu = TrafficGen(config_file = "tests/quick/se/70.tgen/tgen-simple-mem.cfg")
# system simulated
system = System(cpu = cpu, physmem = SimpleMemory(),
membus = NoncoherentBus(width = 16),
clk_domain = SrcClockDomain(clock = '1GHz'))
# add a communication monitor, and also trace all the packets
system.monitor = CommMonitor(trace_file = "monitor.ptrc.gz")
# connect the traffic generator to the bus via a communication monitor
system.cpu.port = system.monitor.slave
system.monitor.master = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# connect memory to the membus
system.physmem.port = system.membus.master
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
|
bsd-3-clause
|
yqm/sl4a
|
python/src/Lib/idlelib/StackViewer.py
|
69
|
3841
|
import os
import sys
import linecache
from TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from ObjectBrowser import ObjectTreeItem, make_objecttreeitem
def StackBrowser(root, flist=None, tb=None, top=None):
if top is None:
from Tkinter import Toplevel
top = Toplevel(root)
sc = ScrolledCanvas(top, bg="white", highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
item = StackTreeItem(flist, tb)
node = TreeNode(sc.canvas, None, item)
node.expand()
class StackTreeItem(TreeItem):
def __init__(self, flist=None, tb=None):
self.flist = flist
self.stack = self.get_stack(tb)
self.text = self.get_exception()
def get_stack(self, tb):
if tb is None:
tb = sys.last_traceback
stack = []
if tb and tb.tb_frame is None:
tb = tb.tb_next
while tb is not None:
stack.append((tb.tb_frame, tb.tb_lineno))
tb = tb.tb_next
return stack
def get_exception(self):
type = sys.last_type
value = sys.last_value
if hasattr(type, "__name__"):
type = type.__name__
s = str(type)
if value is not None:
s = s + ": " + str(value)
return s
def GetText(self):
return self.text
def GetSubList(self):
sublist = []
for info in self.stack:
item = FrameTreeItem(info, self.flist)
sublist.append(item)
return sublist
class FrameTreeItem(TreeItem):
def __init__(self, info, flist):
self.info = info
self.flist = flist
def GetText(self):
frame, lineno = self.info
try:
modname = frame.f_globals["__name__"]
except:
modname = "?"
code = frame.f_code
filename = code.co_filename
funcname = code.co_name
sourceline = linecache.getline(filename, lineno)
sourceline = sourceline.strip()
if funcname in ("?", "", None):
item = "%s, line %d: %s" % (modname, lineno, sourceline)
else:
item = "%s.%s(...), line %d: %s" % (modname, funcname,
lineno, sourceline)
return item
def GetSubList(self):
frame, lineno = self.info
sublist = []
if frame.f_globals is not frame.f_locals:
item = VariablesTreeItem("<locals>", frame.f_locals, self.flist)
sublist.append(item)
item = VariablesTreeItem("<globals>", frame.f_globals, self.flist)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if self.flist:
frame, lineno = self.info
filename = frame.f_code.co_filename
if os.path.isfile(filename):
self.flist.gotofileline(filename, lineno)
class VariablesTreeItem(ObjectTreeItem):
def GetText(self):
return self.labeltext
def GetLabelText(self):
return None
def IsExpandable(self):
return len(self.object) > 0
def keys(self):
return self.object.keys()
def GetSubList(self):
sublist = []
for key in self.keys():
try:
value = self.object[key]
except KeyError:
continue
def setfunction(value, key=key, object=self.object):
object[key] = value
item = make_objecttreeitem(key + " =", value, setfunction)
sublist.append(item)
return sublist
def _test():
try:
import testcode
reload(testcode)
except:
sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
from Tkinter import Tk
root = Tk()
StackBrowser(None, top=root)
root.mainloop()
if __name__ == "__main__":
_test()
|
apache-2.0
|
candrews/portage
|
pym/portage/sync/old_tree_timestamp.py
|
9
|
2161
|
# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division
import locale
import logging
import time
from portage import os, _unicode_decode
from portage.exception import PortageException
from portage.localization import _
from portage.output import EOutput
from portage.util import grabfile, writemsg_level
def have_english_locale():
lang, enc = locale.getdefaultlocale()
if lang is not None:
lang = lang.lower()
lang = lang.split('_', 1)[0]
return lang is None or lang in ('c', 'en')
def whenago(seconds):
sec = int(seconds)
mins = 0
days = 0
hrs = 0
years = 0
out = []
if sec > 60:
mins = sec // 60
sec = sec % 60
if mins > 60:
hrs = mins // 60
mins = mins % 60
if hrs > 24:
days = hrs // 24
hrs = hrs % 24
if days > 365:
years = days // 365
days = days % 365
if years:
out.append("%dy " % years)
if days:
out.append("%dd " % days)
if hrs:
out.append("%dh " % hrs)
if mins:
out.append("%dm " % mins)
if sec:
out.append("%ds " % sec)
return "".join(out).strip()
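# Illustrative example (editor's sketch): whenago(90061) == "1d 1h 1m 1s"
# (90061 seconds is one day, one hour, one minute and one second).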
def old_tree_timestamp_warn(portdir, settings):
unixtime = time.time()
default_warnsync = 30
timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
try:
lastsync = grabfile(timestamp_file)
except PortageException:
return False
if not lastsync:
return False
lastsync = lastsync[0].split()
if not lastsync:
return False
try:
lastsync = int(lastsync[0])
except ValueError:
return False
var_name = 'PORTAGE_SYNC_STALE'
try:
warnsync = float(settings.get(var_name, default_warnsync))
except ValueError:
writemsg_level("!!! %s contains non-numeric value: %s\n" % \
(var_name, settings[var_name]),
level=logging.ERROR, noiselevel=-1)
return False
if warnsync <= 0:
return False
if (unixtime - 86400 * warnsync) > lastsync:
out = EOutput()
if have_english_locale():
out.ewarn("Last emerge --sync was %s ago." % \
whenago(unixtime - lastsync))
else:
out.ewarn(_("Last emerge --sync was %s.") % \
_unicode_decode(time.strftime(
'%c', time.localtime(lastsync))))
return True
return False
|
gpl-2.0
|
HLFH/CouchPotatoServer
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/canal13cl.py
|
154
|
1912
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class Canal13clIE(InfoExtractor):
_VALID_URL = r'^http://(?:www\.)?13\.cl/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
'md5': '4cb1fa38adcad8fea88487a078831755',
'info_dict': {
'id': '1403022125',
'display_id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
'ext': 'mp4',
'title': 'El "círculo de hierro" de Michelle Bachelet en su regreso a La Moneda',
'description': '(Foto: Agencia Uno) En nueve días más, Michelle Bachelet va a asumir por segunda vez como presidenta de la República. Entre aquellos que la acompañarán hay caras que se repiten y otras que se consolidan en su entorno de colaboradores más cercanos.',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
webpage = self._download_webpage(url, display_id)
title = self._html_search_meta(
'twitter:title', webpage, 'title', fatal=True)
description = self._html_search_meta(
'twitter:description', webpage, 'description')
url = self._html_search_regex(
r'articuloVideo = \"(.*?)\"', webpage, 'url')
real_id = self._search_regex(
r'[^0-9]([0-9]{7,})[^0-9]', url, 'id', default=display_id)
thumbnail = self._html_search_regex(
r'articuloImagen = \"(.*?)\"', webpage, 'thumbnail')
return {
'id': real_id,
'display_id': display_id,
'url': url,
'title': title,
'description': description,
'ext': 'mp4',
'thumbnail': thumbnail,
}
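# Hypothetical invocation (standard youtube-dl CLI, not part of this file):
# passing a 13.cl article URL such as the one in _TEST above to youtube-dl
# dispatches to this extractor via _VALID_URL.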
|
gpl-3.0
|
Jeff-Tian/mybnb
|
Python27/Lib/ctypes/test/test_structures.py
|
1
|
16610
|
import unittest
from ctypes import *
from ctypes.test import need_symbol
from struct import calcsize
import _testcapi
class SubclassesTest(unittest.TestCase):
def test_subclass(self):
class X(Structure):
_fields_ = [("a", c_int)]
class Y(X):
_fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
def test_subclass_delayed(self):
class X(Structure):
pass
self.assertEqual(sizeof(X), 0)
X._fields_ = [("a", c_int)]
class Y(X):
pass
self.assertEqual(sizeof(Y), sizeof(X))
Y._fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
class StructureTestCase(unittest.TestCase):
formats = {"c": c_char,
"b": c_byte,
"B": c_ubyte,
"h": c_short,
"H": c_ushort,
"i": c_int,
"I": c_uint,
"l": c_long,
"L": c_ulong,
"q": c_longlong,
"Q": c_ulonglong,
"f": c_float,
"d": c_double,
}
def test_simple_structs(self):
for code, tp in self.formats.items():
class X(Structure):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("c%c0%c" % (code, code)), code))
def test_unions(self):
for code, tp in self.formats.items():
class X(Union):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("%c" % (code)), code))
def test_struct_alignment(self):
class X(Structure):
_fields_ = [("x", c_char * 3)]
self.assertEqual(alignment(X), calcsize("s"))
self.assertEqual(sizeof(X), calcsize("3s"))
class Y(Structure):
_fields_ = [("x", c_char * 3),
("y", c_int)]
self.assertEqual(alignment(Y), alignment(c_int))
self.assertEqual(sizeof(Y), calcsize("3si"))
class SI(Structure):
_fields_ = [("a", X),
("b", Y)]
self.assertEqual(alignment(SI), max(alignment(Y), alignment(X)))
self.assertEqual(sizeof(SI), calcsize("3s0i 3si 0i"))
class IS(Structure):
_fields_ = [("b", Y),
("a", X)]
        self.assertEqual(alignment(IS), max(alignment(X), alignment(Y)))
self.assertEqual(sizeof(IS), calcsize("3si 3s 0i"))
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), alignment(X))
self.assertEqual(sizeof(XX), calcsize("3s 3s 0s"))
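        # A note on the struct format strings above (standard struct-module
        # semantics): "3s" is three raw bytes, "i" is a native int at its
        # native alignment, and a trailing zero-count code such as "0i" adds
        # only the padding needed to round the struct up to that alignment.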
    def test_empty(self):
# I had problems with these
#
# Although these are pathological cases: Empty Structures!
class X(Structure):
_fields_ = []
class Y(Union):
_fields_ = []
# Is this really the correct alignment, or should it be 0?
self.assertTrue(alignment(X) == alignment(Y) == 1)
self.assertTrue(sizeof(X) == sizeof(Y) == 0)
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), 1)
self.assertEqual(sizeof(XX), 0)
def test_fields(self):
        # test the offset and size attributes of Structure/Union fields.
class X(Structure):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, sizeof(c_int))
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
class X(Union):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, 0)
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
# XXX Should we check nested data types also?
# offset is always relative to the class...
def test_packed(self):
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 1
self.assertEqual(sizeof(X), 9)
self.assertEqual(X.b.offset, 1)
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 2
self.assertEqual(sizeof(X), 10)
self.assertEqual(X.b.offset, 2)
import struct
longlong_size = struct.calcsize("q")
longlong_align = struct.calcsize("bq") - longlong_size
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 4
self.assertEqual(sizeof(X), min(4, longlong_align) + longlong_size)
self.assertEqual(X.b.offset, min(4, longlong_align))
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 8
self.assertEqual(sizeof(X), min(8, longlong_align) + longlong_size)
self.assertEqual(X.b.offset, min(8, longlong_align))
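        # The expectations above follow the usual packing rule: with _pack_ = n
        # each field is aligned to min(n, its native alignment), so for
        # _pack_ = 1 the c_longlong lands at offset 1 (sizeof == 9), for
        # _pack_ = 2 at offset 2 (sizeof == 10), and for larger packs at
        # min(n, longlong_align).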
d = {"_fields_": [("a", "b"),
("b", "q")],
"_pack_": -1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
# Issue 15989
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.INT_MAX + 1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.UINT_MAX + 2}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
def test_initializers(self):
class Person(Structure):
_fields_ = [("name", c_char*6),
("age", c_int)]
self.assertRaises(TypeError, Person, 42)
self.assertRaises(ValueError, Person, "asldkjaslkdjaslkdj")
self.assertRaises(TypeError, Person, "Name", "HI")
# short enough
self.assertEqual(Person("12345", 5).name, "12345")
# exact fit
self.assertEqual(Person("123456", 5).name, "123456")
# too long
self.assertRaises(ValueError, Person, "1234567", 5)
def test_conflicting_initializers(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
# conflicting positional and keyword args
self.assertRaises(TypeError, POINT, 2, 3, x=4)
self.assertRaises(TypeError, POINT, 2, 3, y=4)
# too many initializers
self.assertRaises(TypeError, POINT, 2, 3, 4)
def test_keyword_initializers(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
pt = POINT(1, 2)
self.assertEqual((pt.x, pt.y), (1, 2))
pt = POINT(y=2, x=1)
self.assertEqual((pt.x, pt.y), (1, 2))
def test_invalid_field_types(self):
class POINT(Structure):
pass
self.assertRaises(TypeError, setattr, POINT, "_fields_", [("x", 1), ("y", 2)])
def test_invalid_name(self):
# field name must be string
def declare_with_name(name):
class S(Structure):
_fields_ = [(name, c_int)]
self.assertRaises(TypeError, declare_with_name, u"x\xe9")
def test_intarray_fields(self):
class SomeInts(Structure):
_fields_ = [("a", c_int * 4)]
# can use tuple to initialize array (but not list!)
self.assertEqual(SomeInts((1, 2)).a[:], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::-1], [0, 0, 2, 1])
self.assertEqual(SomeInts((1, 2)).a[::2], [1, 0])
self.assertEqual(SomeInts((1, 2)).a[1:5:6], [2])
self.assertEqual(SomeInts((1, 2)).a[6:4:-1], [])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[:], [1, 2, 3, 4])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[::], [1, 2, 3, 4])
# too long
        # XXX Should this raise ValueError rather than RuntimeError?
self.assertRaises(RuntimeError, SomeInts, (1, 2, 3, 4, 5))
def test_nested_initializers(self):
# test initializing nested structures
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
p = Person("Someone", ("1234", "5678"), 5)
self.assertEqual(p.name, "Someone")
self.assertEqual(p.phone.areacode, "1234")
self.assertEqual(p.phone.number, "5678")
self.assertEqual(p.age, 5)
@need_symbol('c_wchar')
def test_structures_with_wchar(self):
class PersonW(Structure):
_fields_ = [("name", c_wchar * 12),
("age", c_int)]
p = PersonW(u"Someone")
self.assertEqual(p.name, "Someone")
self.assertEqual(PersonW(u"1234567890").name, u"1234567890")
self.assertEqual(PersonW(u"12345678901").name, u"12345678901")
# exact fit
self.assertEqual(PersonW(u"123456789012").name, u"123456789012")
#too long
self.assertRaises(ValueError, PersonW, u"1234567890123")
def test_init_errors(self):
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
cls, msg = self.get_except(Person, "Someone", (1, 2))
self.assertEqual(cls, RuntimeError)
# In Python 2.5, Exception is a new-style class, and the repr changed
if issubclass(Exception, object):
self.assertEqual(msg,
"(Phone) <type 'exceptions.TypeError'>: "
"expected string or Unicode object, int found")
else:
self.assertEqual(msg,
"(Phone) exceptions.TypeError: "
"expected string or Unicode object, int found")
cls, msg = self.get_except(Person, "Someone", ("a", "b", "c"))
self.assertEqual(cls, RuntimeError)
if issubclass(Exception, object):
self.assertEqual(msg,
"(Phone) <type 'exceptions.TypeError'>: too many initializers")
else:
self.assertEqual(msg, "(Phone) exceptions.TypeError: too many initializers")
def test_huge_field_name(self):
# issue12881: segfault with large structure field names
def create_class(length):
class S(Structure):
_fields_ = [('x' * length, c_int)]
for length in [10 ** i for i in range(0, 8)]:
try:
create_class(length)
except MemoryError:
# MemoryErrors are OK, we just don't want to segfault
pass
def get_except(self, func, *args):
try:
func(*args)
except Exception, detail:
return detail.__class__, str(detail)
@unittest.skip('test disabled')
def test_subclass_creation(self):
meta = type(Structure)
# same as 'class X(Structure): pass'
# fails, since we need either a _fields_ or a _abstract_ attribute
cls, msg = self.get_except(meta, "X", (Structure,), {})
self.assertEqual((cls, msg),
(AttributeError, "class must define a '_fields_' attribute"))
def test_abstract_class(self):
class X(Structure):
_abstract_ = "something"
# try 'X()'
cls, msg = self.get_except(eval, "X()", locals())
self.assertEqual((cls, msg), (TypeError, "abstract class"))
def test_methods(self):
## class X(Structure):
## _fields_ = []
self.assertIn("in_dll", dir(type(Structure)))
self.assertIn("from_address", dir(type(Structure)))
self.assertIn("in_dll", dir(type(Structure)))
def test_positional_args(self):
# see also http://bugs.python.org/issue5042
class W(Structure):
_fields_ = [("a", c_int), ("b", c_int)]
class X(W):
_fields_ = [("c", c_int)]
class Y(X):
pass
class Z(Y):
_fields_ = [("d", c_int), ("e", c_int), ("f", c_int)]
z = Z(1, 2, 3, 4, 5, 6)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 2, 3, 4, 5, 6))
z = Z(1)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 0, 0, 0, 0, 0))
self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7))
class PointerMemberTestCase(unittest.TestCase):
def test(self):
# a Structure with a POINTER field
class S(Structure):
_fields_ = [("array", POINTER(c_int))]
s = S()
# We can assign arrays of the correct type
s.array = (c_int * 3)(1, 2, 3)
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
# The following are bugs, but are included here because the unittests
# also describe the current behaviour.
#
# This fails with SystemError: bad arg to internal function
# or with IndexError (with a patch I have)
s.array[0] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [42, 2, 3])
s.array[0] = 1
## s.array[1] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
def test_none_to_pointer_fields(self):
class S(Structure):
_fields_ = [("x", c_int),
("p", POINTER(c_int))]
s = S()
s.x = 12345678
s.p = None
self.assertEqual(s.x, 12345678)
class TestRecursiveStructure(unittest.TestCase):
def test_contains_itself(self):
class Recursive(Structure):
pass
try:
Recursive._fields_ = [("next", Recursive)]
except AttributeError, details:
self.assertIn("Structure or union cannot contain itself",
str(details))
else:
self.fail("Structure or union cannot contain itself")
def test_vice_versa(self):
class First(Structure):
pass
class Second(Structure):
pass
First._fields_ = [("second", Second)]
try:
Second._fields_ = [("first", First)]
except AttributeError, details:
self.assertIn("_fields_ is final", str(details))
else:
self.fail("AttributeError not raised")
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
saurabh6790/OFF-RISAPP
|
stock/doctype/stock_reconciliation/test_stock_reconciliation.py
|
30
|
9757
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes, unittest
from webnotes.utils import flt
import json
from accounts.utils import get_fiscal_year, get_stock_and_account_difference, get_balance_on
class TestStockReconciliation(unittest.TestCase):
def test_reco_for_fifo(self):
webnotes.defaults.set_global_default("auto_accounting_for_stock", 0)
# [[qty, valuation_rate, posting_date,
# posting_time, expected_stock_value, bin_qty, bin_valuation]]
input_data = [
[50, 1000, "2012-12-26", "12:00", 50000, 45, 48000],
[5, 1000, "2012-12-26", "12:00", 5000, 0, 0],
[15, 1000, "2012-12-26", "12:00", 15000, 10, 12000],
[25, 900, "2012-12-26", "12:00", 22500, 20, 22500],
[20, 500, "2012-12-26", "12:00", 10000, 15, 18000],
[50, 1000, "2013-01-01", "12:00", 50000, 65, 68000],
[5, 1000, "2013-01-01", "12:00", 5000, 20, 23000],
["", 1000, "2012-12-26", "12:05", 15000, 10, 12000],
[20, "", "2012-12-26", "12:05", 16000, 15, 18000],
[10, 2000, "2012-12-26", "12:10", 20000, 5, 6000],
[1, 1000, "2012-12-01", "00:00", 1000, 11, 13200],
[0, "", "2012-12-26", "12:10", 0, -5, 0]
]
for d in input_data:
self.cleanup_data()
self.insert_existing_sle("FIFO")
stock_reco = self.submit_stock_reconciliation(d[0], d[1], d[2], d[3])
# check stock value
res = webnotes.conn.sql("""select stock_value from `tabStock Ledger Entry`
where item_code = '_Test Item' and warehouse = '_Test Warehouse - _TC'
and posting_date = %s and posting_time = %s order by name desc limit 1""",
(d[2], d[3]))
self.assertEqual(res and flt(res[0][0]) or 0, d[4])
# check bin qty and stock value
bin = webnotes.conn.sql("""select actual_qty, stock_value from `tabBin`
where item_code = '_Test Item' and warehouse = '_Test Warehouse - _TC'""")
self.assertEqual(bin and [flt(bin[0][0]), flt(bin[0][1])] or [], [d[5], d[6]])
# no gl entries
gl_entries = webnotes.conn.sql("""select name from `tabGL Entry`
where voucher_type = 'Stock Reconciliation' and voucher_no = %s""",
stock_reco.doc.name)
self.assertFalse(gl_entries)
def test_reco_for_moving_average(self):
webnotes.defaults.set_global_default("auto_accounting_for_stock", 0)
# [[qty, valuation_rate, posting_date,
# posting_time, expected_stock_value, bin_qty, bin_valuation]]
input_data = [
[50, 1000, "2012-12-26", "12:00", 50000, 45, 48000],
[5, 1000, "2012-12-26", "12:00", 5000, 0, 0],
[15, 1000, "2012-12-26", "12:00", 15000, 10, 12000],
[25, 900, "2012-12-26", "12:00", 22500, 20, 22500],
[20, 500, "2012-12-26", "12:00", 10000, 15, 18000],
[50, 1000, "2013-01-01", "12:00", 50000, 65, 68000],
[5, 1000, "2013-01-01", "12:00", 5000, 20, 23000],
["", 1000, "2012-12-26", "12:05", 15000, 10, 12000],
[20, "", "2012-12-26", "12:05", 18000, 15, 18000],
[10, 2000, "2012-12-26", "12:10", 20000, 5, 6000],
[1, 1000, "2012-12-01", "00:00", 1000, 11, 13200],
[0, "", "2012-12-26", "12:10", 0, -5, 0]
]
for d in input_data:
self.cleanup_data()
self.insert_existing_sle("Moving Average")
stock_reco = self.submit_stock_reconciliation(d[0], d[1], d[2], d[3])
# check stock value in sle
res = webnotes.conn.sql("""select stock_value from `tabStock Ledger Entry`
where item_code = '_Test Item' and warehouse = '_Test Warehouse - _TC'
and posting_date = %s and posting_time = %s order by name desc limit 1""",
(d[2], d[3]))
self.assertEqual(res and flt(res[0][0], 4) or 0, d[4])
# bin qty and stock value
bin = webnotes.conn.sql("""select actual_qty, stock_value from `tabBin`
where item_code = '_Test Item' and warehouse = '_Test Warehouse - _TC'""")
self.assertEqual(bin and [flt(bin[0][0]), flt(bin[0][1], 4)] or [],
[flt(d[5]), flt(d[6])])
# no gl entries
gl_entries = webnotes.conn.sql("""select name from `tabGL Entry`
where voucher_type = 'Stock Reconciliation' and voucher_no = %s""",
stock_reco.doc.name)
self.assertFalse(gl_entries)
def test_reco_fifo_gl_entries(self):
webnotes.defaults.set_global_default("auto_accounting_for_stock", 1)
		# [[qty, valuation_rate, posting_date, posting_time]]
input_data = [
[50, 1000, "2012-12-26", "12:00"],
[5, 1000, "2012-12-26", "12:00"],
[15, 1000, "2012-12-26", "12:00"],
[25, 900, "2012-12-26", "12:00"],
[20, 500, "2012-12-26", "12:00"],
["", 1000, "2012-12-26", "12:05"],
[20, "", "2012-12-26", "12:05"],
[10, 2000, "2012-12-26", "12:10"],
[0, "", "2012-12-26", "12:10"],
[50, 1000, "2013-01-01", "12:00"],
[5, 1000, "2013-01-01", "12:00"],
[1, 1000, "2012-12-01", "00:00"],
]
for d in input_data:
self.cleanup_data()
self.insert_existing_sle("FIFO")
self.assertFalse(get_stock_and_account_difference(["_Test Account Stock In Hand - _TC"]))
stock_reco = self.submit_stock_reconciliation(d[0], d[1], d[2], d[3])
self.assertFalse(get_stock_and_account_difference(["_Test Account Stock In Hand - _TC"]))
stock_reco.cancel()
self.assertFalse(get_stock_and_account_difference(["_Test Account Stock In Hand - _TC"]))
webnotes.defaults.set_global_default("auto_accounting_for_stock", 0)
def test_reco_moving_average_gl_entries(self):
webnotes.defaults.set_global_default("auto_accounting_for_stock", 1)
# [[qty, valuation_rate, posting_date,
# posting_time, stock_in_hand_debit]]
input_data = [
[50, 1000, "2012-12-26", "12:00", 36500],
[5, 1000, "2012-12-26", "12:00", -8500],
[15, 1000, "2012-12-26", "12:00", 1500],
[25, 900, "2012-12-26", "12:00", 9000],
[20, 500, "2012-12-26", "12:00", -3500],
["", 1000, "2012-12-26", "12:05", 1500],
[20, "", "2012-12-26", "12:05", 4500],
[10, 2000, "2012-12-26", "12:10", 6500],
[0, "", "2012-12-26", "12:10", -13500],
[50, 1000, "2013-01-01", "12:00", 50000],
[5, 1000, "2013-01-01", "12:00", 5000],
[1, 1000, "2012-12-01", "00:00", 1000],
]
for d in input_data:
self.cleanup_data()
self.insert_existing_sle("Moving Average")
stock_reco = self.submit_stock_reconciliation(d[0], d[1], d[2], d[3])
self.assertFalse(get_stock_and_account_difference(["_Test Warehouse - _TC"]))
# cancel
stock_reco.cancel()
self.assertFalse(get_stock_and_account_difference(["_Test Warehouse - _TC"]))
webnotes.defaults.set_global_default("auto_accounting_for_stock", 0)
def cleanup_data(self):
webnotes.conn.sql("delete from `tabStock Ledger Entry`")
webnotes.conn.sql("delete from tabBin")
webnotes.conn.sql("delete from `tabGL Entry`")
def submit_stock_reconciliation(self, qty, rate, posting_date, posting_time):
stock_reco = webnotes.bean([{
"doctype": "Stock Reconciliation",
"posting_date": posting_date,
"posting_time": posting_time,
"fiscal_year": get_fiscal_year(posting_date)[0],
"company": "_Test Company",
"expense_account": "Stock Adjustment - _TC",
"cost_center": "_Test Cost Center - _TC",
"reconciliation_json": json.dumps([
["Item Code", "Warehouse", "Quantity", "Valuation Rate"],
["_Test Item", "_Test Warehouse - _TC", qty, rate]
]),
}])
stock_reco.insert()
stock_reco.submit()
return stock_reco
def insert_existing_sle(self, valuation_method):
webnotes.conn.set_value("Item", "_Test Item", "valuation_method", valuation_method)
webnotes.conn.set_default("allow_negative_stock", 1)
stock_entry = [
{
"company": "_Test Company",
"doctype": "Stock Entry",
"posting_date": "2012-12-12",
"posting_time": "01:00",
"purpose": "Material Receipt",
"fiscal_year": "_Test Fiscal Year 2012",
},
{
"conversion_factor": 1.0,
"doctype": "Stock Entry Detail",
"item_code": "_Test Item",
"parentfield": "mtn_details",
"incoming_rate": 1000,
"qty": 20.0,
"stock_uom": "_Test UOM",
"transfer_qty": 20.0,
"uom": "_Test UOM",
"t_warehouse": "_Test Warehouse - _TC",
"expense_account": "Stock Adjustment - _TC",
"cost_center": "_Test Cost Center - _TC"
},
]
pr = webnotes.bean(copy=stock_entry)
pr.insert()
pr.submit()
pr1 = webnotes.bean(copy=stock_entry)
pr1.doc.posting_date = "2012-12-15"
pr1.doc.posting_time = "02:00"
pr1.doclist[1].qty = 10
pr1.doclist[1].transfer_qty = 10
pr1.doclist[1].incoming_rate = 700
pr1.insert()
pr1.submit()
pr2 = webnotes.bean(copy=stock_entry)
pr2.doc.posting_date = "2012-12-25"
pr2.doc.posting_time = "03:00"
pr2.doc.purpose = "Material Issue"
pr2.doclist[1].s_warehouse = "_Test Warehouse - _TC"
pr2.doclist[1].t_warehouse = None
pr2.doclist[1].qty = 15
pr2.doclist[1].transfer_qty = 15
pr2.doclist[1].incoming_rate = 0
pr2.insert()
pr2.submit()
pr3 = webnotes.bean(copy=stock_entry)
pr3.doc.posting_date = "2012-12-31"
pr3.doc.posting_time = "08:00"
pr3.doc.purpose = "Material Issue"
pr3.doclist[1].s_warehouse = "_Test Warehouse - _TC"
pr3.doclist[1].t_warehouse = None
pr3.doclist[1].qty = 20
pr3.doclist[1].transfer_qty = 20
pr3.doclist[1].incoming_rate = 0
pr3.insert()
pr3.submit()
pr4 = webnotes.bean(copy=stock_entry)
pr4.doc.posting_date = "2013-01-05"
pr4.doc.fiscal_year = "_Test Fiscal Year 2013"
pr4.doc.posting_time = "07:00"
pr4.doclist[1].qty = 15
pr4.doclist[1].transfer_qty = 15
pr4.doclist[1].incoming_rate = 1200
pr4.insert()
pr4.submit()
test_dependencies = ["Item", "Warehouse"]
|
agpl-3.0
|
huashiyiqike/NETLAB
|
layernet/io/iamdb/convert_iamdb.py
|
2
|
2361
|
import os
from sets import Set
from get_lineStroke import getLineStroke
from get_targetStrings import getTargetString
from get_xmlFileName import getXmlNames
import netcdf_helpers
import numpy as np
targetStrings = []
wordTargetStrings = []
charSet = Set()
inputs = []
labels = []
seqDims = []
seqLengths = []
seqTags = []
# testset_t = "./iamdb/task1/trainset.txt"
testset_t = "./iamdb/task1/my.txt"
xmlPrefix = "./iamdb/lineStrokes/"
asciiPrefix = "./iamdb/ascii/"
ncFileName = "./iamdb/iamondb.nc"
trainFileList = file(testset_t).readlines()
for l in trainFileList:
l = l.strip()
print "loading file ", l
# print l[0:7]
curXmlDir = os.path.join(xmlPrefix, l.split('-')[0], l[0:7])
curAsciiDir = os.path.join(asciiPrefix, l.split('-')[0], l[0:7])
curAsciiFilePath = os.path.join(curAsciiDir, l + ".txt")
[curTargetString, curWordTargetString, curCharSet] = getTargetString(curAsciiFilePath)
targetStrings.extend(curTargetString)
# wordTargetStrings.extend(curWordTargetString)
# print len(curTargetString)
# print curCharSet
# charSet = charSet.union(curCharSet)
# for i in range(len(curTargetString)):
# print curWordTargetString[i]
# print curTargetString[i]
xmlNames = getXmlNames(curXmlDir, l)
assert len(curTargetString) == len(xmlNames)
for xmlName in xmlNames:
seqTags.append(xmlName)
xmlFilePath = os.path.join(curXmlDir, xmlName)
curLineStroke = getLineStroke(xmlFilePath)
# print len(curLine)
inputs.extend(curLineStroke)
seqLengths.append(len(curLineStroke))
# seqDims.append([len(curLineStroke)])
inputsArr = np.array(inputs)
inputMeans = np.mean(inputsArr, 0)
inputStds = np.std(inputsArr, 0)
inputsArr[:, :-1] = (inputsArr[:, :-1] - inputMeans[:-1]) / inputStds[:-1]
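# Note: inputMeans/inputStds are computed over all three columns, but only the
# first two (the x/y pen offsets) are standardised here; the last column (the
# pen-up flag) is left untouched.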
inputs = inputsArr.tolist()
index = 0
out_file = open('test.txt', 'w')
out_file.write(str(max(seqLengths)) + ' ' + str(len(seqLengths)) + ' 3\n')
for i in seqLengths:
    print i
    strs = str(i) + '\n'
    for j in range(0, i):
        strs += str(inputs[index][0]) + ' ' + str(inputs[index][1]) + ' ' + str(inputs[index][2]) + '\n'
        index += 1
    out_file.write(strs)
out_file.write(str(inputMeans[0]) + ' ' + str(inputMeans[1]) + ' ' + str(inputMeans[2]) + '\n' + str(inputStds[0]) + ' ' + str(inputStds[1]) + ' ' + str(inputStds[2]))
|
apache-2.0
|
neilLasrado/erpnext
|
erpnext/patches/v5_4/notify_system_managers_regarding_wrong_tax_calculation.py
|
29
|
1452
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.email import sendmail_to_system_managers
from frappe.utils import get_link_to_form
def execute():
wrong_records = []
for dt in ("Quotation", "Sales Order", "Delivery Note", "Sales Invoice",
"Purchase Order", "Purchase Receipt", "Purchase Invoice"):
records = frappe.db.sql_list("""select name from `tab{0}`
where apply_discount_on = 'Net Total' and ifnull(discount_amount, 0) != 0
and modified >= '2015-02-17' and docstatus=1""".format(dt))
if records:
records = [get_link_to_form(dt, d) for d in records]
wrong_records.append([dt, records])
if wrong_records:
content = """Dear System Manager,
Due to an error related to Discount Amount on Net Total, the tax calculation might be wrong in the following records. We did not fix the tax amount automatically because doing so could corrupt the entries, so we request you to check these records and amend them if you find the calculation to be wrong.
Please check the following entries:
%s
Regards,
Administrator""" % "\n".join([(d[0] + ": " + ", ".join(d[1])) for d in wrong_records])
try:
sendmail_to_system_managers("[Important] [ERPNext] Tax calculation might be wrong, please check.", content)
except:
pass
print("="*50)
print(content)
print("="*50)
|
gpl-3.0
|
sangwook236/general-development-and-testing
|
sw_dev/python/rnd/test/image_processing/skimage/skimage_transform.py
|
2
|
1365
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import PiecewiseAffineTransform, warp
from skimage import data
#---------------------------------------------------------------------
# REF [site] >> http://scikit-image.org/docs/stable/auto_examples/transform/plot_piecewise_affine.html
def piecewise_affine_transform():
image = data.astronaut()
rows, cols = image.shape[0], image.shape[1]
src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 10)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
# Add sinusoidal oscillation to row coordinates.
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T
tform = PiecewiseAffineTransform()
tform.estimate(src, dst)
out_rows = image.shape[0] - 1.5 * 50
out_cols = cols
out = warp(image, tform, output_shape=(out_rows, out_cols))
fig, ax = plt.subplots()
ax.imshow(out)
ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
ax.axis((0, out_cols, out_rows, 0))
plt.show()
def main():
piecewise_affine_transform()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
|
gpl-2.0
|
fritsvanveen/QGIS
|
python/pyplugin_installer/qgsplugininstallerinstallingdialog.py
|
5
|
6317
|
# -*- coding:utf-8 -*-
"""
/***************************************************************************
qgsplugininstallerinstallingdialog.py
Plugin Installer module
-------------------
Date : June 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import QDir, QUrl, QFile, QCoreApplication
from qgis.PyQt.QtWidgets import QDialog
from qgis.PyQt.QtNetwork import QNetworkRequest, QNetworkReply
import qgis
from qgis.core import QgsNetworkAccessManager, QgsAuthManager
from .ui_qgsplugininstallerinstallingbase import Ui_QgsPluginInstallerInstallingDialogBase
from .installer_data import removeDir, repositories
from .unzip import unzip
class QgsPluginInstallerInstallingDialog(QDialog, Ui_QgsPluginInstallerInstallingDialogBase):
# ----------------------------------------- #
def __init__(self, parent, plugin):
QDialog.__init__(self, parent)
self.setupUi(self)
self.plugin = plugin
self.mResult = ""
self.progressBar.setRange(0, 0)
self.progressBar.setFormat("%p%")
self.labelName.setText(plugin["name"])
self.buttonBox.clicked.connect(self.abort)
url = QUrl(plugin["download_url"])
fileName = plugin["filename"]
tmpDir = QDir.tempPath()
tmpPath = QDir.cleanPath(tmpDir + "/" + fileName)
self.file = QFile(tmpPath)
self.request = QNetworkRequest(url)
authcfg = repositories.all()[plugin["zip_repository"]]["authcfg"]
if authcfg and isinstance(authcfg, str):
if not QgsAuthManager.instance().updateNetworkRequest(
self.request, authcfg.strip()):
self.mResult = self.tr(
"Update of network request with authentication "
"credentials FAILED for configuration '{0}'").format(authcfg)
self.request = None
if self.request is not None:
self.reply = QgsNetworkAccessManager.instance().get(self.request)
self.reply.downloadProgress.connect(self.readProgress)
self.reply.finished.connect(self.requestFinished)
self.stateChanged(4)
def exec_(self):
if self.request is None:
return QDialog.Rejected
QDialog.exec_(self)
# ----------------------------------------- #
def result(self):
return self.mResult
# ----------------------------------------- #
def stateChanged(self, state):
messages = [self.tr("Installing..."), self.tr("Resolving host name..."), self.tr("Connecting..."), self.tr("Host connected. Sending request..."), self.tr("Downloading data..."), self.tr("Idle"), self.tr("Closing connection..."), self.tr("Error")]
self.labelState.setText(messages[state])
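        # Index 4 ("Downloading data...") and index 0 ("Installing...") are the
        # two states driven explicitly by this dialog; see the stateChanged()
        # calls in __init__() and requestFinished().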
# ----------------------------------------- #
def readProgress(self, done, total):
if total > 0:
self.progressBar.setMaximum(total)
self.progressBar.setValue(done)
# ----------------------------------------- #
def requestFinished(self):
reply = self.sender()
self.buttonBox.setEnabled(False)
if reply.error() != QNetworkReply.NoError:
self.mResult = reply.errorString()
if reply.error() == QNetworkReply.OperationCanceledError:
self.mResult += "<br/><br/>" + QCoreApplication.translate("QgsPluginInstaller", "If you haven't cancelled the download manually, it might be caused by a timeout. In this case consider increasing the connection timeout value in QGIS options.")
self.reject()
reply.deleteLater()
return
self.file.open(QFile.WriteOnly)
self.file.write(reply.readAll())
self.file.close()
self.stateChanged(0)
reply.deleteLater()
pluginDir = qgis.utils.home_plugin_path
tmpPath = self.file.fileName()
# make sure that the parent directory exists
if not QDir(pluginDir).exists():
QDir().mkpath(pluginDir)
# if the target directory already exists as a link, remove the link without resolving:
QFile(pluginDir + str(QDir.separator()) + self.plugin["id"]).remove()
try:
unzip(str(tmpPath), str(pluginDir)) # test extract. If fails, then exception will be raised and no removing occurs
# removing old plugin files if exist
removeDir(QDir.cleanPath(pluginDir + "/" + self.plugin["id"])) # remove old plugin if exists
unzip(str(tmpPath), str(pluginDir)) # final extract.
except:
self.mResult = self.tr("Failed to unzip the plugin package. Probably it's broken or missing from the repository. You may also want to make sure that you have write permission to the plugin directory:") + "\n" + pluginDir
self.reject()
return
try:
# cleaning: removing the temporary zip file
QFile(tmpPath).remove()
except:
pass
self.close()
# ----------------------------------------- #
def abort(self):
if self.reply.isRunning():
self.reply.finished.disconnect()
self.reply.abort()
del self.reply
self.mResult = self.tr("Aborted by user")
self.reject()
|
gpl-2.0
|
carvalhodj/qunews
|
raspberry/slideshow.py
|
1
|
1193
|
from itertools import cycle
from PIL import Image
from PIL import ImageTk
try:
# Python2
import Tkinter as tk
except ImportError:
# Python3
import tkinter as tk
class App(tk.Tk):
def __init__(self, image_files, x, y, delay):
tk.Tk.__init__(self)
self.geometry('+{}+{}'.format(x, y))
self.delay = delay
self.atualiza()
self.picture_display = tk.Label(self)
self.picture_display.pack()
def show_slides(self):
img_object, img_name = next(self.pictures)
self.picture_display.config(image=img_object)
self.title(img_name)
self.after(self.delay, self.show_slides)
def atualiza(self):
self.pictures = cycle((ImageTk.PhotoImage(file=image), image)
for image in image_files)
self.after(38500, self.atualiza)
def run(self):
self.mainloop()
delay = 3500
image_files = [
'001.png',
'002.png',
'003.png',
'004.png',
'005.png',
'006.png',
'007.png',
'008.png',
'009.png',
'010.png'
]
x = 100
y = 50
try:
app = App(image_files, x, y, delay)
app.show_slides()
app.run()
except:
print('Erro no processamento das imagens')
|
apache-2.0
|
garhivelg/execom
|
migrations/versions/9474324542c6_cases.py
|
2
|
1689
|
"""cases
Revision ID: 9474324542c6
Revises:
Create Date: 2017-06-08 09:02:37.384472
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9474324542c6'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facility',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=128), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('register',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fund', sa.String(length=8), nullable=True),
sa.Column('register', sa.Integer(), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('case',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('register_id', sa.Integer(), nullable=True),
sa.Column('book_id', sa.String(length=8), nullable=True),
sa.Column('book_num', sa.Integer(), nullable=False),
sa.Column('facility_id', sa.Integer(), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.ForeignKeyConstraint(['facility_id'], ['facility.id'], ),
sa.ForeignKeyConstraint(['register_id'], ['register.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('case')
op.drop_table('register')
op.drop_table('facility')
# ### end Alembic commands ###
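# Typical invocation (standard Alembic CLI, not part of this revision file):
#
#     alembic upgrade 9474324542c6   # creates facility, register and case
#     alembic downgrade base         # drops the three tables again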
|
gpl-3.0
|
40223240/cadb_g3_0420
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_setups.py
|
791
|
16440
|
import io
import sys
import unittest
def resultFactory(*_):
return unittest.TestResult()
class TestSetups(unittest.TestCase):
def getRunner(self):
return unittest.TextTestRunner(resultclass=resultFactory,
stream=io.StringIO())
def runTests(self, *cases):
suite = unittest.TestSuite()
for case in cases:
tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
# creating a nested suite exposes some potential bugs
realSuite = unittest.TestSuite()
realSuite.addTest(suite)
# adding empty suites to the end exposes potential bugs
suite.addTest(unittest.TestSuite())
realSuite.addTest(unittest.TestSuite())
return runner.run(realSuite)
def test_setup_class(self):
class Test(unittest.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest.skip("hop")(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest.TestSuite((Test1('testOne'),))
second = unittest.TestSuite((Test1('testTwo'),))
third = unittest.TestSuite((Test2('testOne'),))
fourth = unittest.TestSuite((Test2('testTwo'),))
fifth = unittest.TestSuite((Test3('testOne'),))
sixth = unittest.TestSuite((Test3('testTwo'),))
suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results,
['Module1.setUpModule', 'setup 1',
'Test1.testOne', 'Test1.testTwo', 'teardown 1',
'setup 2', 'Test2.testOne', 'Test2.testTwo',
'teardown 2', 'Module1.tearDownModule',
'Module2.setUpModule', 'setup 3',
'Test3.testOne', 'Test3.testTwo',
'teardown 3', 'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
raise unittest.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest.TestSuite()
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
with self.assertRaisesRegex(Exception, msg):
suite.debug()
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
brunogamacatao/portalsaladeaula
|
simplejson/ordered_dict.py
|
1039
|
3370
|
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
try:
all
except NameError:
def all(seq):
for elem in seq:
if not elem:
return False
return True
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
# Modified from original to support Python 2.4, see
# http://code.google.com/p/simplejson/issues/detail?id=53
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(p==q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
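# Quick illustration of the insertion-order guarantee (hypothetical values):
#
#     d = OrderedDict([('b', 1), ('a', 2)])
#     d['c'] = 3
#     assert d.keys() == ['b', 'a', 'c']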
|
bsd-3-clause
|
asimshankar/tensorflow
|
tensorflow/python/eager/execution_callbacks.py
|
3
|
12617
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execution Callbacks for Eager Mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import enum # pylint: disable=g-bad-import-order
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import execute
from tensorflow.python.platform import tf_logging as logging
class ExecutionCallback(enum.Enum):
"""Valid callback actions.
These can be passed to `seterr` or `errstate` to create callbacks when
specific events occur (e.g. an operation produces `NaN`s).
IGNORE: take no action.
PRINT: print a warning to `stdout`.
RAISE: raise an error (e.g. `InfOrNanError`).
WARN: print a warning using `tf.logging.warn`.
"""
IGNORE = "ignore"
PRINT = "print"
RAISE = "raise"
WARN = "warn"
_DEFAULT_CALLBACK_ACTION = ExecutionCallback.RAISE
# TODO(cais): Consider moving this exception class to errors_impl.py.
class InfOrNanError(Exception):
"""Exception for inf and/or nan being present in tensor."""
def __init__(self,
op_type,
op_name,
output_index,
num_outputs,
value):
"""Constructor of InfOrNanError.
Args:
op_type: Type name of the op that generated the tensor that generated the
`inf`(s) or `nan`(s) (e.g., `Div`).
op_name: Name of the op that generated the tensor with `inf`(s) or
`nan`(s). This name is set by client and can be `None` if it is unset.
output_index: The 0-based output index of the tensor that contains
`inf`(s) or `nan`(s).
num_outputs: Total number of outputs of the operation.
value: The tensor value that contains `inf`(s) or `nan`(s).
"""
self._op_type = op_type
self._op_name = op_name
self._output_index = output_index
self._num_outputs = num_outputs
self._value = value
self._total_count = np.size(value)
self._inf_count = np.count_nonzero(np.isinf(value))
self._nan_count = np.count_nonzero(np.isnan(value))
super(InfOrNanError, self).__init__(self._get_error_message())
def _get_error_message(self):
"""Get the error message describing this InfOrNanError object."""
name_str = (("'%s'" % self._op_name) if self._op_name is not None
else str(self._op_name))
msg = "Output %d of %d of TFE operation %s (name: %s) contains " % (
self._output_index + 1, self._num_outputs, self._op_type, name_str)
if self._inf_count and self._nan_count:
msg += "%d inf(s) and %d nan(s) " % (self._inf_count, self._nan_count)
elif self._inf_count:
msg += "%d inf(s) " % self._inf_count
else:
msg += "%d nan(s) " % self._nan_count
msg += "out of a total of %d element(s). Tensor value: %s" % (
self._total_count, self._value)
return msg
@property
def op_type(self):
return self._op_type
@property
def op_name(self):
return self._op_name
@property
def output_index(self):
return self._output_index
@property
def num_outputs(self):
return self._num_outputs
@property
def value(self):
return self._value
def inf_nan_callback(op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=True,
check_nan=True,
action=_DEFAULT_CALLBACK_ACTION):
"""An execution callback that checks for `inf`s and `nan`s in output tensors.
This callback can be used with `tfe.add_execute_callback` to check for invalid
numeric values. E.g.,
```python
tfe.add_execute_callback(tfe.inf_nan_callback)
```
Args:
op_type: Name of the TFE operation type (e.g., `MatMul`).
inputs: The `list` of input tensors to the operation, currently unused by
this callback.
attrs: Attributes of the TFE operation, as a tuple of alternating attribute
names and attribute values.
outputs: The `list` of output tensors from the operation, checked by this
callback for `inf` and `nan` values.
    op_name: Name of the TFE operation. This name is set by the client and
      can be `None` if it is unset.
check_inf: (`bool`) Whether this callback should check for `inf` values in
the output tensor values.
check_nan: (`bool`) Whether this callback should check for `nan` values in
the output tensor values.
action: (`ExecutionCallback`) Action to be taken by the callback when
`inf` or `nan` values are detected.
Raises:
InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and
`action` is `"raise"`.
ValueError: iff the value of `action` is invalid.
"""
del attrs, inputs # Not used.
action = ExecutionCallback(action)
ctx = context.context()
for index, output in enumerate(outputs):
if not output.dtype.is_numpy_compatible:
continue
numpy_dtype = output.dtype.as_numpy_dtype
if (np.issubdtype(numpy_dtype, np.floating) or
np.issubdtype(numpy_dtype, np.complex) or
np.issubdtype(numpy_dtype, np.integer)):
try:
check_numerics_op_attrs = (
"message", "Eager-mode inf/nan check",
"T", outputs[0].dtype.as_datatype_enum)
# TODO(cais): Consider moving this into execute.py.
# pylint: disable=protected-access
pywrap_tensorflow.TFE_Py_Execute(
ctx._handle, output.device, "CheckNumerics", [output],
check_numerics_op_attrs, 1)
# pylint: enable=protected-access
except core._NotOkStatusException: # pylint: disable=protected-access
value = output.numpy()
inf_detected = np.any(np.isinf(value)) and check_inf
nan_detected = np.any(np.isnan(value)) and check_nan
if not inf_detected and not nan_detected:
continue
error = InfOrNanError(op_type, op_name, index, len(outputs), value)
if action == ExecutionCallback.PRINT:
print("Warning: %s" % str(error))
elif action == ExecutionCallback.WARN:
logging.warn(str(error))
elif action == ExecutionCallback.RAISE:
raise error
else:
raise ValueError(
"Invalid action for inf_nan_callback: %s. Valid actions are: "
"{PRINT | WARN | RAISE}" % action)
def inf_callback(op_type,
inputs,
attrs,
outputs,
op_name,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `inf`s only."""
inf_nan_callback(
op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=True,
check_nan=False,
action=action)
def nan_callback(op_type,
inputs,
attrs,
outputs,
op_name,
action=_DEFAULT_CALLBACK_ACTION):
"""A specialization of `inf_nan_callback` that checks for `nan`s only."""
inf_nan_callback(
op_type,
inputs,
attrs,
outputs,
op_name,
check_inf=False,
check_nan=True,
action=action)
def add_execution_callback(callback):
"""Add an execution callback to the default eager context.
An execution callback is invoked immediately after an eager operation or
  function has finished execution, providing access to the op's type, name,
  input and output tensors. Multiple execution callbacks can be added, in
which case the callbacks will be invoked in the order in which they are
added. To clear all execution callbacks that have been added, use
`clear_execution_callbacks()`.
Example:
```python
def print_even_callback(op_type, op_name, attrs, inputs, outputs):
# A callback that prints only the even output values.
if outputs[0].numpy() % 2 == 0:
print("Even output from %s: %s" % (op_name or op_type, outputs))
tfe.add_execution_callback(print_even_callback)
x = tf.pow(2.0, 3.0) - 3.0
y = tf.multiply(x, tf.add(1.0, 5.0))
# When the line above is run, you will see all intermediate outputs that are
# even numbers printed to the console.
tfe.clear_execution_callbacks()
```
Args:
callback: a callable of the signature
`f(op_type, op_name, attrs, inputs, outputs)`.
`op_type` is the type of the operation that was just executed (e.g.,
`MatMul`).
`op_name` is the name of the operation that was just executed. This
name is set by the client who created the operation and can be `None` if
it is unset.
`attrs` contains the attributes of the operation as a `tuple` of
alternating attribute name and attribute value.
`inputs` is the `list` of input `Tensor`(s) to the op.
`outputs` is the `list` of output `Tensor`(s) from the op.
Return value(s) from the callback are ignored.
"""
execute.execute = execute.execute_with_callbacks
context.context().add_post_execution_callback(callback)
def clear_execution_callbacks():
"""Clear all execution callbacks from the default eager context."""
context.context().clear_post_execution_callbacks()
def seterr(inf_or_nan=None):
"""Set how abnormal conditions are handled by the default eager context.
Example:
```python
tfe.seterr(inf_or_nan=ExecutionCallback.RAISE)
a = tf.constant(10.0)
b = tf.constant(0.0)
try:
c = a / b # <-- Raises InfOrNanError.
except Exception as e:
print("Caught Exception: %s" % e)
tfe.seterr(inf_or_nan=ExecutionCallback.IGNORE)
c = a / b # <-- Does NOT raise exception anymore.
```
Args:
inf_or_nan: An `ExecutionCallback` determining the action for infinity
(`inf`) and NaN (`nan`) values. A value of `None` leads to no change in
the action of the condition.
Returns:
A dictionary of old actions.
Raises:
ValueError: If the value of any keyword arguments is invalid.
"""
inf_or_nan = ExecutionCallback(inf_or_nan) if inf_or_nan is not None else None
old_settings = {"inf_or_nan": ExecutionCallback.IGNORE}
default_context = context.context()
carryover_callbacks = []
for callback in default_context.post_execution_callbacks:
# Check whether the callback is inf_nan_callback or a partial object of
# inf_nan_callback.
if (callback == inf_nan_callback or
isinstance(callback, functools.partial) and
callback.func == inf_nan_callback):
if callback == inf_nan_callback:
old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION
else:
old_settings["inf_or_nan"] = callback.keywords.get(
"action", _DEFAULT_CALLBACK_ACTION)
elif inf_or_nan is not None:
carryover_callbacks.append(callback)
if inf_or_nan is not None:
default_context.clear_post_execution_callbacks()
for callback in carryover_callbacks:
default_context.add_post_execution_callback(callback)
if inf_or_nan != ExecutionCallback.IGNORE:
default_context.add_post_execution_callback(
functools.partial(inf_nan_callback, action=inf_or_nan))
return old_settings
@contextlib.contextmanager
def errstate(inf_or_nan=None):
"""Context manager setting error state.
Example:
```
c = tf.log(0.) # -inf
with errstate(inf_or_nan=ExecutionCallback.RAISE):
tf.log(0.) # <-- Raises InfOrNanError.
```
Args:
inf_or_nan: An `ExecutionCallback` determining the action for infinity
(`inf`) and NaN (`nan`) values. A value of `None` leads to no change in
the action of the condition.
Yields:
None.
Raises:
ValueError: If the value of any keyword arguments is invalid.
"""
if not context.executing_eagerly():
yield
else:
old_settings = seterr(inf_or_nan=inf_or_nan)
yield
seterr(**old_settings)
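# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The specialized
# `inf_callback`/`nan_callback` helpers above carry no example of their own;
# with eager execution enabled they can be registered the same way `seterr`
# registers the combined callback, e.g.:
#
#   add_execution_callback(nan_callback)
#   add_execution_callback(
#       functools.partial(inf_callback, action=ExecutionCallback.WARN))
#   clear_execution_callbacks()  # remove all registered callbacks again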
|
apache-2.0
|
BeenzSyed/tempest
|
tempest/services/compute/v3/xml/tenant_usages_client.py
|
1
|
1926
|
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import xml_to_json
class TenantUsagesV3ClientXML(RestClientXML):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(TenantUsagesV3ClientXML, self).__init__(config, username,
password, auth_url,
tenant_name)
self.service = self.config.compute.catalog_v3_type
def _parse_array(self, node):
json = xml_to_json(node)
return json
def list_tenant_usages(self, params=None):
url = 'os-simple-tenant-usage'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
tenant_usage = self._parse_array(etree.fromstring(body))
return resp, tenant_usage['tenant_usage']
def get_tenant_usage(self, tenant_id, params=None):
url = 'os-simple-tenant-usage/%s' % tenant_id
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
tenant_usage = self._parse_array(etree.fromstring(body))
return resp, tenant_usage
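# Hedged usage sketch (not part of the original client). The parameters below
# are placeholders that would come from a real tempest configuration.
def _example_tenant_usage_calls(config, username, password, auth_url,
                                tenant_id):
    client = TenantUsagesV3ClientXML(config, username, password, auth_url)
    # Usage records for all tenants, optionally filtered by query parameters.
    resp, usages = client.list_tenant_usages({'detailed': 1})
    # Usage record for a single tenant.
    resp, usage = client.get_tenant_usage(tenant_id)
    return usages, usage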
|
apache-2.0
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/font-bitstream-type1/package.py
|
3
|
2117
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class FontBitstreamType1(Package):
"""X.org bitstream-type1 font."""
homepage = "http://cgit.freedesktop.org/xorg/font/bitstream-type1"
url = "https://www.x.org/archive/individual/font/font-bitstream-type1-1.0.3.tar.gz"
version('1.0.3', 'ff91738c4d3646d7999e00aa9923f2a0')
depends_on('font-util')
depends_on('fontconfig', type='build')
depends_on('mkfontdir', type='build')
depends_on('mkfontscale', type='build')
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make('install')
# `make install` copies the files to the font-util installation.
# Create a fake directory to convince Spack that we actually
# installed something.
mkdir(prefix.lib)
|
lgpl-2.1
|
mcgachey/edx-platform
|
lms/djangoapps/course_blocks/transformers/tests/test_split_test.py
|
8
|
8262
|
"""
Tests for SplitTestTransformer.
"""
import ddt
import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from student.tests.factories import CourseEnrollmentFactory
from xmodule.partitions.partitions import Group, UserPartition
from xmodule.modulestore.tests.factories import check_mongo_calls, check_mongo_calls_range
from ...api import get_course_blocks
from ..user_partitions import UserPartitionTransformer, _get_user_partition_groups
from .test_helpers import CourseStructureTestCase, create_location
@ddt.ddt
class SplitTestTransformerTestCase(CourseStructureTestCase):
"""
SplitTestTransformer Test
"""
TEST_PARTITION_ID = 0
def setUp(self):
"""
Setup course structure and create user for split test transformer test.
"""
super(SplitTestTransformerTestCase, self).setUp()
# Set up user partitions and groups.
self.groups = [Group(1, 'Group 1'), Group(2, 'Group 2'), Group(3, 'Group 3')]
self.split_test_user_partition_id = self.TEST_PARTITION_ID
self.split_test_user_partition = UserPartition(
id=self.split_test_user_partition_id,
name='Split Partition',
description='This is split partition',
groups=self.groups,
scheme=RandomUserPartitionScheme
)
self.split_test_user_partition.scheme.name = "random"
# Build course.
self.course_hierarchy = self.get_course_hierarchy()
self.blocks = self.build_course(self.course_hierarchy)
self.course = self.blocks['course']
# Enroll user in course.
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, is_active=True)
self.transformer = UserPartitionTransformer()
def get_course_hierarchy(self):
"""
Get a course hierarchy to test with.
Assumes self.split_test_user_partition has already been initialized.
Returns: dict[course_structure]
"""
org_name = 'SplitTestTransformer'
course_name = 'ST101F'
run_name = 'test_run'
def location(block_ref, block_type='vertical'):
"""
Returns the usage key for the given block_type and block reference string in the test course.
"""
return create_location(
org_name, course_name, run_name, block_type, self.create_block_id(block_type, block_ref)
)
# course
# / | \
# / | \
# A BSplit CSplit
# / \ / | \ | \
# / \ / | \ | \
# D E[1] F[2] G[3] H[1] I[2]
# / \ \ |
# / \ \ |
# J KSplit \ L
# / | \ / \
# / | \ / \
# M[2] N[3] O P
#
return [
{
'org': org_name,
'course': course_name,
'run': run_name,
'user_partitions': [self.split_test_user_partition],
'#type': 'course',
'#ref': 'course',
},
{
'#type': 'vertical',
'#ref': 'A',
'#children': [{'#type': 'vertical', '#ref': 'D'}],
},
{
'#type': 'split_test',
'#ref': 'BSplit',
'metadata': {'category': 'split_test'},
'user_partition_id': self.TEST_PARTITION_ID,
'group_id_to_child': {
'1': location('E'),
'2': location('F'),
'3': location('G'),
},
'#children': [{'#type': 'vertical', '#ref': 'G'}],
},
{
'#type': 'vertical',
'#ref': 'E',
'#parents': ['A', 'BSplit'],
},
{
'#type': 'vertical',
'#ref': 'F',
'#parents': ['BSplit'],
'#children': [
{'#type': 'vertical', '#ref': 'J'},
],
},
{
'#type': 'split_test',
'#ref': 'KSplit',
'metadata': {'category': 'split_test'},
'user_partition_id': self.TEST_PARTITION_ID,
'group_id_to_child': {
'2': location('M'),
'3': location('N'),
},
'#parents': ['F'],
'#children': [
{'#type': 'vertical', '#ref': 'M'},
{'#type': 'vertical', '#ref': 'N'},
],
},
{
'#type': 'split_test',
'#ref': 'CSplit',
'metadata': {'category': 'split_test'},
'user_partition_id': self.TEST_PARTITION_ID,
'group_id_to_child': {
'1': location('H'),
'2': location('I'),
},
'#children': [
{'#type': 'vertical', '#ref': 'I'},
{
'#type': 'vertical',
'#ref': 'H',
'#children': [
{
'#type': 'vertical',
'#ref': 'L',
'#children': [{'#type': 'vertical', '#ref': 'P'}],
},
],
},
],
},
{
'#type': 'vertical',
'#ref': 'O',
'#parents': ['G', 'L'],
},
]
@ddt.data(
# Note: Theoretically, block E should be accessible by users
# not in Group 1, since there's an open path through block A.
# Since the split_test transformer automatically sets the block
# access on its children, it bypasses the paths via other
# parents. However, we don't think this is a use case we need to
# support for split_test components (since they are now deprecated
# in favor of content groups and user partitions).
(1, ('course', 'A', 'D', 'E', 'H', 'L', 'O', 'P',)),
(2, ('course', 'A', 'D', 'F', 'J', 'M', 'I',)),
(3, ('course', 'A', 'D', 'G', 'O',)),
)
@ddt.unpack
def test_user(self, group_id, expected_blocks):
course_tag_api.set_course_tag(
self.user,
self.course.id,
RandomUserPartitionScheme.key_for_partition(self.split_test_user_partition),
group_id,
)
block_structure1 = get_course_blocks(
self.user,
self.course.location,
transformers={self.transformer},
)
self.assertEqual(
set(block_structure1.get_block_keys()),
set(self.get_block_key_set(self.blocks, *expected_blocks)),
)
def test_user_randomly_assigned(self):
# user was randomly assigned to one of the groups
user_groups = _get_user_partition_groups( # pylint: disable=protected-access
self.course.id, [self.split_test_user_partition], self.user
)
self.assertEquals(len(user_groups), 1)
# calling twice should result in the same block set
with check_mongo_calls_range(min_finds=1):
block_structure1 = get_course_blocks(
self.user,
self.course.location,
transformers={self.transformer},
)
with check_mongo_calls(0):
block_structure2 = get_course_blocks(
self.user,
self.course.location,
transformers={self.transformer},
)
self.assertEqual(
set(block_structure1.get_block_keys()),
set(block_structure2.get_block_keys()),
)
|
agpl-3.0
|
ardi69/pyload-0.4.10
|
lib/Python/Lib/PIL/ImageQt.py
|
20
|
2888
|
#
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
# 2013-11-13 fl: add support for Qt5 ([email protected])
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
from PIL._util import isPath
import sys
if 'PyQt4.QtGui' not in sys.modules:
try:
from PyQt5.QtGui import QImage, qRgba
except:
try:
from PyQt4.QtGui import QImage, qRgba
except:
from PySide.QtGui import QImage, qRgba
else: #PyQt4 is used
from PyQt4.QtGui import QImage, qRgba
##
# (Internal) Turns an RGB color into a Qt compatible color integer.
def rgb(r, g, b, a=255):
# use qRgb to pack the colors, and then turn the resulting long
# into a negative integer with the same bitpattern.
return (qRgba(r, g, b, a) & 0xffffffff)
##
# A PIL image wrapper for Qt. This is a subclass of PyQt4's QImage
# class.
#
# @param im A PIL Image object, or a file name (given either as Python
# string or a PyQt string object).
class ImageQt(QImage):
def __init__(self, im):
data = None
colortable = None
# handle filename, if given instead of image name
if hasattr(im, "toUtf8"):
# FIXME - is this really the best way to do this?
im = unicode(im.toUtf8(), "utf-8")
if isPath(im):
im = Image.open(im)
if im.mode == "1":
format = QImage.Format_Mono
elif im.mode == "L":
format = QImage.Format_Indexed8
colortable = []
for i in range(256):
colortable.append(rgb(i, i, i))
elif im.mode == "P":
format = QImage.Format_Indexed8
colortable = []
palette = im.getpalette()
for i in range(0, len(palette), 3):
colortable.append(rgb(*palette[i:i+3]))
elif im.mode == "RGB":
data = im.tobytes("raw", "BGRX")
format = QImage.Format_RGB32
elif im.mode == "RGBA":
try:
data = im.tobytes("raw", "BGRA")
except SystemError:
# workaround for earlier versions
r, g, b, a = im.split()
im = Image.merge("RGBA", (b, g, r, a))
format = QImage.Format_ARGB32
else:
raise ValueError("unsupported image mode %r" % im.mode)
# must keep a reference, or Qt will crash!
self.__data = data or im.tobytes()
QImage.__init__(self, self.__data, im.size[0], im.size[1], format)
if colortable:
self.setColorTable(colortable)
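# Hedged usage sketch (not part of the original module). "example.png" is a
# placeholder path; a real program normally creates a QApplication before the
# resulting image is displayed or painted.
def _example_wrap_pil_image(path="example.png"):
    pil_image = Image.open(path)   # any of the modes handled above
    qt_image = ImageQt(pil_image)  # usable wherever Qt expects a QImage
    return qt_image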
|
gpl-3.0
|
ImaginaryLandscape/django-filer
|
filer/admin/clipboardadmin.py
|
8
|
4529
|
#-*- coding: utf-8 -*-
from django.forms.models import modelform_factory
from django.contrib import admin
from django.http import HttpResponse
from django.utils import simplejson
from django.views.decorators.csrf import csrf_exempt
from filer import settings as filer_settings
from filer.models import Clipboard, ClipboardItem
from filer.utils.files import handle_upload, UploadException
from filer.utils.loader import load_object
# ModelAdmins
class ClipboardItemInline(admin.TabularInline):
model = ClipboardItem
class ClipboardAdmin(admin.ModelAdmin):
model = Clipboard
inlines = [ClipboardItemInline]
filter_horizontal = ('files',)
raw_id_fields = ('user',)
verbose_name = "DEBUG Clipboard"
verbose_name_plural = "DEBUG Clipboards"
def get_urls(self):
try:
# django >=1.4
from django.conf.urls import patterns, url
except ImportError:
# django <1.4
from django.conf.urls.defaults import patterns, url
urls = super(ClipboardAdmin, self).get_urls()
from filer import views
url_patterns = patterns('',
url(r'^operations/paste_clipboard_to_folder/$',
self.admin_site.admin_view(views.paste_clipboard_to_folder),
name='filer-paste_clipboard_to_folder'),
url(r'^operations/discard_clipboard/$',
self.admin_site.admin_view(views.discard_clipboard),
name='filer-discard_clipboard'),
url(r'^operations/delete_clipboard/$',
self.admin_site.admin_view(views.delete_clipboard),
name='filer-delete_clipboard'),
            # upload does its own permission stuff (because of the stupid
# flash missing cookie stuff)
url(r'^operations/upload/$',
self.ajax_upload,
name='filer-ajax_upload'),
)
url_patterns.extend(urls)
return url_patterns
@csrf_exempt
def ajax_upload(self, request, folder_id=None):
"""
        Receives an upload from the uploader. Receives only one file at a time.
"""
mimetype = "application/json" if request.is_ajax() else "text/html"
try:
upload, filename, is_raw = handle_upload(request)
            # Get clipboard
clipboard = Clipboard.objects.get_or_create(user=request.user)[0]
# find the file type
for filer_class in filer_settings.FILER_FILE_MODELS:
FileSubClass = load_object(filer_class)
#TODO: What if there are more than one that qualify?
if FileSubClass.matches_file_type(filename, upload, request):
FileForm = modelform_factory(
model = FileSubClass,
fields = ('original_filename', 'owner', 'file')
)
break
uploadform = FileForm({'original_filename': filename,
'owner': request.user.pk},
{'file': upload})
if uploadform.is_valid():
file_obj = uploadform.save(commit=False)
# Enforce the FILER_IS_PUBLIC_DEFAULT
file_obj.is_public = filer_settings.FILER_IS_PUBLIC_DEFAULT
file_obj.save()
clipboard_item = ClipboardItem(
clipboard=clipboard, file=file_obj)
clipboard_item.save()
json_response = {
'thumbnail': file_obj.icons['32'],
'alt_text': '',
'label': unicode(file_obj),
}
return HttpResponse(simplejson.dumps(json_response),
mimetype=mimetype)
else:
form_errors = '; '.join(['%s: %s' % (
field,
', '.join(errors)) for field, errors in uploadform.errors.items()
])
raise UploadException("AJAX request not valid: form invalid '%s'" % (form_errors,))
except UploadException, e:
return HttpResponse(simplejson.dumps({'error': unicode(e)}),
mimetype=mimetype)
def get_model_perms(self, request):
"""
It seems this is only used for the list view. NICE :-)
"""
return {
'add': False,
'change': False,
'delete': False,
}
|
bsd-3-clause
|
Mj258/weiboapi
|
srapyDemo/envs/Lib/site-packages/win32/Demos/RegRestoreKey.py
|
34
|
1832
|
import win32api, win32security
import win32con, ntsecuritycon, winnt
import os
temp_dir=win32api.GetTempPath()
fname=win32api.GetTempFileName(temp_dir,'rsk')[0]
print fname
## file can't exist
os.remove(fname)
## enable backup and restore privs
required_privs = ((win32security.LookupPrivilegeValue('',ntsecuritycon.SE_BACKUP_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('',ntsecuritycon.SE_RESTORE_NAME),win32con.SE_PRIVILEGE_ENABLED)
)
ph = win32api.GetCurrentProcess()
th = win32security.OpenProcessToken(ph, win32con.TOKEN_READ|win32con.TOKEN_ADJUST_PRIVILEGES)
adjusted_privs=win32security.AdjustTokenPrivileges(th,0,required_privs)
try:
sa=win32security.SECURITY_ATTRIBUTES()
my_sid = win32security.GetTokenInformation(th,ntsecuritycon.TokenUser)[0]
sa.SECURITY_DESCRIPTOR.SetSecurityDescriptorOwner(my_sid,0)
k, disp=win32api.RegCreateKeyEx(win32con.HKEY_CURRENT_USER, 'Python test key', SecurityAttributes=sa,
samDesired=win32con.KEY_ALL_ACCESS, Class='some class', Options=0)
win32api.RegSetValue(k, None, win32con.REG_SZ, 'Default value for python test key')
subk, disp=win32api.RegCreateKeyEx(k, 'python test subkey', SecurityAttributes=sa,
samDesired=win32con.KEY_ALL_ACCESS, Class='some other class', Options=0)
win32api.RegSetValue(subk, None, win32con.REG_SZ, 'Default value for subkey')
win32api.RegSaveKeyEx(k, fname, Flags=winnt.REG_STANDARD_FORMAT, SecurityAttributes=sa)
restored_key, disp=win32api.RegCreateKeyEx(win32con.HKEY_CURRENT_USER, 'Python test key(restored)', SecurityAttributes=sa,
samDesired=win32con.KEY_ALL_ACCESS, Class='restored class', Options=0)
win32api.RegRestoreKey(restored_key, fname)
finally:
win32security.AdjustTokenPrivileges(th, 0, adjusted_privs)
|
mit
|
gx1997/chrome-loongson
|
tools/json_schema_compiler/h_generator.py
|
5
|
10705
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
import model
import os
class HGenerator(object):
"""A .h generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator):
self._cpp_type_generator = cpp_type_generator
self._namespace = namespace
self._target_namespace = (
self._cpp_type_generator.GetCppNamespaceName(self._namespace))
def Generate(self):
"""Generates a Code object with the .h for a single namespace.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
)
ifndef_name = cpp_util.GenerateIfndefName(self._namespace.source_file_dir,
self._target_namespace)
(c.Append('#ifndef %s' % ifndef_name)
.Append('#define %s' % ifndef_name)
.Append('#pragma once')
.Append()
.Append('#include <string>')
.Append('#include <vector>')
.Append()
.Append('#include "base/basictypes.h"')
.Append('#include "base/memory/linked_ptr.h"')
.Append('#include "base/memory/scoped_ptr.h"')
.Append('#include "base/values.h"')
.Append('#include "tools/json_schema_compiler/any.h"')
.Append()
)
c.Concat(self._cpp_type_generator.GetRootNamespaceStart())
# TODO(calamity): These forward declarations should be #includes to allow
# $ref types from other files to be used as required params. This requires
# some detangling of windows and tabs which will currently lead to circular
# #includes.
forward_declarations = (
self._cpp_type_generator.GenerateForwardDeclarations())
if not forward_declarations.IsEmpty():
(c.Append()
.Concat(forward_declarations)
.Append()
)
c.Concat(self._cpp_type_generator.GetNamespaceStart())
c.Append()
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for property in self._namespace.properties.values():
property_code = self._cpp_type_generator.GeneratePropertyValues(
property,
'extern const %(type)s %(name)s;')
if property_code:
c.Concat(property_code).Append()
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
)
for type_ in self._FieldDependencyOrder():
(c.Concat(self._GenerateType(type_))
.Append()
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
(c.Concat(self._GenerateFunction(function))
.Append()
)
(c.Concat(self._cpp_type_generator.GetNamespaceEnd())
.Concat(self._cpp_type_generator.GetRootNamespaceEnd())
.Append()
.Append('#endif // %s' % ifndef_name)
.Append()
)
return c
def _FieldDependencyOrder(self):
"""Generates the list of types in the current namespace in an order in which
depended-upon types appear before types which depend on them.
"""
dependency_order = []
def ExpandType(path, type_):
if type_ in path:
raise ValueError("Illegal circular dependency via cycle " +
", ".join(map(lambda x: x.name, path + [type_])))
for prop in type_.properties.values():
if not prop.optional and prop.type_ == PropertyType.REF:
ExpandType(path + [type_], self._namespace.types[prop.ref_type])
if not type_ in dependency_order:
dependency_order.append(type_)
for type_ in self._namespace.types.values():
ExpandType([], type_)
return dependency_order
def _GenerateEnumDeclaration(self, enum_name, prop, values):
"""Generate the declaration of a C++ enum for the given property and
values.
"""
c = Code()
c.Sblock('enum %s {' % enum_name)
if prop.optional:
c.Append(self._cpp_type_generator.GetEnumNoneValue(prop) + ',')
for value in values:
c.Append(self._cpp_type_generator.GetEnumValue(prop, value) + ',')
(c.Eblock('};')
.Append()
)
return c
def _GenerateFields(self, props):
"""Generates the field declarations when declaring a type.
"""
c = Code()
# Generate the enums needed for any fields with "choices"
for prop in props:
if prop.type_ == PropertyType.CHOICES:
enum_name = self._cpp_type_generator.GetChoicesEnumType(prop)
c.Append('%s %s_type;' % (enum_name, prop.unix_name))
c.Append()
for prop in self._cpp_type_generator.GetExpandedChoicesInParams(props):
if prop.description:
c.Comment(prop.description)
c.Append('%s %s;' % (
self._cpp_type_generator.GetType(prop, wrap_optional=True),
prop.unix_name))
c.Append()
return c
def _GenerateType(self, type_):
"""Generates a struct for a type.
"""
classname = cpp_util.Classname(type_.name)
c = Code()
if type_.functions:
# Types with functions are not instantiable in C++ because they are
# handled in pure Javascript and hence have no properties or
# additionalProperties.
if type_.properties:
raise NotImplementedError('\n'.join(model.GetModelHierarchy(type_)) +
'\nCannot generate both functions and properties on a type')
c.Sblock('namespace %(classname)s {')
for function in type_.functions.values():
(c.Concat(self._GenerateFunction(function))
.Append()
)
c.Eblock('}')
elif type_.type_ == PropertyType.ARRAY:
if type_.description:
c.Comment(type_.description)
c.Append('typedef std::vector<%(item_type)s> %(classname)s;')
c.Substitute({'classname': classname, 'item_type':
self._cpp_type_generator.GetType(type_.item_type,
wrap_optional=True)})
elif type_.type_ == PropertyType.STRING:
if type_.description:
c.Comment(type_.description)
c.Append('typedef std::string %(classname)s;')
c.Substitute({'classname': classname})
else:
if type_.description:
c.Comment(type_.description)
(c.Sblock('struct %(classname)s {')
.Append('~%(classname)s();')
.Append('%(classname)s();')
.Append()
.Concat(self._GeneratePropertyStructures(type_.properties.values()))
.Concat(self._GenerateFields(type_.properties.values()))
)
if type_.from_json:
(c.Comment('Populates a %s object from a Value. Returns'
' whether |out| was successfully populated.' % classname)
.Append(
'static bool Populate(const Value& value, %(classname)s* out);')
.Append()
)
if type_.from_client:
(c.Comment('Returns a new DictionaryValue representing the'
' serialized form of this %s object. Passes '
'ownership to caller.' % classname)
.Append('scoped_ptr<DictionaryValue> ToValue() const;')
)
(c.Eblock()
.Sblock(' private:')
.Append('DISALLOW_COPY_AND_ASSIGN(%(classname)s);')
.Eblock('};')
)
c.Substitute({'classname': classname})
return c
def _GenerateFunction(self, function):
"""Generates the structs for a function.
"""
c = Code()
(c.Sblock('namespace %s {' % cpp_util.Classname(function.name))
.Concat(self._GenerateFunctionParams(function))
.Append()
)
if function.callback:
(c.Concat(self._GenerateFunctionResult(function))
.Append()
)
c.Eblock('};')
return c
def _GenerateFunctionParams(self, function):
"""Generates the struct for passing parameters into a function.
"""
c = Code()
if function.params:
(c.Sblock('struct Params {')
.Concat(self._GeneratePropertyStructures(function.params))
.Concat(self._GenerateFields(function.params))
.Append('~Params();')
.Append()
.Append('static scoped_ptr<Params> Create(const ListValue& args);')
.Eblock()
.Sblock(' private:')
.Append('Params();')
.Append()
.Append('DISALLOW_COPY_AND_ASSIGN(Params);')
.Eblock('};')
)
return c
def _GeneratePropertyStructures(self, props):
"""Generate the structures required by a property such as OBJECT classes
and enums.
"""
c = Code()
for prop in props:
if prop.type_ == PropertyType.OBJECT:
c.Concat(self._GenerateType(prop))
c.Append()
elif prop.type_ == PropertyType.CHOICES:
c.Concat(self._GenerateEnumDeclaration(
self._cpp_type_generator.GetChoicesEnumType(prop),
prop,
[choice.type_.name for choice in prop.choices.values()]))
c.Concat(self._GeneratePropertyStructures(prop.choices.values()))
elif prop.type_ == PropertyType.ENUM:
enum_name = self._cpp_type_generator.GetType(prop)
c.Concat(self._GenerateEnumDeclaration(
enum_name,
prop,
prop.enum_values))
c.Append('static scoped_ptr<Value> CreateEnumValue(%s %s);' %
(enum_name, prop.unix_name))
return c
def _GenerateFunctionResult(self, function):
"""Generates functions for passing a function's result back.
"""
c = Code()
c.Sblock('namespace Result {')
params = function.callback.params
if not params:
c.Append('Value* Create();')
else:
c.Concat(self._GeneratePropertyStructures(params))
# If there is a single parameter, this is straightforward. However, if
# the callback parameter is of 'choices', this generates a Create method
# for each choice. This works because only 1 choice can be returned at a
# time.
for param in self._cpp_type_generator.GetExpandedChoicesInParams(params):
if param.description:
c.Comment(param.description)
if param.type_ == PropertyType.ANY:
c.Comment("Value* Result::Create(Value*) not generated "
"because it's redundant.")
continue
c.Append('Value* Create(const %s);' % cpp_util.GetParameterDeclaration(
param, self._cpp_type_generator.GetType(param)))
c.Eblock('};')
return c
|
bsd-3-clause
|
ohsu-computational-biology/server
|
tests/unit/test_imports.py
|
1
|
21810
|
"""
Tests that the project's module import graph conforms to certain policies
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import fnmatch
import itertools
import logging
import operator
import os
import pprint
import unittest
import sys
from snakefood.util import iter_pyfiles, setup_logging, is_python
from snakefood.find import find_dependencies
from snakefood.find import ERROR_IMPORT, ERROR_SYMBOL, ERROR_UNUSED
from snakefood.fallback.collections import defaultdict
from snakefood.roots import find_roots, relfile
import tests.utils as utils
class TestImports(unittest.TestCase):
"""
Tests that the import graph:
- doesn't contain any cycles
- doesn't violate layering constraints
"""
@classmethod
def setUpClass(cls):
snakefoodScanner = SnakefoodScanner()
cls.graph = snakefoodScanner.scan()
def testNoCycles(self):
checker = ImportGraphCycleChecker(self.graph)
checker.checkNoCycles()
def testLayering(self):
checker = ImportGraphLayerChecker(self.graph)
checker.checkLayeringEnforced()
##############
# Exceptions #
##############
class ConfigurationException(Exception):
"""
The configuration of a policy checker is invalid
"""
class PolicyException(Exception):
"""
The code violates some enforced policy
"""
class SnakefoodScannerException(Exception):
"""
Something went wrong in the snakefood wrapper
"""
###############
# ImportGraph #
###############
class ImportGraphNodeColor(object):
"""
Node color constants for cycle detection
"""
WHITE = "WHITE" # unvisited
BLACK = "BLACK" # visited
GREY = "GREY" # currently visiting
class ImportGraphNode(object):
"""
A node in the import graph
"""
def __init__(self, entry):
self.name = entry.from_filename
self.deps = set([entry.to_filename])
# below fields are for cycle detection
self.color = ImportGraphNodeColor.WHITE
def __repr__(self):
return "ImportGraphNode: {} -> {}".format(
self.name, repr(list(self.deps)))
class ImportGraph(object):
"""
A directed graph of import relationships.
Nodes are files/modules and edges are dependencies.
"""
def __init__(self):
self.graph = {}
def iterNodes(self):
return self.graph.items()
def getNodeFor(self, name):
return self.graph[name]
def addEntry(self, entry):
if entry.to_filename is None:
return
if entry.from_filename in self.graph:
self.graph[entry.from_filename].deps.add(entry.to_filename)
else:
node = ImportGraphNode(entry)
self.graph[entry.from_filename] = node
def hasAnyDependencies(self, name):
return name in self.graph and len(self.graph[name].deps) != 0
def hasDependencyOn(self, name, dependency):
if name not in self.graph:
return False
return dependency in self.graph[name].deps
def removeDependency(self, name, dependency):
self.graph[name].deps.remove(dependency)
def printGraph(self):
pprint.pprint(self.graph)
############
# Checkers #
############
class ImportGraphLayerChecker(object):
"""
Checks the import graph layering policy
TODO this class could be more efficient with some optimizations,
but as it stands the time for these tests is dominated by file
    operations and parsing the ASTs
"""
excludedPythonFilenames = set(['__init__.py', '_version.py'])
# each file/module is in one and only one moduleGroup
moduleGroupNames = {
'cli': ['ga4gh/cli.py'],
'client': ['ga4gh/client.py'],
'frontend': ['ga4gh/frontend.py', 'ga4gh/repo_manager.py'],
'backend': ['ga4gh/backend.py', 'ga4gh/datarepo.py'],
'exceptions': ['ga4gh/exceptions.py'],
'datamodel': ['ga4gh/datamodel/bio_metadata.py',
'ga4gh/datamodel/reads.py',
'ga4gh/datamodel/references.py',
'ga4gh/datamodel/rna_quantification.py',
'ga4gh/datamodel/variants.py',
'ga4gh/datamodel/datasets.py',
'ga4gh/datamodel/ontologies.py',
'ga4gh/datamodel/obo_parser.py',
'ga4gh/datamodel/sequenceAnnotations.py',
'ga4gh/datamodel/genotype_phenotype.py',
'ga4gh/datamodel/genotype_phenotype_featureset.py',
'ga4gh/gff3Parser.py',
'ga4gh/sqliteBackend.py'],
'libraries': ['ga4gh/converters.py',
'ga4gh/configtest.py'],
'protocol': ['ga4gh/protocol.py',
'ga4gh/pb.py',
'ga4gh/_protocol_version.py',
'ga4gh/_protocol_definitions.py',
'ga4gh/assay_metadata_pb2.py',
'ga4gh/bio_metadata_pb2.py',
'ga4gh/bio_metadata_service_pb2.py',
'ga4gh/common_pb2.py',
'ga4gh/metadata_pb2.py',
'ga4gh/metadata_service_pb2.py',
'ga4gh/read_service_pb2.py',
'ga4gh/reads_pb2.py',
'ga4gh/reference_service_pb2.py',
'ga4gh/references_pb2.py',
'ga4gh/variant_service_pb2.py',
'ga4gh/variants_pb2.py',
'ga4gh/allele_annotations_pb2.py',
'ga4gh/allele_annotation_service_pb2.py',
'ga4gh/sequence_annotations_pb2.py',
'ga4gh/sequence_annotation_service_pb2.py',
'ga4gh/genotype_phenotype_pb2.py',
'ga4gh/genotype_phenotype_service_pb2.py',
'ga4gh/rna_quantification_pb2.py',
'ga4gh/rna_quantification_service_pb2.py',
],
'config': ['ga4gh/serverconfig.py'],
}
# each moduleGroupName has one and only one entry here
layers = [
['cli'],
['client'],
['frontend'],
['backend'],
['libraries'],
['datamodel'],
['exceptions'],
['config'],
['protocol'],
]
def __init__(self, graph):
self._checkConfiguration()
self.graph = graph
self.moduleGroupToOrderIndex = {}
for i, layerRow in enumerate(self.layers):
for moduleGroup in layerRow:
self.moduleGroupToOrderIndex[moduleGroup] = i
self.moduleToModuleGroup = {}
for moduleGroup, modules in self.moduleGroupNames.items():
for module in modules:
self.moduleToModuleGroup[module] = moduleGroup
def checkLayeringEnforced(self):
# rules:
# - no module can import from modules in layers above it
# - no module can import from modules in moduleGroups in
# same layer as it
for layer in self.layers:
for moduleGroup in layer:
modulesInGroup = self.moduleGroupNames[moduleGroup]
self._sameLayerCheck(layer, moduleGroup, modulesInGroup)
self._aboveLayerCheck(layer, moduleGroup, modulesInGroup)
def _allModules(self):
modules = list(itertools.chain(*self.moduleGroupNames.values()))
return modules
def _checkConfiguration(self):
# each module that exists in the file tree appears in moduleGroupNames
pythonFiles = []
for root, dirnames, filenames in os.walk(utils.getGa4ghFilePath()):
for filename in fnmatch.filter(filenames, '*.py'):
pythonFilename = os.path.relpath(
os.path.join(root, filename))
if (pythonFilename not in self.excludedPythonFilenames and
filename not in self.excludedPythonFilenames):
pythonFiles.append(pythonFilename)
modules = self._allModules()
moduleSet = set(modules)
for pythonFile in pythonFiles:
if pythonFile not in moduleSet:
message = "file {} is not listed in moduleGroupNames".format(
pythonFile)
raise ConfigurationException(message)
# each module should only appear once in moduleGroupNames
modules = self._allModules()
moduleSet = set(modules)
if len(modules) != len(moduleSet):
for module in moduleSet:
modules.remove(module)
message = "duplicate module names in moduleGroupNames: {}"
raise ConfigurationException(message.format(', '.join(modules)))
# each moduleGroup should only appear once in layers
# every defined moduleGroup appears in layers
moduleGroups = self.moduleGroupNames.keys()
layersModuleGroups = list(itertools.chain(*self.layers))
if set(moduleGroups) != set(layersModuleGroups):
message = "moduleGroupNames and layer moduleGroups not equal"
raise ConfigurationException(message)
def _layerIndex(self, layerName):
return self.moduleGroupToOrderIndex[layerName]
def _moduleGroupNamesAtSameLayerAs(self, moduleGroup):
layerIndex = self._layerIndex(moduleGroup)
layerCopy = self.layers[layerIndex][::]
layerCopy.remove(moduleGroup)
return layerCopy
def _modulesInModuleGroup(self, moduleGroup):
return self.moduleGroupNames[moduleGroup]
def _modulesAtSameLayerAs(self, moduleGroup):
moduleGroupNamesAtSameLayer = self._moduleGroupNamesAtSameLayerAs(
moduleGroup)
modules = []
for moduleGroupName in moduleGroupNamesAtSameLayer:
layerModules = self._modulesInModuleGroup(moduleGroupName)
modules.extend(layerModules)
return modules
def _modulesAtLayerIndex(self, layerIndex):
modules = []
for moduleGroup in self.layers[layerIndex]:
modules.extend(self._modulesInModuleGroup(moduleGroup))
return modules
def _modulesInLayersAbove(self, moduleGroup):
layerIndex = self._layerIndex(moduleGroup)
layersAbove = self.layers[:layerIndex]
modules = []
for i, layer in enumerate(layersAbove):
layerModules = self._modulesAtLayerIndex(i)
modules.extend(layerModules)
return modules
def _sameLayerCheck(self, layer, moduleGroup, modulesInGroup):
modulesAtSameLayer = self._modulesAtSameLayerAs(moduleGroup)
for module in modulesInGroup:
for sameLayerModule in modulesAtSameLayer:
if self.graph.hasDependencyOn(module, sameLayerModule):
message = "module '{}' in moduleGroup '{}' " \
"has dependency on module '{}' in same layer '{}'"
exceptionString = message.format(
module, moduleGroup, sameLayerModule, layer)
raise PolicyException(exceptionString)
def _aboveLayerCheck(self, layer, moduleGroup, modulesInGroup):
modulesAboveLayer = self._modulesInLayersAbove(moduleGroup)
for module in modulesInGroup:
for aboveLayerModule in modulesAboveLayer:
if self.graph.hasDependencyOn(module, aboveLayerModule):
group = self.moduleToModuleGroup[aboveLayerModule]
message = "module '{}' in moduleGroup '{}' " \
"has dependency on module '{}' in moduleGroup '{}'"
exceptionString = message.format(
module, moduleGroup,
aboveLayerModule, group)
raise PolicyException(exceptionString)
class ImportGraphCycleChecker(object):
"""
Checks that there are no cycles in the import graph
(except those that are explicitly allowed)
"""
# cyclic dependencies that we want to exclude from validation;
# essentially, an entry here removes an edge from the dependency
# graph as far as cycle detection is concerned
cycleExclusions = [
]
def __init__(self, graph):
self.graph = graph
self.visitStack = []
def checkNoCycles(self):
graph = self._getPreprocessedGraph()
for name, node in graph.iterNodes():
if node.color == ImportGraphNodeColor.WHITE:
self._visitNode(graph, node)
def _getPreprocessedGraph(self):
graph = copy.deepcopy(self.graph)
for name, dependency in self.cycleExclusions:
graph.removeDependency(name, dependency)
return graph
def _visitNode(self, graph, node):
self.visitStack.append(node)
node.color = ImportGraphNodeColor.GREY
for dependency in node.deps:
if not graph.hasAnyDependencies(dependency):
continue
dependencyNode = graph.getNodeFor(dependency)
if dependencyNode.color == ImportGraphNodeColor.GREY:
self.visitStack.append(dependencyNode)
pathString = ' --> '.join(
[visited.name for visited in self.visitStack])
exceptionStr = "Circular import reference: {}".format(
pathString)
raise PolicyException(exceptionStr)
elif dependencyNode.color == ImportGraphNodeColor.WHITE:
self._visitNode(graph, dependencyNode)
node.color = ImportGraphNodeColor.BLACK
self.visitStack.pop()
#############
# Snakefood #
#############
class SnakefoodEntries(object):
"""
A list of import entries that snakefood generates
"""
def __init__(self):
self.entries = []
def append(self, entry):
self.entries.append(entry)
def printEntries(self):
pprint.pprint(self.entries)
def iterEntries(self):
return iter(self.entries)
class SnakefoodEntry(object):
"""
An import record that snakefood generates
"""
def __init__(self, from_root, from_filename, to_root, to_filename):
self.from_root = from_root
self.from_filename = from_filename
self.to_root = to_root
self.to_filename = to_filename
def __repr__(self):
return "SnakefoodEntry: {} -> {}".format(
self.from_filename, self.to_filename)
class SnakefoodScanner(object):
"""
Scans for imports within modules in the project.
Mostly taken from here:
https://bitbucket.org/blais/snakefood/src/
e0a74fa6260dcd44716d40b4eb404ca024323eac/
lib/python/snakefood/gendeps.py?at=default
"""
def __init__(self):
self.optsIgnoreUnused = None
self.optsVerbose = 0
self.optsDoPragmas = True
self.optsQuiet = 1
self.optsInternal = 1
self.optsExternal = None
self.optsIgnores = ['.svn', 'CVS', 'build', '.hg', '.git']
self.optsPrintRoots = None
self.optsFollow = True
self.args = [utils.packageName]
def scan(self):
"""
Returns an ImportGraph
"""
self.optsVerbose -= self.optsQuiet
setup_logging(self.optsVerbose)
info = logging.info
warning = logging.warning
debug = logging.debug
if self.optsInternal and self.optsExternal:
message = "Using --internal and --external at the same time " \
"does not make sense."
raise SnakefoodScannerException(message)
if self.optsPrintRoots:
inroots = find_roots(self.args, self.optsIgnores)
for dn in sorted(inroots):
print(dn)
return
info("")
info("Input paths:")
for arg in self.args:
fn = os.path.realpath(arg)
info(' {}'.format(fn))
if not os.path.exists(fn):
message = "Filename '{}' does not exist.".format(fn)
raise SnakefoodScannerException(message)
# Get the list of package roots for our input files and prepend
        # them to the module search path to ensure localized imports.
inroots = find_roots(self.args, self.optsIgnores)
if (self.optsInternal or self.optsExternal) and not inroots:
message = "No package roots found from the given files or " \
"directories. Using --internal with these roots will " \
"generate no dependencies."
raise SnakefoodScannerException(message)
info("")
info("Roots of the input files:")
for root in inroots:
info(' {}'.format(root))
info("")
info("Using the following import path to search for modules:")
sys.path = inroots + sys.path
for dn in sys.path:
info(" {}".format(dn))
inroots = frozenset(inroots)
# Find all the dependencies.
info("")
info("Processing files:")
info("")
allfiles = defaultdict(set)
allerrors = []
processed_files = set()
fiter = iter_pyfiles(self.args, self.optsIgnores, False)
while 1:
newfiles = set()
for fn in fiter:
if fn in processed_files:
continue # Make sure we process each file only once.
info(" {}".format(fn))
processed_files.add(fn)
if is_python(fn):
files, errors = find_dependencies(
fn, self.optsVerbose,
self.optsDoPragmas, self.optsVerbose)
allerrors.extend(errors)
else:
# If the file is not a source file, we don't know how
# to get the dependencies of that (without importing,
# which we want to avoid).
files = []
# When packages are the source of dependencies, remove the
# __init__ file. This is important because the targets
# also do not include the __init__ (i.e. when "from
# <package> import <subpackage>" is seen).
if os.path.basename(fn) == '__init__.py':
fn = os.path.dirname(fn)
# Make sure all the files at least appear in the output,
# even if it has no dependency.
from_ = relfile(fn, self.optsIgnores)
if from_ is None:
continue
infrom = from_[0] in inroots
if self.optsInternal and not infrom:
continue
if not self.optsExternal:
allfiles[from_].add((None, None))
# Add the dependencies.
for dfn in files:
xfn = dfn
if os.path.basename(xfn) == '__init__.py':
xfn = os.path.dirname(xfn)
to_ = relfile(xfn, self.optsIgnores)
into = to_[0] in inroots
if (self.optsInternal and not into) or \
(self.optsExternal and into):
continue
allfiles[from_].add(to_)
newfiles.add(dfn)
if not (self.optsFollow and newfiles):
break
else:
fiter = iter(sorted(newfiles))
# If internal is used twice, we filter down further the
# dependencies to the set of files that were processed only,
# not just to the files that live in the same roots.
if self.optsInternal >= 2:
filtfiles = type(allfiles)()
for from_, tolist in allfiles.iteritems():
filtfiles[from_] = set(
x for x in tolist if x in allfiles or x == (None, None))
allfiles = filtfiles
info("")
info("SUMMARY")
info("=======")
# Output a list of the symbols that could not
# be imported as modules.
reports = [
("Modules that were ignored because not used:",
ERROR_UNUSED, info),
("Modules that could not be imported:",
ERROR_IMPORT, warning),
]
if self.optsVerbose >= 2:
reports.append(
("Symbols that could not be imported as modules:",
ERROR_SYMBOL, debug))
for msg, errtype, efun in reports:
names = set(name for (err, name) in allerrors if err is errtype)
if names:
efun("")
efun(msg)
for name in sorted(names):
efun(" {}".format(name))
# Output the list of roots found.
info("")
info("Found roots:")
foundRoots = set()
for key, files in allfiles.iteritems():
foundRoots.add(key[0])
foundRoots.update(map(operator.itemgetter(0), files))
if None in foundRoots:
foundRoots.remove(None)
for root in sorted(foundRoots):
info(" {}".format(root))
# Output the dependencies.
entries = SnakefoodEntries()
info("")
for (from_root, from_), targets in sorted(
allfiles.iteritems(), key=operator.itemgetter(0)):
for to_root, to_ in sorted(targets):
entry = SnakefoodEntry(from_root, from_, to_root, to_)
entries.append(entry)
graph = ImportGraph()
for entry in entries.iterEntries():
graph.addEntry(entry)
return graph
|
apache-2.0
|
binhqnguyen/ln
|
nsc/scons-local-1.2.0.d20090223/SCons/Tool/RCS.py
|
19
|
2190
|
"""SCons.Tool.RCS.py
Tool-specific initialization for RCS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/RCS.py 4043 2009/02/23 09:06:45 scons"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
RCS to an Environment."""
def RCSFactory(env=env):
""" """
act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR')
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'RCS', RCSFactory)
env.RCS = RCSFactory
env['RCS'] = 'rcs'
env['RCS_CO'] = 'co'
env['RCS_COFLAGS'] = SCons.Util.CLVar('')
env['RCS_COCOM'] = '$RCS_CO $RCS_COFLAGS $TARGET'
def exists(env):
return env.Detect('rcs')
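# Hedged usage sketch (not part of this tool module). Inside an SConstruct,
# the factory installed by generate() is reached through an Environment;
# '.' is a placeholder for the directory kept under RCS control, and the
# SourceCode() wrapper is assumed to be the SCons 1.x API for it:
#
#   env = Environment(tools=['RCS'])
#   env.SourceCode('.', env.RCS())   # fetch missing sources via "$RCS_COCOM"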
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-2.0
|
fast01/zerorpc-python
|
tests/test_client_async.py
|
73
|
2652
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2013 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
import sys
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_client_server_client_timeout_with_async():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
gevent.sleep(10)
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client(timeout=2)
client.connect(endpoint)
async_result = client.add(1, 4, async=True)
if sys.version_info < (2, 7):
def _do_with_assert_raises():
print async_result.get()
assert_raises(zerorpc.TimeoutExpired, _do_with_assert_raises)
else:
with assert_raises(zerorpc.TimeoutExpired):
print async_result.get()
client.close()
srv.close()
def test_client_server_with_async():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client()
client.connect(endpoint)
async_result = client.lolita(async=True)
assert async_result.get() == 42
async_result = client.add(1, 4, async=True)
assert async_result.get() == 5
|
mit
|